ppc: Fix sign extension issue in mtmsr(d) emulation
[qemu.git] / linux-user / signal.c
1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include <sys/ucontext.h>
21 #include <sys/resource.h>
22
23 #include "qemu.h"
24 #include "qemu-common.h"
25 #include "target_signal.h"
26 #include "trace.h"
27
/* Per-process emulated sigaltstack state; starts out disabled (the guest
   has not configured an alternate signal stack). */
static struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};
33
/* Guest view of each target signal's disposition, indexed by (signal - 1). */
static struct target_sigaction sigact_table[TARGET_NSIG];

/* Host-side SA_SIGINFO handler that forwards host signals to the guest. */
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);
38
/* Host -> target signal number mapping.  Entries left at 0 here are
   filled in with the identity mapping by signal_init(). */
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
/* Inverse of the above; built at startup by signal_init(). */
static uint8_t target_to_host_signal_table[_NSIG];
83
84 static inline int on_sig_stack(unsigned long sp)
85 {
86 return (sp - target_sigaltstack_used.ss_sp
87 < target_sigaltstack_used.ss_size);
88 }
89
90 static inline int sas_ss_flags(unsigned long sp)
91 {
92 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
93 : on_sig_stack(sp) ? SS_ONSTACK : 0);
94 }
95
96 int host_to_target_signal(int sig)
97 {
98 if (sig < 0 || sig >= _NSIG)
99 return sig;
100 return host_to_target_signal_table[sig];
101 }
102
103 int target_to_host_signal(int sig)
104 {
105 if (sig < 0 || sig >= _NSIG)
106 return sig;
107 return target_to_host_signal_table[sig];
108 }
109
110 static inline void target_sigemptyset(target_sigset_t *set)
111 {
112 memset(set, 0, sizeof(*set));
113 }
114
115 static inline void target_sigaddset(target_sigset_t *set, int signum)
116 {
117 signum--;
118 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
119 set->sig[signum / TARGET_NSIG_BPW] |= mask;
120 }
121
122 static inline int target_sigismember(const target_sigset_t *set, int signum)
123 {
124 signum--;
125 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
126 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
127 }
128
/* Convert a host sigset to the guest layout, without byte swapping.
   Iterates host signal numbers 1..TARGET_NSIG and maps each to its
   target number before setting the bit. */
static void host_to_target_sigset_internal(target_sigset_t *d,
                                           const sigset_t *s)
{
    int i;
    target_sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (sigismember(s, i)) {
            target_sigaddset(d, host_to_target_signal(i));
        }
    }
}
140
141 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
142 {
143 target_sigset_t d1;
144 int i;
145
146 host_to_target_sigset_internal(&d1, s);
147 for(i = 0;i < TARGET_NSIG_WORDS; i++)
148 d->sig[i] = tswapal(d1.sig[i]);
149 }
150
/* Convert a guest sigset (already in host byte order) to a host sigset.
   Iterates guest signal numbers 1..TARGET_NSIG and maps each to its
   host number before setting the bit. */
static void target_to_host_sigset_internal(sigset_t *d,
                                           const target_sigset_t *s)
{
    int i;
    sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (target_sigismember(s, i)) {
            sigaddset(d, target_to_host_signal(i));
        }
    }
}
162
163 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
164 {
165 target_sigset_t s1;
166 int i;
167
168 for(i = 0;i < TARGET_NSIG_WORDS; i++)
169 s1.sig[i] = tswapal(s->sig[i]);
170 target_to_host_sigset_internal(d, &s1);
171 }
172
173 void host_to_target_old_sigset(abi_ulong *old_sigset,
174 const sigset_t *sigset)
175 {
176 target_sigset_t d;
177 host_to_target_sigset(&d, sigset);
178 *old_sigset = d.sig[0];
179 }
180
181 void target_to_host_old_sigset(sigset_t *sigset,
182 const abi_ulong *old_sigset)
183 {
184 target_sigset_t d;
185 int i;
186
187 d.sig[0] = *old_sigset;
188 for(i = 1;i < TARGET_NSIG_WORDS; i++)
189 d.sig[i] = 0;
190 target_to_host_sigset(sigset, &d);
191 }
192
/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal set, not guest ones. This wraps the sigprocmask host calls
 * that should be protected (calls originated from guest)
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    int ret;
    sigset_t val;
    sigset_t *temp = NULL;
    CPUState *cpu = thread_cpu;
    TaskState *ts = (TaskState *)cpu->opaque;
    bool segv_was_blocked = ts->sigsegv_blocked;

    if (set) {
        bool has_sigsegv = sigismember(set, SIGSEGV);
        /* Work on a copy so the caller's set is never modified. */
        val = *set;
        temp = &val;

        /* SIGSEGV is never actually blocked on the host (the emulator
           needs to keep receiving it); the guest's wish is tracked in
           ts->sigsegv_blocked instead. */
        sigdelset(temp, SIGSEGV);

        switch (how) {
        case SIG_BLOCK:
            if (has_sigsegv) {
                ts->sigsegv_blocked = true;
            }
            break;
        case SIG_UNBLOCK:
            if (has_sigsegv) {
                ts->sigsegv_blocked = false;
            }
            break;
        case SIG_SETMASK:
            ts->sigsegv_blocked = has_sigsegv;
            break;
        default:
            g_assert_not_reached();
        }
    }

    ret = sigprocmask(how, temp, oldset);

    /* Report SIGSEGV back as blocked if the guest had it blocked before
       this call, even though the host mask never contains it. */
    if (oldset && segv_was_blocked) {
        sigaddset(oldset, SIGSEGV);
    }

    return ret;
}
241
242 /* siginfo conversion */
243
/* Fill a target_siginfo_t from a host siginfo_t, still in host byte
   order; the swap to guest byte order is done later by tswap_siginfo().
   Which union member is populated depends on the signal number. */
static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
        || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
        /* Should never come here, but who knows. The information for
           the target is irrelevant.  */
        tinfo->_sifields._sigfault._addr = 0;
    } else if (sig == TARGET_SIGIO) {
        tinfo->_sifields._sigpoll._band = info->si_band;
        tinfo->_sifields._sigpoll._fd = info->si_fd;
    } else if (sig == TARGET_SIGCHLD) {
        tinfo->_sifields._sigchld._pid = info->si_pid;
        tinfo->_sifields._sigchld._uid = info->si_uid;
        tinfo->_sifields._sigchld._status
            = host_to_target_waitstatus(info->si_status);
        tinfo->_sifields._sigchld._utime = info->si_utime;
        tinfo->_sifields._sigchld._stime = info->si_stime;
    } else if (sig >= TARGET_SIGRTMIN) {
        tinfo->_sifields._rt._pid = info->si_pid;
        tinfo->_sifields._rt._uid = info->si_uid;
        /* XXX: potential problem if 64 bit */
        tinfo->_sifields._rt._sigval.sival_ptr
            = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
    }
}
275
/* Byte-swap a target_siginfo_t into guest byte order.  The union member
   swapped is chosen by signal number, mirroring the layout used in
   host_to_target_siginfo_noswap().  Called with tinfo == info for the
   in-place conversion in host_to_target_siginfo(). */
static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    int sig = info->si_signo;
    tinfo->si_signo = tswap32(sig);
    tinfo->si_errno = tswap32(info->si_errno);
    tinfo->si_code = tswap32(info->si_code);

    if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
        || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
        tinfo->_sifields._sigfault._addr
            = tswapal(info->_sifields._sigfault._addr);
    } else if (sig == TARGET_SIGIO) {
        tinfo->_sifields._sigpoll._band
            = tswap32(info->_sifields._sigpoll._band);
        tinfo->_sifields._sigpoll._fd = tswap32(info->_sifields._sigpoll._fd);
    } else if (sig == TARGET_SIGCHLD) {
        tinfo->_sifields._sigchld._pid
            = tswap32(info->_sifields._sigchld._pid);
        tinfo->_sifields._sigchld._uid
            = tswap32(info->_sifields._sigchld._uid);
        tinfo->_sifields._sigchld._status
            = tswap32(info->_sifields._sigchld._status);
        tinfo->_sifields._sigchld._utime
            = tswapal(info->_sifields._sigchld._utime);
        tinfo->_sifields._sigchld._stime
            = tswapal(info->_sifields._sigchld._stime);
    } else if (sig >= TARGET_SIGRTMIN) {
        tinfo->_sifields._rt._pid = tswap32(info->_sifields._rt._pid);
        tinfo->_sifields._rt._uid = tswap32(info->_sifields._rt._uid);
        tinfo->_sifields._rt._sigval.sival_ptr
            = tswapal(info->_sifields._rt._sigval.sival_ptr);
    }
}
310
311
/* Convert a host siginfo_t to a guest target_siginfo_t, including the
   swap to guest byte order (done in place on tinfo). */
void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    host_to_target_siginfo_noswap(tinfo, info);
    tswap_siginfo(tinfo, tinfo);
}
317
/* XXX: we support only POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* Only the _rt union member is converted here; non-RT siginfo from
       the guest is not handled (see the XXX notes above). */
    info->si_signo = tswap32(tinfo->si_signo);
    info->si_errno = tswap32(tinfo->si_errno);
    info->si_code = tswap32(tinfo->si_code);
    info->si_pid = tswap32(tinfo->_sifields._rt._pid);
    info->si_uid = tswap32(tinfo->_sifields._rt._uid);
    info->si_value.sival_ptr =
        (void *)(long)tswapal(tinfo->_sifields._rt._sigval.sival_ptr);
}
330
331 static int fatal_signal (int sig)
332 {
333 switch (sig) {
334 case TARGET_SIGCHLD:
335 case TARGET_SIGURG:
336 case TARGET_SIGWINCH:
337 /* Ignored by default. */
338 return 0;
339 case TARGET_SIGCONT:
340 case TARGET_SIGSTOP:
341 case TARGET_SIGTSTP:
342 case TARGET_SIGTTIN:
343 case TARGET_SIGTTOU:
344 /* Job control signals. */
345 return 0;
346 default:
347 return 1;
348 }
349 }
350
351 /* returns 1 if given signal should dump core if not handled */
352 static int core_dump_signal(int sig)
353 {
354 switch (sig) {
355 case TARGET_SIGABRT:
356 case TARGET_SIGFPE:
357 case TARGET_SIGILL:
358 case TARGET_SIGQUIT:
359 case TARGET_SIGSEGV:
360 case TARGET_SIGTRAP:
361 case TARGET_SIGBUS:
362 return (1);
363 default:
364 return (0);
365 }
366 }
367
/* One-time setup: complete the host<->target signal mapping tables and
   install the host signal handlers that forward signals to the guest. */
void signal_init(void)
{
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    /* Unmapped entries default to the identity mapping; the reverse
       table is then derived from the forward one. */
    for(i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for(i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for(i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        /* Record the disposition inherited from the parent process so
           the guest sees the same initial state. */
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case.  */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We can not just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal (i))
            sigaction(host_sig, &act, NULL);
    }
}
410
411 /* signal queue handling */
412
413 static inline struct sigqueue *alloc_sigqueue(CPUArchState *env)
414 {
415 CPUState *cpu = ENV_GET_CPU(env);
416 TaskState *ts = cpu->opaque;
417 struct sigqueue *q = ts->first_free;
418 if (!q)
419 return NULL;
420 ts->first_free = q->next;
421 return q;
422 }
423
424 static inline void free_sigqueue(CPUArchState *env, struct sigqueue *q)
425 {
426 CPUState *cpu = ENV_GET_CPU(env);
427 TaskState *ts = cpu->opaque;
428
429 q->next = ts->first_free;
430 ts->first_free = q;
431 }
432
/* abort execution with signal */
/* Terminate QEMU with the exit status the guest would have had for an
   uncaught fatal signal: optionally dump a guest-format core, then die
   from the corresponding host signal ourselves.  Never returns. */
static void QEMU_NORETURN force_sig(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur=0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourself a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}
486
/* queue a signal so that it will be send to the virtual CPU as soon
   as possible */
/* Returns 1 when the signal was queued for delivery, 0 when it was
   ignored/handled by default action, -EAGAIN when the RT queue is full.
   force_sig() paths do not return. */
int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;
    struct emulated_sigtable *k;
    struct sigqueue *q, **pq;
    abi_ulong handler;
    int queue;

    trace_user_queue_signal(env, sig);
    k = &ts->sigtab[sig - 1];
    /* When a gdb is attached it may want all signals queued for it. */
    queue = gdb_queuesig ();
    handler = sigact_table[sig - 1]._sa_handler;

    if (ts->sigsegv_blocked && sig == TARGET_SIGSEGV) {
        /* Guest has blocked SIGSEGV but we got one anyway. Assume this
         * is a forced SIGSEGV (ie one the kernel handles via force_sig_info
         * because it got a real MMU fault). A blocked SIGSEGV in that
         * situation is treated as if using the default handler. This is
         * not correct if some other process has randomly sent us a SIGSEGV
         * via kill(), but that is not easy to distinguish at this point,
         * so we assume it doesn't happen.
         */
        handler = TARGET_SIG_DFL;
    }

    if (!queue && handler == TARGET_SIG_DFL) {
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            /* Default action for job-control signals: stop ourselves. */
            kill(getpid(),SIGSTOP);
            return 0;
        } else
        /* default handler : ignore some signal. The other are fatal */
        if (sig != TARGET_SIGCHLD &&
            sig != TARGET_SIGURG &&
            sig != TARGET_SIGWINCH &&
            sig != TARGET_SIGCONT) {
            force_sig(sig);
        } else {
            return 0; /* indicate ignored */
        }
    } else if (!queue && handler == TARGET_SIG_IGN) {
        /* ignore signal */
        return 0;
    } else if (!queue && handler == TARGET_SIG_ERR) {
        force_sig(sig);
    } else {
        pq = &k->first;
        if (sig < TARGET_SIGRTMIN) {
            /* if non real time signal, we queue exactly one signal */
            if (!k->pending)
                q = &k->info;
            else
                return 0;
        } else {
            if (!k->pending) {
                /* first signal */
                q = &k->info;
            } else {
                /* Additional RT signals go on the per-signal linked
                   list, appended at the tail. */
                q = alloc_sigqueue(env);
                if (!q)
                    return -EAGAIN;
                while (*pq != NULL)
                    pq = &(*pq)->next;
            }
        }
        *pq = q;
        q->info = *info;
        q->next = NULL;
        k->pending = 1;
        /* signal that a new signal is pending */
        ts->signal_pending = 1;
        return 1; /* indicates that the signal was queued */
    }
}
563
#ifndef HAVE_SAFE_SYSCALL
/* Fallback when the host has no safe-syscall support: there is no
   syscall region to rewind out of, so do nothing. */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif
570
/* Host SA_SIGINFO handler: either let the CPU emulator consume the
   signal as a guest exception, or translate it and queue it for the
   guest, kicking the virtual CPU if it was queued. */
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    int sig;
    target_siginfo_t tinfo;

    /* the CPU emulator uses some host signals to detect exceptions,
       we forward to it some signals */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    if (queue_signal(env, sig, &tinfo) == 1) {
        /* interrupt the virtual CPU as soon as possible */
        cpu_exit(thread_cpu);
    }
}
600
/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    if(uoss_addr)
    {
        /* Snapshot the current settings first; they are copied out to
           the guest only after any new stack has been validated. */
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if(uss_addr)
    {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        /* Changing the alternate stack while executing on it is
           forbidden, as in the kernel. */
        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}
673
/* do_sigaction() return host values and errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    /* As on Linux, SIGKILL and SIGSTOP dispositions cannot be changed. */
    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)
        return -EINVAL;
    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#if !defined(TARGET_MIPS)
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#if !defined(TARGET_MIPS)
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        /* SIGSEGV/SIGBUS handlers stay installed on the host: the
           emulator needs them to detect exceptions (see signal_init). */
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal (sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
730
731 #if defined(TARGET_I386) && TARGET_ABI_BITS == 32
732
/* from the Linux kernel */

/* One x87 register in the 10-byte fsave layout. */
struct target_fpreg {
    uint16_t significand[4];
    uint16_t exponent;
};

/* One x87 register in the padded fxsave layout. */
struct target_fpxreg {
    uint16_t significand[4];
    uint16_t exponent;
    uint16_t padding[3];
};

struct target_xmmreg {
    abi_ulong element[4];
};

/* FPU state area embedded in the signal frame; written by
   cpu_x86_fsave() in setup_sigcontext(). */
struct target_fpstate {
    /* Regular FPU environment */
    abi_ulong cw;
    abi_ulong sw;
    abi_ulong tag;
    abi_ulong ipoff;
    abi_ulong cssel;
    abi_ulong dataoff;
    abi_ulong datasel;
    struct target_fpreg _st[8];
    uint16_t  status;
    uint16_t  magic;          /* 0xffff = regular FPU data only */

    /* FXSR FPU environment */
    abi_ulong _fxsr_env[6];   /* FXSR FPU env is ignored */
    abi_ulong mxcsr;
    abi_ulong reserved;
    struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
    struct target_xmmreg _xmm[8];
    abi_ulong padding[56];
};

#define X86_FXSR_MAGIC		0x0000

/* Guest-visible register dump, matching the i386 kernel sigcontext. */
struct target_sigcontext {
    uint16_t gs, __gsh;
    uint16_t fs, __fsh;
    uint16_t es, __esh;
    uint16_t ds, __dsh;
    abi_ulong edi;
    abi_ulong esi;
    abi_ulong ebp;
    abi_ulong esp;
    abi_ulong ebx;
    abi_ulong edx;
    abi_ulong ecx;
    abi_ulong eax;
    abi_ulong trapno;
    abi_ulong err;
    abi_ulong eip;
    uint16_t cs, __csh;
    abi_ulong eflags;
    abi_ulong esp_at_signal;
    uint16_t ss, __ssh;
    abi_ulong fpstate; /* pointer */
    abi_ulong oldmask;
    abi_ulong cr2;
};

struct target_ucontext {
    abi_ulong         tuc_flags;
    abi_ulong         tuc_link;
    target_stack_t    tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t   tuc_sigmask;  /* mask last for extensibility */
};

/* Non-RT signal frame pushed on the guest stack by setup_frame(). */
struct sigframe
{
    abi_ulong pretcode;
    int sig;
    struct target_sigcontext sc;
    struct target_fpstate fpstate;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    char retcode[8];
};

/* RT signal frame pushed on the guest stack by setup_rt_frame(). */
struct rt_sigframe
{
    abi_ulong pretcode;
    int sig;
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext uc;
    struct target_fpstate fpstate;
    char retcode[8];
};
828
/*
 * Set up a signal frame.
 */

/* XXX: save x87 state */
/* Fill the guest sigcontext and FPU state area from the CPU state.
   'fpstate_addr' is the guest address of 'fpstate', stored into
   sc->fpstate so sigreturn can find it. */
static void setup_sigcontext(struct target_sigcontext *sc,
        struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
        abi_ulong fpstate_addr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint16_t magic;

    /* already locked in setup_frame() */
    __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
    __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
    __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
    __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
    __put_user(env->regs[R_EDI], &sc->edi);
    __put_user(env->regs[R_ESI], &sc->esi);
    __put_user(env->regs[R_EBP], &sc->ebp);
    __put_user(env->regs[R_ESP], &sc->esp);
    __put_user(env->regs[R_EBX], &sc->ebx);
    __put_user(env->regs[R_EDX], &sc->edx);
    __put_user(env->regs[R_ECX], &sc->ecx);
    __put_user(env->regs[R_EAX], &sc->eax);
    __put_user(cs->exception_index, &sc->trapno);
    __put_user(env->error_code, &sc->err);
    __put_user(env->eip, &sc->eip);
    __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
    __put_user(env->eflags, &sc->eflags);
    __put_user(env->regs[R_ESP], &sc->esp_at_signal);
    __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);

    /* Save the legacy (fsave) FPU image; magic 0xffff marks it as
       regular FPU data only (no FXSR part). */
    cpu_x86_fsave(env, fpstate_addr, 1);
    fpstate->status = fpstate->sw;
    magic = 0xffff;
    __put_user(magic, &fpstate->magic);
    __put_user(fpstate_addr, &sc->fpstate);

    /* non-iBCS2 extensions.. */
    __put_user(mask, &sc->oldmask);
    __put_user(env->cr[2], &sc->cr2);
}
872
/*
 * Determine which stack to use..
 */

/* Pick the guest stack pointer for the new signal frame: the sigaltstack
   if SA_ONSTACK requested and not already on it, otherwise the current
   stack (with a legacy SS-based special case), 8-byte aligned. */
static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
{
    unsigned long esp;

    /* Default to using normal stack */
    esp = env->regs[R_ESP];
    /* This is the X/Open sanctioned signal stack switching.  */
    if (ka->sa_flags & TARGET_SA_ONSTACK) {
        if (sas_ss_flags(esp) == 0) {
            esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
        }
    } else {

        /* This is the legacy signal stack switching. */
        if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
            !(ka->sa_flags & TARGET_SA_RESTORER) &&
            ka->sa_restorer) {
            esp = (unsigned long) ka->sa_restorer;
        }
    }
    return (esp - frame_size) & -8ul;
}
900
/* compare linux/arch/i386/kernel/signal.c:setup_frame() */
/* Build a non-RT signal frame on the guest stack and redirect the CPU
   to the guest handler.  On failure, delivers SIGSEGV instead. */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr;
    struct sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);

    setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
            frame_addr + offsetof(struct sigframe, fpstate));

    /* The first word of the blocked mask lives in the sigcontext; the
       rest goes in extramask. */
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        abi_ulong retcode_addr;
        retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
        __put_user(retcode_addr, &frame->pretcode);
        /* This is popl %eax ; movl $,%eax ; int $0x80 */
        val16 = 0xb858;
        __put_user(val16, (uint16_t *)(frame->retcode+0));
        __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+6));
    }


    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    /* Clear the trap flag so single-stepping doesn't leak into the
       handler. */
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    if (sig == TARGET_SIGSEGV) {
        /* Avoid recursing into a faulting SIGSEGV handler. */
        ka->_sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV /* , current */);
}
962
/* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
/* Build an RT signal frame (siginfo + ucontext) on the guest stack and
   redirect the CPU to the guest handler.  On failure, delivers SIGSEGV. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr, addr;
    struct rt_sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);
    /* pinfo/puc are the handler's second and third arguments, pointing
       into this same frame. */
    addr = frame_addr + offsetof(struct rt_sigframe, info);
    __put_user(addr, &frame->pinfo);
    addr = frame_addr + offsetof(struct rt_sigframe, uc);
    __put_user(addr, &frame->puc);
    tswap_siginfo(&frame->info, info);

    /* Create the ucontext.  */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
            set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        addr = frame_addr + offsetof(struct rt_sigframe, retcode);
        __put_user(addr, &frame->pretcode);
        /* This is movl $,%eax ; int $0x80 */
        __put_user(0xb8, (char *)(frame->retcode+0));
        __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+5));
    }

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    /* Clear the trap flag so single-stepping doesn't leak into the
       handler. */
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    if (sig == TARGET_SIGSEGV) {
        /* Avoid recursing into a faulting SIGSEGV handler. */
        ka->_sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV /* , current */);
}
1035
/* Restore CPU state from a guest sigcontext (the inverse of
   setup_sigcontext).  Returns 0 on success, 1 on a bad FPU state
   pointer. */
static int
restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
{
    unsigned int err = 0;
    abi_ulong fpstate_addr;
    unsigned int tmpflags;

    cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
    cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
    cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
    cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));

    env->regs[R_EDI] = tswapl(sc->edi);
    env->regs[R_ESI] = tswapl(sc->esi);
    env->regs[R_EBP] = tswapl(sc->ebp);
    env->regs[R_ESP] = tswapl(sc->esp);
    env->regs[R_EBX] = tswapl(sc->ebx);
    env->regs[R_EDX] = tswapl(sc->edx);
    env->regs[R_ECX] = tswapl(sc->ecx);
    env->regs[R_EAX] = tswapl(sc->eax);
    env->eip = tswapl(sc->eip);

    /* Force RPL 3 on the restored code/stack selectors (user mode). */
    cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
    cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);

    /* Only restore the user-modifiable subset of EFLAGS. */
    tmpflags = tswapl(sc->eflags);
    env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
    //		regs->orig_eax = -1;		/* disable syscall checks */

    fpstate_addr = tswapl(sc->fpstate);
    if (fpstate_addr != 0) {
        if (!access_ok(VERIFY_READ, fpstate_addr,
                       sizeof(struct target_fpstate)))
            goto badframe;
        cpu_x86_frstor(env, fpstate_addr, 1);
    }

    return err;
badframe:
    return 1;
}
1077
/*
 * Implement the i386 sigreturn syscall: restore the signal mask and CPU
 * state from the sigframe the handler is returning through.  Raises
 * SIGSEGV and returns 0 if the frame is inaccessible or malformed.
 */
long do_sigreturn(CPUX86State *env)
{
    struct sigframe *frame;
    /* The handler popped the return address and signal number (8 bytes) */
    abi_ulong frame_addr = env->regs[R_ESP] - 8;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    trace_user_do_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    /* set blocked signals */
    __get_user(target_set.sig[0], &frame->sc.oldmask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&set, &target_set);
    do_sigprocmask(SIG_SETMASK, &set, NULL);

    /* restore registers */
    if (restore_sigcontext(env, &frame->sc))
        goto badframe;
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return 0;
}
1109
/*
 * Implement the i386 rt_sigreturn syscall: restore the signal mask, CPU
 * state and signal-stack settings from the rt sigframe.  Raises SIGSEGV
 * and returns 0 on a bad frame.
 */
long do_rt_sigreturn(CPUX86State *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe *frame;
    sigset_t set;

    /* The handler popped only the return address (4 bytes) */
    frame_addr = env->regs[R_ESP] - 4;
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    do_sigprocmask(SIG_SETMASK, &set, NULL);

    if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
        goto badframe;
    }

    /* Re-apply the sigaltstack settings saved in the ucontext */
    if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
                       get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return 0;
}
1140
1141 #elif defined(TARGET_AARCH64)
1142
/* AArch64 kernel sigcontext: general registers plus a reserved area that
 * holds the extensible FP/SIMD context records. */
struct target_sigcontext {
    uint64_t fault_address;
    /* AArch64 registers */
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
    /* 4K reserved for FP/SIMD state and future expansion */
    char __reserved[4096] __attribute__((__aligned__(16)));
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    /* glibc uses a 1024-bit sigset_t */
    char __unused[1024 / 8 - sizeof(target_sigset_t)];
    /* last for future expansion */
    struct target_sigcontext tuc_mcontext;
};

/*
 * Header to be used at the beginning of structures extending the user
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
 */
struct target_aarch64_ctx {
    uint32_t magic;
    uint32_t size;
};

#define TARGET_FPSIMD_MAGIC 0x46508001

struct target_fpsimd_context {
    struct target_aarch64_ctx head;
    uint32_t fpsr;
    uint32_t fpcr;
    uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
};

/*
 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
 * user space as it will change with the addition of new context. User space
 * should check the magic/size information.
 */
struct target_aux_context {
    struct target_fpsimd_context fpsimd;
    /* additional context to be added before "end" */
    struct target_aarch64_ctx end;
};

/* Complete frame pushed on the guest stack: siginfo, ucontext, an fp/lr
 * pair for unwinders, and a two-instruction sigreturn trampoline. */
struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    uint64_t fp;
    uint64_t lr;
    uint32_t tramp[2];
};
1203
/*
 * Populate an AArch64 rt sigframe from current CPU state: general
 * registers, pstate, fault address, signal mask, and the FP/SIMD record
 * (terminated by a zero "end" header) in the reserved area.
 * Always returns 0.
 */
static int target_setup_sigframe(struct target_rt_sigframe *sf,
                                 CPUARMState *env, target_sigset_t *set)
{
    int i;
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;

    /* set up the stack frame for unwinding */
    __put_user(env->xregs[29], &sf->fp);
    __put_user(env->xregs[30], &sf->lr);

    for (i = 0; i < 31; i++) {
        __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }
    __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);

    __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
    }

    /* Each 128-bit vreg is stored as two 64-bit halves; swap the halves
     * on big-endian hosts so the in-memory layout matches the ABI. */
    for (i = 0; i < 32; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
        __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
        __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
#else
        __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
        __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
    __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
    __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
    __put_user(sizeof(struct target_fpsimd_context),
            &aux->fpsimd.head.size);

    /* set the "end" magic */
    __put_user(0, &aux->end.magic);
    __put_user(0, &aux->end.size);

    return 0;
}
1249
/*
 * Restore CPU state from an AArch64 rt sigframe: signal mask, general
 * registers, pstate, and the FP/SIMD record.  Returns 0 on success, 1 if
 * the FP/SIMD record's magic or size does not match.
 */
static int target_restore_sigframe(CPUARMState *env,
                                   struct target_rt_sigframe *sf)
{
    sigset_t set;
    int i;
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
    uint32_t magic, size, fpsr, fpcr;
    uint64_t pstate;

    target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
    do_sigprocmask(SIG_SETMASK, &set, NULL);

    for (i = 0; i < 31; i++) {
        __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }

    __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
    pstate_write(env, pstate);

    /* Validate the FP/SIMD record before consuming it */
    __get_user(magic, &aux->fpsimd.head.magic);
    __get_user(size, &aux->fpsimd.head.size);

    if (magic != TARGET_FPSIMD_MAGIC
        || size != sizeof(struct target_fpsimd_context)) {
        return 1;
    }

    /* Mirror of target_setup_sigframe: halves are swapped on big-endian */
    for (i = 0; i < 32; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
        __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
        __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
#else
        __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
        __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __get_user(fpsr, &aux->fpsimd.fpsr);
    vfp_set_fpsr(env, fpsr);
    __get_user(fpcr, &aux->fpsimd.fpcr);
    vfp_set_fpcr(env, fpcr);

    return 0;
}
1296
1297 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
1298 {
1299 abi_ulong sp;
1300
1301 sp = env->xregs[31];
1302
1303 /*
1304 * This is the X/Open sanctioned signal stack switching.
1305 */
1306 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1307 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1308 }
1309
1310 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;
1311
1312 return sp;
1313 }
1314
/*
 * Build an AArch64 signal frame on the guest stack and redirect the CPU
 * to the handler.  Uses sa_restorer as the return address when set,
 * otherwise writes a mov/svc rt_sigreturn trampoline into the frame.
 * Raises SIGSEGV if the frame cannot be written.
 */
static void target_setup_frame(int usig, struct target_sigaction *ka,
                               target_siginfo_t *info, target_sigset_t *set,
                               CPUARMState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr, return_addr;

    frame_addr = get_sigframe(ka, env);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);

    /* Record the current sigaltstack configuration in the ucontext */
    __put_user(target_sigaltstack_used.ss_sp,
                      &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->xregs[31]),
                      &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
                      &frame->uc.tuc_stack.ss_size);
    target_setup_sigframe(frame, env, set);
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        return_addr = ka->sa_restorer;
    } else {
        /* mov x8,#__NR_rt_sigreturn; svc #0 */
        __put_user(0xd2801168, &frame->tramp[0]);
        __put_user(0xd4000001, &frame->tramp[1]);
        return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
    }
    /* Enter the handler: x0 = signal, sp = frame, lr = return trampoline */
    env->xregs[0] = usig;
    env->xregs[31] = frame_addr;
    env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
    env->pc = ka->_sa_handler;
    env->xregs[30] = return_addr;
    if (info) {
        /* SA_SIGINFO handlers also get siginfo (x1) and ucontext (x2) */
        tswap_siginfo(&frame->info, info);
        env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
        env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    }

    unlock_user_struct(frame, frame_addr, 1);
    return;

 give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sig(TARGET_SIGSEGV);
}
1364
/* AArch64 rt signal delivery: siginfo-carrying variant of frame setup. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info, target_sigset_t *set,
                           CPUARMState *env)
{
    target_setup_frame(sig, ka, info, set, env);
}
1371
1372 static void setup_frame(int sig, struct target_sigaction *ka,
1373 target_sigset_t *set, CPUARMState *env)
1374 {
1375 target_setup_frame(sig, ka, 0, set, env);
1376 }
1377
/*
 * AArch64 rt_sigreturn: validate frame alignment, restore CPU state and
 * signal mask from the frame at SP, and re-apply the saved sigaltstack.
 * Raises SIGSEGV and returns 0 on any failure.
 */
long do_rt_sigreturn(CPUARMState *env)
{
    struct target_rt_sigframe *frame = NULL;
    abi_ulong frame_addr = env->xregs[31];

    trace_user_do_rt_sigreturn(env, frame_addr);
    /* Frames are written 16-byte aligned; anything else is tampering */
    if (frame_addr & 15) {
        goto badframe;
    }

    if  (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (target_restore_sigframe(env, frame)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr +
            offsetof(struct target_rt_sigframe, uc.tuc_stack),
            0, get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

 badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return 0;
}
1410
/* AArch64 has no separate non-rt sigreturn; the rt variant serves both. */
long do_sigreturn(CPUARMState *env)
{
    return do_rt_sigreturn(env);
}
1415
1416 #elif defined(TARGET_ARM)
1417
/* ARM (32-bit) sigcontext: trap info, saved mask word, and r0-r15 plus
 * CPSR and the faulting address, matching the kernel layout. */
struct target_sigcontext {
    abi_ulong trap_no;
    abi_ulong error_code;
    abi_ulong oldmask;
    abi_ulong arm_r0;
    abi_ulong arm_r1;
    abi_ulong arm_r2;
    abi_ulong arm_r3;
    abi_ulong arm_r4;
    abi_ulong arm_r5;
    abi_ulong arm_r6;
    abi_ulong arm_r7;
    abi_ulong arm_r8;
    abi_ulong arm_r9;
    abi_ulong arm_r10;
    abi_ulong arm_fp;
    abi_ulong arm_ip;
    abi_ulong arm_sp;
    abi_ulong arm_lr;
    abi_ulong arm_pc;
    abi_ulong arm_cpsr;
    abi_ulong fault_address;
};

/* Pre-2.6.18 ucontext layout: sigmask last, no coprocessor regspace. */
struct target_ucontext_v1 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;  /* mask last for extensibility */
};

/* 2.6.18+ ucontext layout: adds padding and a coprocessor regspace that
 * holds magic/size-tagged VFP and iWMMXt records. */
struct target_ucontext_v2 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;  /* mask last for extensibility */
    char __unused[128 - sizeof(target_sigset_t)];
    abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
};

struct target_user_vfp {
    uint64_t fpregs[32];
    abi_ulong fpscr;
};

struct target_user_vfp_exc {
    abi_ulong fpexc;
    abi_ulong fpinst;
    abi_ulong fpinst2;
};

struct target_vfp_sigframe {
    abi_ulong magic;
    abi_ulong size;
    struct target_user_vfp ufp;
    struct target_user_vfp_exc ufp_exc;
} __attribute__((__aligned__(8)));

struct target_iwmmxt_sigframe {
    abi_ulong magic;
    abi_ulong size;
    uint64_t regs[16];
    /* Note that not all the coprocessor control registers are stored here */
    uint32_t wcssf;
    uint32_t wcasf;
    uint32_t wcgr0;
    uint32_t wcgr1;
    uint32_t wcgr2;
    uint32_t wcgr3;
} __attribute__((__aligned__(8)));

#define TARGET_VFP_MAGIC 0x56465001
#define TARGET_IWMMXT_MAGIC 0x12ef842a

struct sigframe_v1
{
    struct target_sigcontext sc;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    abi_ulong retcode;
};

struct sigframe_v2
{
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

struct rt_sigframe_v1
{
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext_v1 uc;
    abi_ulong retcode;
};

struct rt_sigframe_v2
{
    struct target_siginfo info;
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

#define TARGET_CONFIG_CPU_32 1

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN	(0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN	(0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))

/* Trampoline instructions, indexed by (rt ? 2 : 0) + (thumb ? 1 : 0);
 * setup_return() picks one of these when no sa_restorer is supplied. */
static const abi_ulong retcodes[4] = {
	SWI_SYS_SIGRETURN,	SWI_THUMB_SIGRETURN,
	SWI_SYS_RT_SIGRETURN,	SWI_THUMB_RT_SIGRETURN
};
1542
1543
/* Stand-in for the kernel's user-register sanity check; QEMU accepts
 * any restored register state, so this always reports success. */
static inline int valid_user_regs(CPUARMState *regs)
{
    return 1;
}
1548
/*
 * Fill a target sigcontext from the ARM CPU state: r0-r15, CPSR, and
 * the first word of the blocked-signal mask.  The trap/error/fault
 * fields have no QEMU equivalent and are written as zero.
 */
static void
setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
                 CPUARMState *env, abi_ulong mask)
{
    __put_user(env->regs[0], &sc->arm_r0);
    __put_user(env->regs[1], &sc->arm_r1);
    __put_user(env->regs[2], &sc->arm_r2);
    __put_user(env->regs[3], &sc->arm_r3);
    __put_user(env->regs[4], &sc->arm_r4);
    __put_user(env->regs[5], &sc->arm_r5);
    __put_user(env->regs[6], &sc->arm_r6);
    __put_user(env->regs[7], &sc->arm_r7);
    __put_user(env->regs[8], &sc->arm_r8);
    __put_user(env->regs[9], &sc->arm_r9);
    __put_user(env->regs[10], &sc->arm_r10);
    __put_user(env->regs[11], &sc->arm_fp);
    __put_user(env->regs[12], &sc->arm_ip);
    __put_user(env->regs[13], &sc->arm_sp);
    __put_user(env->regs[14], &sc->arm_lr);
    __put_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    __put_user(cpsr_read(env), &sc->arm_cpsr);
#endif

    __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
    __put_user(/* current->thread.error_code */ 0, &sc->error_code);
    __put_user(/* current->thread.address */ 0, &sc->fault_address);
    __put_user(mask, &sc->oldmask);
}
1578
1579 static inline abi_ulong
1580 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
1581 {
1582 unsigned long sp = regs->regs[13];
1583
1584 /*
1585 * This is the X/Open sanctioned signal stack switching.
1586 */
1587 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1588 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1589 }
1590 /*
1591 * ATPCS B01 mandates 8-byte alignment
1592 */
1593 return (sp - framesize) & ~7;
1594 }
1595
/*
 * Point the CPU at the signal handler and arrange its return path: lr is
 * either the caller-supplied sa_restorer or a sigreturn trampoline
 * written at rc/rc_addr.  Bit 0 of the handler address selects Thumb
 * mode, mirrored into CPSR.T and into the trampoline selection.
 */
static void
setup_return(CPUARMState *env, struct target_sigaction *ka,
             abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
{
    abi_ulong handler = ka->_sa_handler;
    abi_ulong retcode;
    int thumb = handler & 1;
    uint32_t cpsr = cpsr_read(env);

    cpsr &= ~CPSR_IT;
    if (thumb) {
        cpsr |= CPSR_T;
    } else {
        cpsr &= ~CPSR_T;
    }

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        retcode = ka->sa_restorer;
    } else {
        /* retcodes index: bit 0 = thumb, bit 1 = rt (SA_SIGINFO) */
        unsigned int idx = thumb;

        if (ka->sa_flags & TARGET_SA_SIGINFO) {
            idx += 2;
        }

        __put_user(retcodes[idx], rc);

        /* Keep the thumb bit in the return address */
        retcode = rc_addr + thumb;
    }

    env->regs[0] = usig;
    env->regs[13] = frame_addr;
    env->regs[14] = retcode;
    /* Clear the interworking bits from the handler address */
    env->regs[15] = handler & (thumb ? ~1 : ~3);
    cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
}
1632
/*
 * Write a magic/size-tagged VFP record (d0-d31, FPSCR, exception regs)
 * into the v2 ucontext regspace.  Returns the address just past the
 * record, where the next coprocessor record goes.
 */
static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
{
    int i;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;
    __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
    __put_user(sizeof(*vfpframe), &vfpframe->size);
    for (i = 0; i < 32; i++) {
        __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
    }
    __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
    __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong*)(vfpframe+1);
}
1649
1650 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
1651 CPUARMState *env)
1652 {
1653 int i;
1654 struct target_iwmmxt_sigframe *iwmmxtframe;
1655 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1656 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
1657 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
1658 for (i = 0; i < 16; i++) {
1659 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1660 }
1661 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1662 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf);
1663 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1664 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1665 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1666 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1667 return (abi_ulong*)(iwmmxtframe+1);
1668 }
1669
/*
 * Populate a v2 ucontext: sigaltstack info, sigcontext, any coprocessor
 * records (VFP, iWMMXt) in the regspace terminated by a zero magic word,
 * and the full blocked-signal mask.
 */
static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
                              target_sigset_t *set, CPUARMState *env)
{
    struct target_sigaltstack stack;
    int i;
    abi_ulong *regspace;

    /* Clear all the bits of the ucontext we don't use.  */
    memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&uc->tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
    /* Save coprocessor signal frame.  */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = setup_sigframe_v2_vfp(regspace, env);
    }
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        regspace = setup_sigframe_v2_iwmmxt(regspace, env);
    }

    /* Write terminating magic word */
    __put_user(0, regspace);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
    }
}
1703
1704 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */
/* compare linux/arch/arm/kernel/signal.c:setup_frame() */
/*
 * Deliver a non-rt signal using the legacy (pre-2.6.18) frame layout:
 * sigcontext plus extra mask words, then redirect to the handler.
 */
static void setup_frame_v1(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
    int i;

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        return;
    }

    setup_sigcontext(&frame->sc, regs, set->sig[0]);

    /* Mask words beyond the first live outside the sigcontext */
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v1, retcode));

    unlock_user_struct(frame, frame_addr, 1);
}
1728
/*
 * Deliver a non-rt signal using the 2.6.18+ frame layout (full v2
 * ucontext including coprocessor records), then redirect to the handler.
 */
static void setup_frame_v2(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v2 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        return;
    }

    setup_sigframe_v2(&frame->uc, set, regs);

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v2, retcode));

    unlock_user_struct(frame, frame_addr, 1);
}
1747
1748 static void setup_frame(int usig, struct target_sigaction *ka,
1749 target_sigset_t *set, CPUARMState *regs)
1750 {
1751 if (get_osversion() >= 0x020612) {
1752 setup_frame_v2(usig, ka, set, regs);
1753 } else {
1754 setup_frame_v1(usig, ka, set, regs);
1755 }
1756 }
1757
1758 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
/* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
/*
 * Deliver an rt signal using the legacy frame layout: siginfo/ucontext
 * pointers, siginfo, v1 ucontext with sigaltstack info and the full
 * signal mask, then redirect to the handler with r1 = &info, r2 = &uc.
 */
static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
                              target_siginfo_t *info,
                              target_sigset_t *set, CPUARMState *env)
{
    struct rt_sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
    struct target_sigaltstack stack;
    int i;
    abi_ulong info_addr, uc_addr;

    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        return /* 1 */;
    }

    info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
    __put_user(info_addr, &frame->pinfo);
    uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
    __put_user(uc_addr, &frame->puc);
    tswap_siginfo(&frame->info, info);

    /* Clear all the bits of the ucontext we don't use.  */
    memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    setup_return(env, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct rt_sigframe_v1, retcode));

    /* SA_SIGINFO handler arguments: r1 = siginfo, r2 = ucontext */
    env->regs[1] = info_addr;
    env->regs[2] = uc_addr;

    unlock_user_struct(frame, frame_addr, 1);
}
1802
/*
 * Deliver an rt signal using the 2.6.18+ frame layout: siginfo followed
 * by a full v2 ucontext, then redirect to the handler with r1 = &info,
 * r2 = &uc.
 */
static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
                              target_siginfo_t *info,
                              target_sigset_t *set, CPUARMState *env)
{
    struct rt_sigframe_v2 *frame;
    abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
    abi_ulong info_addr, uc_addr;

    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        return /* 1 */;
    }

    info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
    uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
    tswap_siginfo(&frame->info, info);

    setup_sigframe_v2(&frame->uc, set, env);

    setup_return(env, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct rt_sigframe_v2, retcode));

    /* SA_SIGINFO handler arguments: r1 = siginfo, r2 = ucontext */
    env->regs[1] = info_addr;
    env->regs[2] = uc_addr;

    unlock_user_struct(frame, frame_addr, 1);
}
1830
1831 static void setup_rt_frame(int usig, struct target_sigaction *ka,
1832 target_siginfo_t *info,
1833 target_sigset_t *set, CPUARMState *env)
1834 {
1835 if (get_osversion() >= 0x020612) {
1836 setup_rt_frame_v2(usig, ka, info, set, env);
1837 } else {
1838 setup_rt_frame_v1(usig, ka, info, set, env);
1839 }
1840 }
1841
/*
 * Rebuild ARM CPU state (r0-r15 and, for 32-bit targets, CPSR) from a
 * target sigcontext.  Returns 0 on success, non-zero if the restored
 * register state fails validation.
 */
static int
restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
{
    int err = 0;
    uint32_t cpsr;

    __get_user(env->regs[0], &sc->arm_r0);
    __get_user(env->regs[1], &sc->arm_r1);
    __get_user(env->regs[2], &sc->arm_r2);
    __get_user(env->regs[3], &sc->arm_r3);
    __get_user(env->regs[4], &sc->arm_r4);
    __get_user(env->regs[5], &sc->arm_r5);
    __get_user(env->regs[6], &sc->arm_r6);
    __get_user(env->regs[7], &sc->arm_r7);
    __get_user(env->regs[8], &sc->arm_r8);
    __get_user(env->regs[9], &sc->arm_r9);
    __get_user(env->regs[10], &sc->arm_r10);
    __get_user(env->regs[11], &sc->arm_fp);
    __get_user(env->regs[12], &sc->arm_ip);
    __get_user(env->regs[13], &sc->arm_sp);
    __get_user(env->regs[14], &sc->arm_lr);
    __get_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    /* Only the user-visible and execution-state CPSR bits are restored */
    __get_user(cpsr, &sc->arm_cpsr);
    cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
#endif

    err |= !valid_user_regs(env);

    return err;
}
1873
1874 static long do_sigreturn_v1(CPUARMState *env)
1875 {
1876 abi_ulong frame_addr;
1877 struct sigframe_v1 *frame = NULL;
1878 target_sigset_t set;
1879 sigset_t host_set;
1880 int i;
1881
1882 /*
1883 * Since we stacked the signal on a 64-bit boundary,
1884 * then 'sp' should be word aligned here. If it's
1885 * not, then the user is trying to mess with us.
1886 */
1887 frame_addr = env->regs[13];
1888 trace_user_do_sigreturn(env, frame_addr);
1889 if (frame_addr & 7) {
1890 goto badframe;
1891 }
1892
1893 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1894 goto badframe;
1895 }
1896
1897 __get_user(set.sig[0], &frame->sc.oldmask);
1898 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1899 __get_user(set.sig[i], &frame->extramask[i - 1]);
1900 }
1901
1902 target_to_host_sigset_internal(&host_set, &set);
1903 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
1904
1905 if (restore_sigcontext(env, &frame->sc)) {
1906 goto badframe;
1907 }
1908
1909 #if 0
1910 /* Send SIGTRAP if we're single-stepping */
1911 if (ptrace_cancel_bpt(current))
1912 send_sig(SIGTRAP, current, 1);
1913 #endif
1914 unlock_user_struct(frame, frame_addr, 0);
1915 return -TARGET_QEMU_ESIGRETURN;
1916
1917 badframe:
1918 force_sig(TARGET_SIGSEGV /* , current */);
1919 return 0;
1920 }
1921
/*
 * Restore VFP state from a v2 ucontext regspace record.  Returns the
 * address just past the record on success, or 0 if the record's magic or
 * size does not match a VFP frame.
 */
static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
{
    int i;
    abi_ulong magic, sz;
    uint32_t fpscr, fpexc;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;

    __get_user(magic, &vfpframe->magic);
    __get_user(sz, &vfpframe->size);
    if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
        return 0;
    }
    for (i = 0; i < 32; i++) {
        /* float64_val() yields an lvalue for the raw 64-bit register */
        __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
    }
    __get_user(fpscr, &vfpframe->ufp.fpscr);
    vfp_set_fpscr(env, fpscr);
    __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
    /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
     * and the exception flag is cleared
     */
    fpexc |= (1 << 30);
    fpexc &= ~((1 << 31) | (1 << 28));
    env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
    __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong*)(vfpframe + 1);
}
1951
1952 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
1953 abi_ulong *regspace)
1954 {
1955 int i;
1956 abi_ulong magic, sz;
1957 struct target_iwmmxt_sigframe *iwmmxtframe;
1958 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1959
1960 __get_user(magic, &iwmmxtframe->magic);
1961 __get_user(sz, &iwmmxtframe->size);
1962 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
1963 return 0;
1964 }
1965 for (i = 0; i < 16; i++) {
1966 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1967 }
1968 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1969 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf);
1970 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1971 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1972 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1973 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1974 return (abi_ulong*)(iwmmxtframe + 1);
1975 }
1976
/*
 * Shared v2 sigreturn path: restore the signal mask, CPU state, any
 * coprocessor records, and the sigaltstack settings from a v2 ucontext.
 * Returns 0 on success, 1 on any failure.
 */
static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
                                 struct target_ucontext_v2 *uc)
{
    sigset_t host_set;
    abi_ulong *regspace;

    target_to_host_sigset(&host_set, &uc->tuc_sigmask);
    do_sigprocmask(SIG_SETMASK, &host_set, NULL);

    if (restore_sigcontext(env, &uc->tuc_mcontext))
        return 1;

    /* Restore coprocessor signal frame */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = restore_sigframe_v2_vfp(env, regspace);
        if (!regspace) {
            return 1;
        }
    }
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        regspace = restore_sigframe_v2_iwmmxt(env, regspace);
        if (!regspace) {
            return 1;
        }
    }

    /* NOTE(review): this address math assumes the ucontext sits at offset
     * 0 of the frame, which holds for sigframe_v2 but not rt_sigframe_v2
     * (where uc follows info) — verify against the rt caller. */
    if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
        return 1;

#if 0
    /* Send SIGTRAP if we're single-stepping */
    if (ptrace_cancel_bpt(current))
        send_sig(SIGTRAP, current, 1);
#endif

    return 0;
}
2015
/*
 * 2.6.18+ ARM sigreturn: validate alignment of the v2 frame at SP and
 * restore state via do_sigframe_return_v2.  Raises SIGSEGV and returns
 * 0 on a malformed frame.
 */
static long do_sigreturn_v2(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct sigframe_v2 *frame = NULL;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV /* , current */);
    return 0;
}
2048
2049 long do_sigreturn(CPUARMState *env)
2050 {
2051 if (get_osversion() >= 0x020612) {
2052 return do_sigreturn_v2(env);
2053 } else {
2054 return do_sigreturn_v1(env);
2055 }
2056 }
2057
/*
 * Legacy (pre-2.6.18) ARM rt_sigreturn: restore signal mask, CPU state
 * and sigaltstack settings from the v1 rt frame at SP.  Raises SIGSEGV
 * and returns 0 on a malformed frame.
 */
static long do_rt_sigreturn_v1(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe_v1 *frame = NULL;
    sigset_t host_set;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
    do_sigprocmask(SIG_SETMASK, &host_set, NULL);

    if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
        goto badframe;

#if 0
    /* Send SIGTRAP if we're single-stepping */
    if (ptrace_cancel_bpt(current))
        send_sig(SIGTRAP, current, 1);
#endif
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV /* , current */);
    return 0;
}
2102
/*
 * 2.6.18+ ARM rt_sigreturn: validate alignment of the v2 rt frame at SP
 * and restore state via do_sigframe_return_v2.  Raises SIGSEGV and
 * returns 0 on a malformed frame.
 */
static long do_rt_sigreturn_v2(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe_v2 *frame = NULL;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV /* , current */);
    return 0;
}
2135
2136 long do_rt_sigreturn(CPUARMState *env)
2137 {
2138 if (get_osversion() >= 0x020612) {
2139 return do_rt_sigreturn_v2(env);
2140 } else {
2141 return do_rt_sigreturn_v1(env);
2142 }
2143 }
2144
2145 #elif defined(TARGET_SPARC)
2146
#define __SUNOS_MAXWIN 31

/* This is what SunOS does, so shall I. */
struct target_sigcontext {
    abi_ulong sigc_onstack;      /* state to restore */

    abi_ulong sigc_mask;         /* sigmask to restore */
    abi_ulong sigc_sp;           /* stack pointer */
    abi_ulong sigc_pc;           /* program counter */
    abi_ulong sigc_npc;          /* next program counter */
    abi_ulong sigc_psr;          /* for condition codes etc */
    abi_ulong sigc_g1;           /* User uses these two registers */
    abi_ulong sigc_o0;           /* within the trampoline code. */

    /* Now comes information regarding the users window set
     * at the time of the signal.
     */
    abi_ulong sigc_oswins;       /* outstanding windows */

    /* stack ptrs for each regwin buf */
    char *sigc_spbuf[__SUNOS_MAXWIN];

    /* Windows to restore after signal */
    struct {
        abi_ulong locals[8];
        abi_ulong ins[8];
    } sigc_wbuf[__SUNOS_MAXWIN];
};
/* A Sparc stack frame */
struct sparc_stackf {
    abi_ulong locals[8];
    abi_ulong ins[8];
    /* It's simpler to treat fp and callers_pc as elements of ins[]
     * since we never need to access them ourselves.
     */
    char *structptr;
    abi_ulong xargs[6];
    abi_ulong xxargs[1];
};

/* Register state stored in the signal frame and handed to the guest
 * handler (setup___siginfo() fills it, do_sigreturn() reads it back). */
typedef struct {
    struct {
        abi_ulong psr;
        abi_ulong pc;
        abi_ulong npc;
        abi_ulong y;
        abi_ulong u_regs[16]; /* globals and ins */
    } si_regs;
    int si_mask;
} __siginfo_t;

/* FPU state slot in the signal frame.  NOTE(review): currently only
 * reserved space - save/restore of it is stubbed out (see the FIXME
 * in do_sigreturn below). */
typedef struct {
    abi_ulong si_float_regs[32];
    unsigned long si_fsr;
    unsigned long si_fpqdepth;
    struct {
        unsigned long *insn_addr;
        unsigned long insn;
    } si_fpqueue [16];
} qemu_siginfo_fpu_t;


/* Layout of the frame pushed onto the guest stack by setup_frame(). */
struct target_signal_frame {
    struct sparc_stackf ss;
    __siginfo_t info;
    abi_ulong fpu_save;
    abi_ulong insns[2] __attribute__ ((aligned (8)));
    abi_ulong extramask[TARGET_NSIG_WORDS - 1];
    abi_ulong extra_size; /* Should be 0 */
    qemu_siginfo_fpu_t fpu_state;
};
/* rt-signal frame layout; unused for now since setup_rt_frame() and
 * do_rt_sigreturn() are not implemented for SPARC. */
struct target_rt_signal_frame {
    struct sparc_stackf ss;
    siginfo_t info;
    abi_ulong regs[20];
    sigset_t mask;
    abi_ulong fpu_save;
    unsigned int insns[2];
    stack_t stack;
    unsigned int extra_size; /* Should be 0 */
    qemu_siginfo_fpu_t fpu_state;
};

/* Indices into env->regwptr for the out/in/local window registers. */
#define UREG_O0        16
#define UREG_O6        22
#define UREG_I0        0
#define UREG_I1        1
#define UREG_I2        2
#define UREG_I3        3
#define UREG_I4        4
#define UREG_I5        5
#define UREG_I6        6
#define UREG_I7        7
#define UREG_L0        8
#define UREG_FP        UREG_I6
#define UREG_SP        UREG_O6
2243
/* Choose the guest stack address for a new SPARC signal frame:
 * normally just below the current frame pointer, or on the registered
 * sigaltstack when SA_ONSTACK is set and we are not already on it. */
static inline abi_ulong get_sigframe(struct target_sigaction *sa,
                                     CPUSPARCState *env,
                                     unsigned long framesize)
{
    abi_ulong sp;

    sp = env->regwptr[UREG_FP];

    /* This is the X/Open sanctioned signal stack switching. */
    if (sa->sa_flags & TARGET_SA_ONSTACK) {
        /* NOTE(review): the altstack is only taken when its top is
         * 8-byte aligned; an unaligned altstack is silently ignored
         * and the normal stack used instead - confirm this matches
         * the kernel's behaviour. */
        if (!on_sig_stack(sp)
            && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
            sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
        }
    }
    return sp - framesize;
}
2261
2262 static int
2263 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
2264 {
2265 int err = 0, i;
2266
2267 __put_user(env->psr, &si->si_regs.psr);
2268 __put_user(env->pc, &si->si_regs.pc);
2269 __put_user(env->npc, &si->si_regs.npc);
2270 __put_user(env->y, &si->si_regs.y);
2271 for (i=0; i < 8; i++) {
2272 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
2273 }
2274 for (i=0; i < 8; i++) {
2275 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
2276 }
2277 __put_user(mask, &si->si_mask);
2278 return err;
2279 }
2280
/* Dead code kept for reference: the non-rt SunOS sigcontext writer.
 * The active frame layout goes through setup___siginfo() instead. */
#if 0
static int
setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
                 CPUSPARCState *env, unsigned long mask)
{
    int err = 0;

    __put_user(mask, &sc->sigc_mask);
    __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
    __put_user(env->pc, &sc->sigc_pc);
    __put_user(env->npc, &sc->sigc_npc);
    __put_user(env->psr, &sc->sigc_psr);
    __put_user(env->gregs[1], &sc->sigc_g1);
    __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);

    return err;
}
#endif
/* Signal-frame size rounded up to an 8-byte multiple. */
#define NF_ALIGNEDSZ  (((sizeof(struct target_signal_frame) + 7) & (~7)))
2300
/*
 * Push a non-rt signal frame onto the SPARC guest stack and redirect
 * the CPU to the handler: saves the register state and signal mask in
 * the frame, points %fp at it, loads the handler arguments into the
 * out registers and sets the return address either to sa_restorer or
 * to an in-frame sigreturn trampoline.  On failure the guest gets
 * SIGSEGV.
 */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUSPARCState *env)
{
    abi_ulong sf_addr;
    struct target_signal_frame *sf;
    int sigframe_size, err, i;

    /* 1. Make sure everything is clean */
    //synchronize_user_stack();

    sigframe_size = NF_ALIGNEDSZ;
    sf_addr = get_sigframe(ka, env, sigframe_size);
    trace_user_setup_frame(env, sf_addr);

    sf = lock_user(VERIFY_WRITE, sf_addr,
                   sizeof(struct target_signal_frame), 0);
    if (!sf) {
        goto sigsegv;
    }
#if 0
    if (invalid_frame_pointer(sf, sigframe_size))
        goto sigill_and_return;
#endif
    /* 2. Save the current process state */
    err = setup___siginfo(&sf->info, env, set->sig[0]);
    __put_user(0, &sf->extra_size);

    //save_fpu_state(regs, &sf->fpu_state);
    //__put_user(&sf->fpu_state, &sf->fpu_save);

    /* First mask word lives in the siginfo, the rest in extramask[]. */
    __put_user(set->sig[0], &sf->info.si_mask);
    for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
        __put_user(set->sig[i + 1], &sf->extramask[i]);
    }

    /* Save the current register window (locals + ins) in the frame. */
    for (i = 0; i < 8; i++) {
        __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
    }
    for (i = 0; i < 8; i++) {
        __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
    }
    if (err)
        goto sigsegv;

    /* 3. signal handler back-trampoline and parameters */
    env->regwptr[UREG_FP] = sf_addr;
    env->regwptr[UREG_I0] = sig;
    /* NOTE(review): %o1 and %o2 both point at the frame's siginfo;
     * this mirrors the address being passed as both info and context
     * argument - confirm against the kernel's setup_frame. */
    env->regwptr[UREG_I1] = sf_addr +
            offsetof(struct target_signal_frame, info);
    env->regwptr[UREG_I2] = sf_addr +
            offsetof(struct target_signal_frame, info);

    /* 4. signal handler */
    env->pc = ka->_sa_handler;
    env->npc = (env->pc + 4);
    /* 5. return to kernel instructions */
    if (ka->sa_restorer) {
        env->regwptr[UREG_I7] = ka->sa_restorer;
    } else {
        uint32_t val32;

        /* %o7 points 8 bytes before the trampoline so that the
         * implicit "ret; restore" return (pc = %o7 + 8) lands on it. */
        env->regwptr[UREG_I7] = sf_addr +
                offsetof(struct target_signal_frame, insns) - 2 * 4;

        /* mov __NR_sigreturn, %g1 */
        val32 = 0x821020d8;
        __put_user(val32, &sf->insns[0]);

        /* t 0x10 */
        val32 = 0x91d02010;
        __put_user(val32, &sf->insns[1]);
        if (err)
            goto sigsegv;

        /* Flush instruction space. */
        // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
        // tb_flush(env);
    }
    unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
    return;
#if 0
sigill_and_return:
    force_sig(TARGET_SIGILL);
#endif
sigsegv:
    unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
    force_sig(TARGET_SIGSEGV);
}
2389
/* rt-signal delivery is not implemented for SPARC yet; the stub only
 * reports the fact on stderr and delivers nothing. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUSPARCState *env)
{
    fprintf(stderr, "setup_rt_frame: not implemented\n");
}
2396
2397 long do_sigreturn(CPUSPARCState *env)
2398 {
2399 abi_ulong sf_addr;
2400 struct target_signal_frame *sf;
2401 uint32_t up_psr, pc, npc;
2402 target_sigset_t set;
2403 sigset_t host_set;
2404 int err=0, i;
2405
2406 sf_addr = env->regwptr[UREG_FP];
2407 trace_user_do_sigreturn(env, sf_addr);
2408 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
2409 goto segv_and_exit;
2410 }
2411
2412 /* 1. Make sure we are not getting garbage from the user */
2413
2414 if (sf_addr & 3)
2415 goto segv_and_exit;
2416
2417 __get_user(pc, &sf->info.si_regs.pc);
2418 __get_user(npc, &sf->info.si_regs.npc);
2419
2420 if ((pc | npc) & 3) {
2421 goto segv_and_exit;
2422 }
2423
2424 /* 2. Restore the state */
2425 __get_user(up_psr, &sf->info.si_regs.psr);
2426
2427 /* User can only change condition codes and FPU enabling in %psr. */
2428 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2429 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2430
2431 env->pc = pc;
2432 env->npc = npc;
2433 __get_user(env->y, &sf->info.si_regs.y);
2434 for (i=0; i < 8; i++) {
2435 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2436 }
2437 for (i=0; i < 8; i++) {
2438 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2439 }
2440
2441 /* FIXME: implement FPU save/restore:
2442 * __get_user(fpu_save, &sf->fpu_save);
2443 * if (fpu_save)
2444 * err |= restore_fpu_state(env, fpu_save);
2445 */
2446
2447 /* This is pretty much atomic, no amount locking would prevent
2448 * the races which exist anyways.
2449 */
2450 __get_user(set.sig[0], &sf->info.si_mask);
2451 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2452 __get_user(set.sig[i], &sf->extramask[i - 1]);
2453 }
2454
2455 target_to_host_sigset_internal(&host_set, &set);
2456 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
2457
2458 if (err) {
2459 goto segv_and_exit;
2460 }
2461 unlock_user_struct(sf, sf_addr, 0);
2462 return -TARGET_QEMU_ESIGRETURN;
2463
2464 segv_and_exit:
2465 unlock_user_struct(sf, sf_addr, 0);
2466 force_sig(TARGET_SIGSEGV);
2467 }
2468
/* rt_sigreturn is not implemented for SPARC (setup_rt_frame never
 * builds a frame); report ENOSYS to the guest. */
long do_rt_sigreturn(CPUSPARCState *env)
{
    trace_user_do_rt_sigreturn(env, 0);
    fprintf(stderr, "do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
2475
2476 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
/* Indices into the mcontext general-register array, matching the
 * kernel's sparc64 mcontext layout. */
#define MC_TSTATE 0
#define MC_PC 1
#define MC_NPC 2
#define MC_Y 3
#define MC_G1 4
#define MC_G2 5
#define MC_G3 6
#define MC_G4 7
#define MC_G5 8
#define MC_G6 9
#define MC_G7 10
#define MC_O0 11
#define MC_O1 12
#define MC_O2 13
#define MC_O3 14
#define MC_O4 15
#define MC_O5 16
#define MC_O6 17
#define MC_O7 18
#define MC_NGREG 19

typedef abi_ulong target_mc_greg_t;
typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG];

/* One entry of the FPU deferred-trap queue. */
struct target_mc_fq {
    abi_ulong *mcfq_addr;
    uint32_t mcfq_insn;
};

/* FPU state in the guest mcontext: register file plus fsr/fprs/gsr
 * and the trap-queue bookkeeping fields. */
struct target_mc_fpu {
    union {
        uint32_t sregs[32];
        uint64_t dregs[32];
        //uint128_t qregs[16];
    } mcfpu_fregs;
    abi_ulong mcfpu_fsr;
    abi_ulong mcfpu_fprs;
    abi_ulong mcfpu_gsr;
    struct target_mc_fq *mcfpu_fq;
    unsigned char mcfpu_qcnt;
    unsigned char mcfpu_qentsz;
    unsigned char mcfpu_enab;
};
typedef struct target_mc_fpu target_mc_fpu_t;

typedef struct {
    target_mc_gregset_t mc_gregs;
    target_mc_greg_t mc_fp;
    target_mc_greg_t mc_i7;
    target_mc_fpu_t mc_fpregs;
} target_mcontext_t;

struct target_ucontext {
    struct target_ucontext *tuc_link;
    abi_ulong tuc_flags;
    target_sigset_t tuc_sigmask;
    target_mcontext_t tuc_mcontext;
};

/* A V9 register window */
struct target_reg_window {
    abi_ulong locals[8];
    abi_ulong ins[8];
};

/* V9 stack pointers carry this bias; add it before dereferencing. */
#define TARGET_STACK_BIAS 2047
2543
2544 /* {set, get}context() needed for 64-bit SparcLinux userland. */
2545 void sparc64_set_context(CPUSPARCState *env)
2546 {
2547 abi_ulong ucp_addr;
2548 struct target_ucontext *ucp;
2549 target_mc_gregset_t *grp;
2550 abi_ulong pc, npc, tstate;
2551 abi_ulong fp, i7, w_addr;
2552 unsigned int i;
2553
2554 ucp_addr = env->regwptr[UREG_I0];
2555 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
2556 goto do_sigsegv;
2557 }
2558 grp = &ucp->tuc_mcontext.mc_gregs;
2559 __get_user(pc, &((*grp)[MC_PC]));
2560 __get_user(npc, &((*grp)[MC_NPC]));
2561 if ((pc | npc) & 3) {
2562 goto do_sigsegv;
2563 }
2564 if (env->regwptr[UREG_I1]) {
2565 target_sigset_t target_set;
2566 sigset_t set;
2567
2568 if (TARGET_NSIG_WORDS == 1) {
2569 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
2570 } else {
2571 abi_ulong *src, *dst;
2572 src = ucp->tuc_sigmask.sig;
2573 dst = target_set.sig;
2574 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2575 __get_user(*dst, src);
2576 }
2577 }
2578 target_to_host_sigset_internal(&set, &target_set);
2579 do_sigprocmask(SIG_SETMASK, &set, NULL);
2580 }
2581 env->pc = pc;
2582 env->npc = npc;
2583 __get_user(env->y, &((*grp)[MC_Y]));
2584 __get_user(tstate, &((*grp)[MC_TSTATE]));
2585 env->asi = (tstate >> 24) & 0xff;
2586 cpu_put_ccr(env, tstate >> 32);
2587 cpu_put_cwp64(env, tstate & 0x1f);
2588 __get_user(env->gregs[1], (&(*grp)[MC_G1]));
2589 __get_user(env->gregs[2], (&(*grp)[MC_G2]));
2590 __get_user(env->gregs[3], (&(*grp)[MC_G3]));
2591 __get_user(env->gregs[4], (&(*grp)[MC_G4]));
2592 __get_user(env->gregs[5], (&(*grp)[MC_G5]));
2593 __get_user(env->gregs[6], (&(*grp)[MC_G6]));
2594 __get_user(env->gregs[7], (&(*grp)[MC_G7]));
2595 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0]));
2596 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1]));
2597 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2]));
2598 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3]));
2599 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4]));
2600 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5]));
2601 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6]));
2602 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7]));
2603
2604 __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
2605 __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
2606
2607 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2608 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2609 abi_ulong) != 0) {
2610 goto do_sigsegv;
2611 }
2612 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2613 abi_ulong) != 0) {
2614 goto do_sigsegv;
2615 }
2616 /* FIXME this does not match how the kernel handles the FPU in
2617 * its sparc64_set_context implementation. In particular the FPU
2618 * is only restored if fenab is non-zero in:
2619 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
2620 */
2621 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
2622 {
2623 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2624 for (i = 0; i < 64; i++, src++) {
2625 if (i & 1) {
2626 __get_user(env->fpr[i/2].l.lower, src);
2627 } else {
2628 __get_user(env->fpr[i/2].l.upper, src);
2629 }
2630 }
2631 }
2632 __get_user(env->fsr,
2633 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
2634 __get_user(env->gsr,
2635 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
2636 unlock_user_struct(ucp, ucp_addr, 0);
2637 return;
2638 do_sigsegv:
2639 unlock_user_struct(ucp, ucp_addr, 0);
2640 force_sig(TARGET_SIGSEGV);
2641 }
2642
/*
 * Emulate the sparc64 getcontext trap: store the current signal mask,
 * CPU registers, the fp/i7 read from the register window on the
 * (biased) stack, and the FPU state into the ucontext whose address
 * is in %o0.  The pc is advanced past the trap first, so resuming
 * from the saved context continues after the getcontext call.
 * Any fault raises SIGSEGV in the guest.
 */
void sparc64_get_context(CPUSPARCState *env)
{
    abi_ulong ucp_addr;
    struct target_ucontext *ucp;
    target_mc_gregset_t *grp;
    target_mcontext_t *mcp;
    abi_ulong fp, i7, w_addr;
    int err;
    unsigned int i;
    target_sigset_t target_set;
    sigset_t set;

    ucp_addr = env->regwptr[UREG_I0];
    if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
        goto do_sigsegv;
    }

    mcp = &ucp->tuc_mcontext;
    grp = &mcp->mc_gregs;

    /* Skip over the trap instruction, first. */
    env->pc = env->npc;
    env->npc += 4;

    err = 0;

    /* Snapshot the current signal mask into the ucontext. */
    do_sigprocmask(0, NULL, &set);
    host_to_target_sigset_internal(&target_set, &set);
    if (TARGET_NSIG_WORDS == 1) {
        __put_user(target_set.sig[0],
                   (abi_ulong *)&ucp->tuc_sigmask);
    } else {
        abi_ulong *src, *dst;
        src = target_set.sig;
        dst = ucp->tuc_sigmask.sig;
        for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
            __put_user(*src, dst);
        }
        if (err)
            goto do_sigsegv;
    }

    /* XXX: tstate must be saved properly */
    //    __put_user(env->tstate, &((*grp)[MC_TSTATE]));
    __put_user(env->pc, &((*grp)[MC_PC]));
    __put_user(env->npc, &((*grp)[MC_NPC]));
    __put_user(env->y, &((*grp)[MC_Y]));
    __put_user(env->gregs[1], &((*grp)[MC_G1]));
    __put_user(env->gregs[2], &((*grp)[MC_G2]));
    __put_user(env->gregs[3], &((*grp)[MC_G3]));
    __put_user(env->gregs[4], &((*grp)[MC_G4]));
    __put_user(env->gregs[5], &((*grp)[MC_G5]));
    __put_user(env->gregs[6], &((*grp)[MC_G6]));
    __put_user(env->gregs[7], &((*grp)[MC_G7]));
    __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0]));
    __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1]));
    __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2]));
    __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3]));
    __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4]));
    __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5]));
    __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6]));
    __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7]));

    /* Fetch fp/i7 from the register window spilled on the stack. */
    w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
    fp = i7 = 0;
    if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    __put_user(fp, &(mcp->mc_fp));
    __put_user(i7, &(mcp->mc_i7));

    {
        /* Single-precision regs go high-word-first per 64-bit slot. */
        uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
        for (i = 0; i < 64; i++, dst++) {
            if (i & 1) {
                __put_user(env->fpr[i/2].l.lower, dst);
            } else {
                __put_user(env->fpr[i/2].l.upper, dst);
            }
        }
    }
    __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
    __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
    __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));

    if (err)
        goto do_sigsegv;
    unlock_user_struct(ucp, ucp_addr, 1);
    return;
do_sigsegv:
    unlock_user_struct(ucp, ucp_addr, 1);
    force_sig(TARGET_SIGSEGV);
}
2741 #endif
2742 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
2743
# if defined(TARGET_ABI_MIPSO32)
/* Guest sigcontext, o32 ABI layout (matches the kernel's
 * arch/mips struct sigcontext for 32-bit userland). */
struct target_sigcontext {
    uint32_t   sc_regmask;     /* Unused */
    uint32_t   sc_status;
    uint64_t   sc_pc;
    uint64_t   sc_regs[32];
    uint64_t   sc_fpregs[32];
    uint32_t   sc_ownedfp;     /* Unused */
    uint32_t   sc_fpc_csr;
    uint32_t   sc_fpc_eir;     /* Unused */
    uint32_t   sc_used_math;
    uint32_t   sc_dsp;         /* dsp status, was sc_ssflags */
    uint32_t   pad0;
    uint64_t   sc_mdhi;
    uint64_t   sc_mdlo;
    target_ulong   sc_hi1;     /* Was sc_cause */
    target_ulong   sc_lo1;     /* Was sc_badvaddr */
    target_ulong   sc_hi2;     /* Was sc_sigset[4] */
    target_ulong   sc_lo2;
    target_ulong   sc_hi3;
    target_ulong   sc_lo3;
};
# else /* N32 || N64 */
struct target_sigcontext {
    uint64_t sc_regs[32];
    uint64_t sc_fpregs[32];
    uint64_t sc_mdhi;
    uint64_t sc_hi1;
    uint64_t sc_hi2;
    uint64_t sc_hi3;
    uint64_t sc_mdlo;
    uint64_t sc_lo1;
    uint64_t sc_lo2;
    uint64_t sc_lo3;
    uint64_t sc_pc;
    uint32_t sc_fpc_csr;
    uint32_t sc_used_math;
    uint32_t sc_dsp;
    uint32_t sc_reserved;
};
# endif /* O32 */

/* Non-rt frame pushed by setup_frame() (o32 only). */
struct sigframe {
    uint32_t sf_ass[4];			/* argument save space for o32 */
    uint32_t sf_code[2];			/* signal trampoline */
    struct target_sigcontext sf_sc;
    target_sigset_t sf_mask;
};

struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;
    target_stack_t tuc_stack;
    target_ulong pad0;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;
};

/* rt frame pushed by setup_rt_frame(). */
struct target_rt_sigframe {
    uint32_t rs_ass[4];               /* argument save space for o32 */
    uint32_t rs_code[2];              /* signal trampoline */
    struct target_siginfo rs_info;
    struct target_ucontext rs_uc;
};
2808
2809 /* Install trampoline to jump back from signal handler */
2810 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
2811 {
2812 int err = 0;
2813
2814 /*
2815 * Set up the return code ...
2816 *
2817 * li v0, __NR__foo_sigreturn
2818 * syscall
2819 */
2820
2821 __put_user(0x24020000 + syscall, tramp + 0);
2822 __put_user(0x0000000c , tramp + 1);
2823 return err;
2824 }
2825
/* Fill a guest sigcontext from the current MIPS CPU state: resume pc,
 * general registers, hi/lo (including the DSP accumulator pairs),
 * the DSP control mask and the FPU registers. */
static inline void setup_sigcontext(CPUMIPSState *regs,
                                    struct target_sigcontext *sc)
{
    int i;

    /* Resolve any pending branch delay slot into the pc we save. */
    __put_user(exception_resume_pc(regs), &sc->sc_pc);
    regs->hflags &= ~MIPS_HFLAG_BMASK;

    __put_user(0, &sc->sc_regs[0]);
    for (i = 1; i < 32; ++i) {
        __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
    }

    __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
    __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);

    /* Rather than checking for dsp existence, always copy.  The storage
       would just be garbage otherwise. */
    __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
    __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
    __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
    __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
    __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
    __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
    {
        uint32_t dsp = cpu_rddsp(0x3ff, regs);
        __put_user(dsp, &sc->sc_dsp);
    }

    __put_user(1, &sc->sc_used_math);

    for (i = 0; i < 32; ++i) {
        __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
    }
}
2861
/* Load MIPS CPU state back from a guest sigcontext (inverse of
 * setup_sigcontext); the restored pc lands in CP0_EPC and is moved
 * into PC by the sigreturn caller. */
static inline void
restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
{
    int i;

    __get_user(regs->CP0_EPC, &sc->sc_pc);

    __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
    __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);

    for (i = 1; i < 32; ++i) {
        __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
    }

    __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
    __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
    __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
    __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
    __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
    __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
    {
        uint32_t dsp;
        __get_user(dsp, &sc->sc_dsp);
        cpu_wrdsp(dsp, 0x3ff, regs);
    }

    for (i = 0; i < 32; ++i) {
        __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
    }
}
2892
2893 /*
2894 * Determine which stack to use..
2895 */
2896 static inline abi_ulong
2897 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
2898 {
2899 unsigned long sp;
2900
2901 /* Default to using normal stack */
2902 sp = regs->active_tc.gpr[29];
2903
2904 /*
2905 * FPU emulator may have its own trampoline active just
2906 * above the user stack, 16-bytes before the next lowest
2907 * 16 byte boundary. Try to avoid trashing it.
2908 */
2909 sp -= 32;
2910
2911 /* This is the X/Open sanctioned signal stack switching. */
2912 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
2913 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2914 }
2915
2916 return (sp - frame_size) & ~7;
2917 }
2918
2919 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
2920 {
2921 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
2922 env->hflags &= ~MIPS_HFLAG_M16;
2923 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
2924 env->active_tc.PC &= ~(target_ulong) 1;
2925 }
2926 }
2927
2928 # if defined(TARGET_ABI_MIPSO32)
2929 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
2930 static void setup_frame(int sig, struct target_sigaction * ka,
2931 target_sigset_t *set, CPUMIPSState *regs)
2932 {
2933 struct sigframe *frame;
2934 abi_ulong frame_addr;
2935 int i;
2936
2937 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2938 trace_user_setup_frame(regs, frame_addr);
2939 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2940 goto give_sigsegv;
2941 }
2942
2943 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
2944
2945 setup_sigcontext(regs, &frame->sf_sc);
2946
2947 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2948 __put_user(set->sig[i], &frame->sf_mask.sig[i]);
2949 }
2950
2951 /*
2952 * Arguments to signal handler:
2953 *
2954 * a0 = signal number
2955 * a1 = 0 (should be cause)
2956 * a2 = pointer to struct sigcontext
2957 *
2958 * $25 and PC point to the signal handler, $29 points to the
2959 * struct sigframe.
2960 */
2961 regs->active_tc.gpr[ 4] = sig;
2962 regs->active_tc.gpr[ 5] = 0;
2963 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
2964 regs->active_tc.gpr[29] = frame_addr;
2965 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
2966 /* The original kernel code sets CP0_EPC to the handler
2967 * since it returns to userland using eret
2968 * we cannot do this here, and we must set PC directly */
2969 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
2970 mips_set_hflags_isa_mode_from_pc(regs);
2971 unlock_user_struct(frame, frame_addr, 1);
2972 return;
2973
2974 give_sigsegv:
2975 force_sig(TARGET_SIGSEGV/*, current*/);
2976 }
2977
/*
 * sigreturn for MIPS o32: read back the frame that setup_frame()
 * pushed (its address is $sp), restore the saved mask and sigcontext,
 * and resume at the restored pc.  Returns -TARGET_QEMU_ESIGRETURN on
 * success so the syscall path leaves the restored v0 alone; on a bad
 * frame the guest gets SIGSEGV.
 */
long do_sigreturn(CPUMIPSState *regs)
{
    struct sigframe *frame;
    abi_ulong frame_addr;
    sigset_t blocked;
    target_sigset_t target_set;
    int i;

    frame_addr = regs->active_tc.gpr[29];
    trace_user_do_sigreturn(regs, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
    }

    target_to_host_sigset_internal(&blocked, &target_set);
    do_sigprocmask(SIG_SETMASK, &blocked, NULL);

    restore_sigcontext(regs, &frame->sf_sc);

#if 0
    /*
     * Don't let your children do this ...
     */
    __asm__ __volatile__(
        "move\t$29, %0\n\t"
        "j\tsyscall_exit"
        :/* no outputs */
        :"r" (&regs));
    /* Unreached */
#endif

    regs->active_tc.PC = regs->CP0_EPC;
    mips_set_hflags_isa_mode_from_pc(regs);
    /* I am not sure this is right, but it seems to work
    * maybe a problem with nested signals ? */
    regs->CP0_EPC = 0;
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV/*, current*/);
    return 0;
}
3023 # endif /* O32 */
3024
/*
 * Push an rt signal frame onto the MIPS guest stack: trampoline,
 * swapped siginfo and a ucontext (flags, sigaltstack state, mcontext
 * and saved mask), then enter the handler with the o32/n32/n64
 * calling convention.  On failure the guest gets SIGSEGV.
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUMIPSState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);

    tswap_siginfo(&frame->rs_info, info);

    __put_user(0, &frame->rs_uc.tuc_flags);
    __put_user(0, &frame->rs_uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->rs_uc.tuc_stack.ss_flags);

    setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
    }

    /*
    * Arguments to signal handler:
    *
    *   a0 = signal number
    *   a1 = pointer to siginfo_t
    *   a2 = pointer to struct ucontext
    *
    * $25 and PC point to the signal handler, $29 points to the
    * struct sigframe.
    */
    env->active_tc.gpr[ 4] = sig;
    env->active_tc.gpr[ 5] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_info);
    env->active_tc.gpr[ 6] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_uc);
    env->active_tc.gpr[29] = frame_addr;
    env->active_tc.gpr[31] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_code);
    /* The original kernel code sets CP0_EPC to the handler
    * since it returns to userland using eret
    * we cannot do this here, and we must set PC directly */
    env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
    mips_set_hflags_isa_mode_from_pc(env);
    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sig(TARGET_SIGSEGV/*, current*/);
}
3086
/*
 * rt_sigreturn for MIPS: read back the frame that setup_rt_frame()
 * pushed (its address is $sp), restore the saved mask, mcontext and
 * sigaltstack state, and resume at the restored pc.  Returns
 * -TARGET_QEMU_ESIGRETURN on success; on a bad frame the guest gets
 * SIGSEGV.
 */
long do_rt_sigreturn(CPUMIPSState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    sigset_t blocked;

    frame_addr = env->active_tc.gpr[29];
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
    do_sigprocmask(SIG_SETMASK, &blocked, NULL);

    restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);

    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT)
        goto badframe;

    env->active_tc.PC = env->CP0_EPC;
    mips_set_hflags_isa_mode_from_pc(env);
    /* I am not sure this is right, but it seems to work
    * maybe a problem with nested signals ? */
    env->CP0_EPC = 0;
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV/*, current*/);
    return 0;
}
3120
3121 #elif defined(TARGET_SH4)
3122
3123 /*
3124 * code and data structures from linux kernel:
3125 * include/asm-sh/sigcontext.h
3126 * arch/sh/kernel/signal.c
3127 */
3128
3129 struct target_sigcontext {
3130 target_ulong oldmask;
3131
3132 /* CPU registers */
3133 target_ulong sc_gregs[16];
3134 target_ulong sc_pc;
3135 target_ulong sc_pr;
3136 target_ulong sc_sr;
3137 target_ulong sc_gbr;
3138 target_ulong sc_mach;
3139 target_ulong sc_macl;
3140
3141 /* FPU registers */
3142 target_ulong sc_fpregs[16];
3143 target_ulong sc_xfpregs[16];
3144 unsigned int sc_fpscr;
3145 unsigned int sc_fpul;
3146 unsigned int sc_ownedfp;
3147 };
3148
3149 struct target_sigframe
3150 {
3151 struct target_sigcontext sc;
3152 target_ulong extramask[TARGET_NSIG_WORDS-1];
3153 uint16_t retcode[3];
3154 };
3155
3156
3157 struct target_ucontext {
3158 target_ulong tuc_flags;
3159 struct target_ucontext *tuc_link;
3160 target_stack_t tuc_stack;
3161 struct target_sigcontext tuc_mcontext;
3162 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3163 };
3164
3165 struct target_rt_sigframe
3166 {
3167 struct target_siginfo info;
3168 struct target_ucontext uc;
3169 uint16_t retcode[3];
3170 };
3171
3172
3173 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3174 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
3175
3176 static abi_ulong get_sigframe(struct target_sigaction *ka,
3177 unsigned long sp, size_t frame_size)
3178 {
3179 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3180 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3181 }
3182
3183 return (sp - frame_size) & -8ul;
3184 }
3185
3186 static void setup_sigcontext(struct target_sigcontext *sc,
3187 CPUSH4State *regs, unsigned long mask)
3188 {
3189 int i;
3190
3191 #define COPY(x) __put_user(regs->x, &sc->sc_##x)
3192 COPY(gregs[0]); COPY(gregs[1]);
3193 COPY(gregs[2]); COPY(gregs[3]);
3194 COPY(gregs[4]); COPY(gregs[5]);
3195 COPY(gregs[6]); COPY(gregs[7]);
3196 COPY(gregs[8]); COPY(gregs[9]);
3197 COPY(gregs[10]); COPY(gregs[11]);
3198 COPY(gregs[12]); COPY(gregs[13]);
3199 COPY(gregs[14]); COPY(gregs[15]);
3200 COPY(gbr); COPY(mach);
3201 COPY(macl); COPY(pr);
3202 COPY(sr); COPY(pc);
3203 #undef COPY
3204
3205 for (i=0; i<16; i++) {
3206 __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
3207 }
3208 __put_user(regs->fpscr, &sc->sc_fpscr);
3209 __put_user(regs->fpul, &sc->sc_fpul);
3210
3211 /* non-iBCS2 extensions.. */
3212 __put_user(mask, &sc->oldmask);
3213 }
3214
3215 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
3216 {
3217 int i;
3218
3219 #define COPY(x) __get_user(regs->x, &sc->sc_##x)
3220 COPY(gregs[0]); COPY(gregs[1]);
3221 COPY(gregs[2]); COPY(gregs[3]);
3222 COPY(gregs[4]); COPY(gregs[5]);
3223 COPY(gregs[6]); COPY(gregs[7]);
3224 COPY(gregs[8]); COPY(gregs[9]);
3225 COPY(gregs[10]); COPY(gregs[11]);
3226 COPY(gregs[12]); COPY(gregs[13]);
3227 COPY(gregs[14]); COPY(gregs[15]);
3228 COPY(gbr); COPY(mach);
3229 COPY(macl); COPY(pr);
3230 COPY(sr); COPY(pc);
3231 #undef COPY
3232
3233 for (i=0; i<16; i++) {
3234 __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
3235 }
3236 __get_user(regs->fpscr, &sc->sc_fpscr);
3237 __get_user(regs->fpul, &sc->sc_fpul);
3238
3239 regs->tra = -1; /* disable syscall checks */
3240 }
3241
3242 static void setup_frame(int sig, struct target_sigaction *ka,
3243 target_sigset_t *set, CPUSH4State *regs)
3244 {
3245 struct target_sigframe *frame;
3246 abi_ulong frame_addr;
3247 int i;
3248
3249 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3250 trace_user_setup_frame(regs, frame_addr);
3251 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3252 goto give_sigsegv;