vl: Table-based select_vgahw()
[qemu.git] / linux-user / signal.c
1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include <sys/ucontext.h>
21 #include <sys/resource.h>
22
23 #include "qemu.h"
24 #include "qemu-common.h"
25 #include "target_signal.h"
26 #include "trace.h"
27
/* The guest's current alternate signal stack; starts out disabled until
   the guest installs one via sigaltstack(). */
static struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

/* Per-signal guest sigaction state, indexed by (target signal number - 1). */
static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);
/* Maps each host signal number to the corresponding target signal number.
   Entries left at 0 are filled in as identity mappings by signal_init(). */
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
/* Inverse of the table above; populated by signal_init(). */
static uint8_t target_to_host_signal_table[_NSIG];
83
84 static inline int on_sig_stack(unsigned long sp)
85 {
86 return (sp - target_sigaltstack_used.ss_sp
87 < target_sigaltstack_used.ss_size);
88 }
89
90 static inline int sas_ss_flags(unsigned long sp)
91 {
92 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
93 : on_sig_stack(sp) ? SS_ONSTACK : 0);
94 }
95
96 int host_to_target_signal(int sig)
97 {
98 if (sig < 0 || sig >= _NSIG)
99 return sig;
100 return host_to_target_signal_table[sig];
101 }
102
103 int target_to_host_signal(int sig)
104 {
105 if (sig < 0 || sig >= _NSIG)
106 return sig;
107 return target_to_host_signal_table[sig];
108 }
109
/* Clear every signal in a guest signal set. */
static inline void target_sigemptyset(target_sigset_t *set)
{
    memset(set, 0, sizeof(*set));
}
114
115 static inline void target_sigaddset(target_sigset_t *set, int signum)
116 {
117 signum--;
118 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
119 set->sig[signum / TARGET_NSIG_BPW] |= mask;
120 }
121
122 static inline int target_sigismember(const target_sigset_t *set, int signum)
123 {
124 signum--;
125 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
126 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
127 }
128
129 static void host_to_target_sigset_internal(target_sigset_t *d,
130 const sigset_t *s)
131 {
132 int i;
133 target_sigemptyset(d);
134 for (i = 1; i <= TARGET_NSIG; i++) {
135 if (sigismember(s, i)) {
136 target_sigaddset(d, host_to_target_signal(i));
137 }
138 }
139 }
140
141 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
142 {
143 target_sigset_t d1;
144 int i;
145
146 host_to_target_sigset_internal(&d1, s);
147 for(i = 0;i < TARGET_NSIG_WORDS; i++)
148 d->sig[i] = tswapal(d1.sig[i]);
149 }
150
151 static void target_to_host_sigset_internal(sigset_t *d,
152 const target_sigset_t *s)
153 {
154 int i;
155 sigemptyset(d);
156 for (i = 1; i <= TARGET_NSIG; i++) {
157 if (target_sigismember(s, i)) {
158 sigaddset(d, target_to_host_signal(i));
159 }
160 }
161 }
162
163 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
164 {
165 target_sigset_t s1;
166 int i;
167
168 for(i = 0;i < TARGET_NSIG_WORDS; i++)
169 s1.sig[i] = tswapal(s->sig[i]);
170 target_to_host_sigset_internal(d, &s1);
171 }
172
173 void host_to_target_old_sigset(abi_ulong *old_sigset,
174 const sigset_t *sigset)
175 {
176 target_sigset_t d;
177 host_to_target_sigset(&d, sigset);
178 *old_sigset = d.sig[0];
179 }
180
181 void target_to_host_old_sigset(sigset_t *sigset,
182 const abi_ulong *old_sigset)
183 {
184 target_sigset_t d;
185 int i;
186
187 d.sig[0] = *old_sigset;
188 for(i = 1;i < TARGET_NSIG_WORDS; i++)
189 d.sig[i] = 0;
190 target_to_host_sigset(sigset, &d);
191 }
192
/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal set, not guest ones. This wraps the sigprocmask host calls
 * that should be protected (calls originated from guest)
 *
 * QEMU itself relies on receiving host SIGSEGV (for MMU fault emulation),
 * so the guest is never allowed to actually block it at the host level.
 * Instead, the "blocked" state is tracked in TaskState.sigsegv_blocked and
 * SIGSEGV is stripped from the mask passed to the host, then re-inserted
 * into the reported old mask if the guest had it blocked.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    int ret;
    sigset_t val;
    sigset_t *temp = NULL;
    CPUState *cpu = thread_cpu;
    TaskState *ts = (TaskState *)cpu->opaque;
    /* Snapshot before any update so the old mask reflects the prior state. */
    bool segv_was_blocked = ts->sigsegv_blocked;

    if (set) {
        bool has_sigsegv = sigismember(set, SIGSEGV);
        /* Work on a local copy; the caller's set must not be modified. */
        val = *set;
        temp = &val;

        sigdelset(temp, SIGSEGV);

        /* Track the guest's notion of SIGSEGV blocking separately. */
        switch (how) {
        case SIG_BLOCK:
            if (has_sigsegv) {
                ts->sigsegv_blocked = true;
            }
            break;
        case SIG_UNBLOCK:
            if (has_sigsegv) {
                ts->sigsegv_blocked = false;
            }
            break;
        case SIG_SETMASK:
            ts->sigsegv_blocked = has_sigsegv;
            break;
        default:
            g_assert_not_reached();
        }
    }

    ret = sigprocmask(how, temp, oldset);

    /* Restore the guest-visible SIGSEGV state in the returned old mask. */
    if (oldset && segv_was_blocked) {
        sigaddset(oldset, SIGSEGV);
    }

    return ret;
}
241
242 /* siginfo conversion */
243
/* Convert a host siginfo_t to the target layout WITHOUT byte-swapping;
   tswap_siginfo() performs the swap as a separate step.  Only the union
   member relevant to the (already translated) signal number is filled. */
static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
        || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
        /* Should never come here, but who knows. The information for
           the target is irrelevant.  */
        tinfo->_sifields._sigfault._addr = 0;
    } else if (sig == TARGET_SIGIO) {
        tinfo->_sifields._sigpoll._band = info->si_band;
        tinfo->_sifields._sigpoll._fd = info->si_fd;
    } else if (sig == TARGET_SIGCHLD) {
        tinfo->_sifields._sigchld._pid = info->si_pid;
        tinfo->_sifields._sigchld._uid = info->si_uid;
        /* The child exit status needs its own host-to-target conversion. */
        tinfo->_sifields._sigchld._status
            = host_to_target_waitstatus(info->si_status);
        tinfo->_sifields._sigchld._utime = info->si_utime;
        tinfo->_sifields._sigchld._stime = info->si_stime;
    } else if (sig >= TARGET_SIGRTMIN) {
        tinfo->_sifields._rt._pid = info->si_pid;
        tinfo->_sifields._rt._uid = info->si_uid;
        /* XXX: potential problem if 64 bit */
        tinfo->_sifields._rt._sigval.sival_ptr
            = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
    }
}
275
/* Byte-swap a target-layout siginfo into guest byte order.  May be
   called with tinfo == info for an in-place swap.  Mirrors the union
   member selection of host_to_target_siginfo_noswap(). */
static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    /* Read the signal number before any in-place swap clobbers it. */
    int sig = info->si_signo;
    tinfo->si_signo = tswap32(sig);
    tinfo->si_errno = tswap32(info->si_errno);
    tinfo->si_code = tswap32(info->si_code);

    if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
        || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
        tinfo->_sifields._sigfault._addr
            = tswapal(info->_sifields._sigfault._addr);
    } else if (sig == TARGET_SIGIO) {
        tinfo->_sifields._sigpoll._band
            = tswap32(info->_sifields._sigpoll._band);
        tinfo->_sifields._sigpoll._fd = tswap32(info->_sifields._sigpoll._fd);
    } else if (sig == TARGET_SIGCHLD) {
        tinfo->_sifields._sigchld._pid
            = tswap32(info->_sifields._sigchld._pid);
        tinfo->_sifields._sigchld._uid
            = tswap32(info->_sifields._sigchld._uid);
        tinfo->_sifields._sigchld._status
            = tswap32(info->_sifields._sigchld._status);
        tinfo->_sifields._sigchld._utime
            = tswapal(info->_sifields._sigchld._utime);
        tinfo->_sifields._sigchld._stime
            = tswapal(info->_sifields._sigchld._stime);
    } else if (sig >= TARGET_SIGRTMIN) {
        tinfo->_sifields._rt._pid = tswap32(info->_sifields._rt._pid);
        tinfo->_sifields._rt._uid = tswap32(info->_sifields._rt._uid);
        tinfo->_sifields._rt._sigval.sival_ptr
            = tswapal(info->_sifields._rt._sigval.sival_ptr);
    }
}
310
311
/* Convert a host siginfo_t into guest layout AND guest byte order. */
void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    host_to_target_siginfo_noswap(tinfo, info);
    /* tswap_siginfo supports in-place operation. */
    tswap_siginfo(tinfo, tinfo);
}
317
/* XXX: we support only POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
/* Convert a guest siginfo back to host layout.  Only the _rt union
   member is handled (see the XXX notes above). */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    info->si_signo = tswap32(tinfo->si_signo);
    info->si_errno = tswap32(tinfo->si_errno);
    info->si_code = tswap32(tinfo->si_code);
    info->si_pid = tswap32(tinfo->_sifields._rt._pid);
    info->si_uid = tswap32(tinfo->_sifields._rt._uid);
    info->si_value.sival_ptr =
            (void *)(long)tswapal(tinfo->_sifields._rt._sigval.sival_ptr);
}
330
331 static int fatal_signal (int sig)
332 {
333 switch (sig) {
334 case TARGET_SIGCHLD:
335 case TARGET_SIGURG:
336 case TARGET_SIGWINCH:
337 /* Ignored by default. */
338 return 0;
339 case TARGET_SIGCONT:
340 case TARGET_SIGSTOP:
341 case TARGET_SIGTSTP:
342 case TARGET_SIGTTIN:
343 case TARGET_SIGTTOU:
344 /* Job control signals. */
345 return 0;
346 default:
347 return 1;
348 }
349 }
350
351 /* returns 1 if given signal should dump core if not handled */
352 static int core_dump_signal(int sig)
353 {
354 switch (sig) {
355 case TARGET_SIGABRT:
356 case TARGET_SIGFPE:
357 case TARGET_SIGILL:
358 case TARGET_SIGQUIT:
359 case TARGET_SIGSEGV:
360 case TARGET_SIGTRAP:
361 case TARGET_SIGBUS:
362 return (1);
363 default:
364 return (0);
365 }
366 }
367
/* Build the host<->target signal translation tables and install host
   handlers for all target signals whose default action is fatal. */
void signal_init(void)
{
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for(i = 1; i < _NSIG; i++) {
        /* Signals without an explicit table entry map to themselves. */
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for(i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for(i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        /* Record any pre-existing IGN/DFL disposition inherited from the
           parent so the guest observes it. */
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use. We need at least
           SIGSEGV and SIGBUS, to detect exceptions. We can not just
           trap all signals because it affects syscall interrupt
           behavior. But do trap all default-fatal signals. */
        if (fatal_signal (i))
            sigaction(host_sig, &act, NULL);
    }
}
410
411 /* signal queue handling */
412
413 static inline struct sigqueue *alloc_sigqueue(CPUArchState *env)
414 {
415 CPUState *cpu = ENV_GET_CPU(env);
416 TaskState *ts = cpu->opaque;
417 struct sigqueue *q = ts->first_free;
418 if (!q)
419 return NULL;
420 ts->first_free = q->next;
421 return q;
422 }
423
424 static inline void free_sigqueue(CPUArchState *env, struct sigqueue *q)
425 {
426 CPUState *cpu = ENV_GET_CPU(env);
427 TaskState *ts = cpu->opaque;
428
429 q->next = ts->first_free;
430 ts->first_free = q;
431 }
432
433 /* abort execution with signal */
/* Abort execution with the given target signal: optionally dump a core
   for the guest binary, then kill the QEMU process itself with the
   corresponding host signal so the parent sees the right wait status.
   Never returns. */
static void QEMU_NORETURN force_sig(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur=0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourself a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}
486
487 /* queue a signal so that it will be send to the virtual CPU as soon
488 as possible */
/* Queue a guest signal for delivery to the virtual CPU.
   Returns 1 if the signal was queued, 0 if it was ignored/handled
   immediately (e.g. default-ignore or job control), and -EAGAIN if the
   RT-signal queue is exhausted.  Does not return at all when the
   default action is fatal (force_sig). */
int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;
    struct emulated_sigtable *k;
    struct sigqueue *q, **pq;
    abi_ulong handler;
    int queue;

    trace_user_queue_signal(env, sig);
    k = &ts->sigtab[sig - 1];
    /* When a gdb is attached it may want every signal queued for itself. */
    queue = gdb_queuesig ();
    handler = sigact_table[sig - 1]._sa_handler;

    if (ts->sigsegv_blocked && sig == TARGET_SIGSEGV) {
        /* Guest has blocked SIGSEGV but we got one anyway. Assume this
         * is a forced SIGSEGV (ie one the kernel handles via force_sig_info
         * because it got a real MMU fault). A blocked SIGSEGV in that
         * situation is treated as if using the default handler. This is
         * not correct if some other process has randomly sent us a SIGSEGV
         * via kill(), but that is not easy to distinguish at this point,
         * so we assume it doesn't happen.
         */
        handler = TARGET_SIG_DFL;
    }

    if (!queue && handler == TARGET_SIG_DFL) {
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            /* Emulate default job-control stop by stopping ourselves. */
            kill(getpid(),SIGSTOP);
            return 0;
        } else
        /* default handler : ignore some signal. The other are fatal */
        if (sig != TARGET_SIGCHLD &&
            sig != TARGET_SIGURG &&
            sig != TARGET_SIGWINCH &&
            sig != TARGET_SIGCONT) {
            force_sig(sig);
        } else {
            return 0; /* indicate ignored */
        }
    } else if (!queue && handler == TARGET_SIG_IGN) {
        /* ignore signal */
        return 0;
    } else if (!queue && handler == TARGET_SIG_ERR) {
        force_sig(sig);
    } else {
        pq = &k->first;
        if (sig < TARGET_SIGRTMIN) {
            /* if non real time signal, we queue exactly one signal */
            if (!k->pending)
                q = &k->info;
            else
                return 0;
        } else {
            if (!k->pending) {
                /* first signal */
                q = &k->info;
            } else {
                /* Additional RT signals come from the dynamic pool and
                   are appended to the end of the per-signal list. */
                q = alloc_sigqueue(env);
                if (!q)
                    return -EAGAIN;
                while (*pq != NULL)
                    pq = &(*pq)->next;
            }
        }
        *pq = q;
        q->info = *info;
        q->next = NULL;
        k->pending = 1;
        /* signal that a new signal is pending */
        ts->signal_pending = 1;
        return 1; /* indicates that the signal was queued */
    }
}
563
/* Host-side SA_SIGINFO handler installed by signal_init() for all
   default-fatal signals.  Forwards CPU exceptions to the emulator and
   queues everything else as a guest signal. */
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    int sig;
    target_siginfo_t tinfo;

    /* the CPU emulator uses some host signals to detect exceptions,
       we forward to it some signals */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        /* si_code > 0 means kernel-generated (a real fault), not kill(). */
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);
    host_to_target_siginfo_noswap(&tinfo, info);
    if (queue_signal(env, sig, &tinfo) == 1) {
        /* interrupt the virtual CPU as soon as possible */
        cpu_exit(thread_cpu);
    }
}
590
/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
/* Emulate the sigaltstack(2) syscall: optionally report the old stack
   into uoss_addr and install a new one from uss_addr.  'sp' is the
   guest stack pointer, used to refuse changes while running on the
   alternate stack (EPERM, as the kernel does). */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    if(uoss_addr)
    {
        /* Capture the old state first; it is copied out only after a
           new stack (if any) has been validated and installed. */
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if(uss_addr)
    {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        /* Cannot change the alternate stack while executing on it. */
        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}
663
/* do_sigaction() return host values and errnos */
/* Emulate sigaction(2): record the guest's disposition in sigact_table
   and mirror it onto the host where safe (SIGSEGV/SIGBUS are always kept
   for QEMU's own exception detection). */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    /* SIGKILL/SIGSTOP dispositions cannot be changed, as in the kernel. */
    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)
        return -EINVAL;
    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#if !defined(TARGET_MIPS)
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#if !defined(TARGET_MIPS)
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal (sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
720
721 #if defined(TARGET_I386) && TARGET_ABI_BITS == 32
722
723 /* from the Linux kernel */
724
/* One 80-bit x87 register as stored by fsave. */
struct target_fpreg {
    uint16_t significand[4];
    uint16_t exponent;
};

/* One x87 register in the fxsave layout (padded to 16 bytes). */
struct target_fpxreg {
    uint16_t significand[4];
    uint16_t exponent;
    uint16_t padding[3];
};

/* One 128-bit SSE register. */
struct target_xmmreg {
    abi_ulong element[4];
};
739
/* i386 FPU state as laid out in the signal frame: the classic fsave
   image followed by the FXSR extension area (kernel struct _fpstate). */
struct target_fpstate {
    /* Regular FPU environment */
    abi_ulong cw;
    abi_ulong sw;
    abi_ulong tag;
    abi_ulong ipoff;
    abi_ulong cssel;
    abi_ulong dataoff;
    abi_ulong datasel;
    struct target_fpreg _st[8];
    uint16_t  status;
    uint16_t  magic;          /* 0xffff = regular FPU data only */

    /* FXSR FPU environment */
    abi_ulong _fxsr_env[6];   /* FXSR FPU env is ignored */
    abi_ulong mxcsr;
    abi_ulong reserved;
    struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
    struct target_xmmreg _xmm[8];
    abi_ulong padding[56];
};

#define X86_FXSR_MAGIC          0x0000
763
/* i386 sigcontext as pushed onto the guest stack; layout must match
   the kernel's struct sigcontext for 32-bit x86. */
struct target_sigcontext {
    uint16_t gs, __gsh;
    uint16_t fs, __fsh;
    uint16_t es, __esh;
    uint16_t ds, __dsh;
    abi_ulong edi;
    abi_ulong esi;
    abi_ulong ebp;
    abi_ulong esp;
    abi_ulong ebx;
    abi_ulong edx;
    abi_ulong ecx;
    abi_ulong eax;
    abi_ulong trapno;
    abi_ulong err;
    abi_ulong eip;
    uint16_t cs, __csh;
    abi_ulong eflags;
    abi_ulong esp_at_signal;
    uint16_t ss, __ssh;
    abi_ulong fpstate; /* pointer */
    abi_ulong oldmask;
    abi_ulong cr2;
};

/* Guest ucontext for rt signal frames. */
struct target_ucontext {
    abi_ulong         tuc_flags;
    abi_ulong         tuc_link;
    target_stack_t    tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t   tuc_sigmask;  /* mask last for extensibility */
};
796
/* Classic (non-RT) i386 signal frame as placed on the guest stack. */
struct sigframe
{
    abi_ulong pretcode;   /* return address: sa_restorer or retcode below */
    int sig;
    struct target_sigcontext sc;
    struct target_fpstate fpstate;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    char retcode[8];      /* inline sigreturn trampoline */
};

/* RT i386 signal frame: adds siginfo and a full ucontext. */
struct rt_sigframe
{
    abi_ulong pretcode;
    int sig;
    abi_ulong pinfo;      /* guest pointer to 'info' below */
    abi_ulong puc;        /* guest pointer to 'uc' below */
    struct target_siginfo info;
    struct target_ucontext uc;
    struct target_fpstate fpstate;
    char retcode[8];      /* inline rt_sigreturn trampoline */
};
818
819 /*
820 * Set up a signal frame.
821 */
822
/* XXX: save x87 state */
/* Fill a guest sigcontext from CPU state and save the FPU image at
   fpstate_addr.  'mask' is the first word of the blocked-signal set
   restored by sigreturn. */
static void setup_sigcontext(struct target_sigcontext *sc,
        struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
        abi_ulong fpstate_addr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint16_t magic;

    /* already locked in setup_frame() */
    __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
    __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
    __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
    __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
    __put_user(env->regs[R_EDI], &sc->edi);
    __put_user(env->regs[R_ESI], &sc->esi);
    __put_user(env->regs[R_EBP], &sc->ebp);
    __put_user(env->regs[R_ESP], &sc->esp);
    __put_user(env->regs[R_EBX], &sc->ebx);
    __put_user(env->regs[R_EDX], &sc->edx);
    __put_user(env->regs[R_ECX], &sc->ecx);
    __put_user(env->regs[R_EAX], &sc->eax);
    __put_user(cs->exception_index, &sc->trapno);
    __put_user(env->error_code, &sc->err);
    __put_user(env->eip, &sc->eip);
    __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
    __put_user(env->eflags, &sc->eflags);
    __put_user(env->regs[R_ESP], &sc->esp_at_signal);
    __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);

    /* Save the classic fsave image; 0xffff marks "regular FPU data only". */
    cpu_x86_fsave(env, fpstate_addr, 1);
    fpstate->status = fpstate->sw;
    magic = 0xffff;
    __put_user(magic, &fpstate->magic);
    __put_user(fpstate_addr, &sc->fpstate);

    /* non-iBCS2 extensions.. */
    __put_user(mask, &sc->oldmask);
    __put_user(env->cr[2], &sc->cr2);
}
862
/*
 * Determine which stack to use..
 */

/* Pick the guest stack pointer for a new signal frame of frame_size
   bytes, honouring SA_ONSTACK and the legacy sa_restorer-based stack
   switch.  The result is 8-byte aligned downward. */
static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
{
    unsigned long esp;

    /* Default to using normal stack */
    esp = env->regs[R_ESP];
    /* This is the X/Open sanctioned signal stack switching.  */
    if (ka->sa_flags & TARGET_SA_ONSTACK) {
        /* Only switch if not already on the alternate stack. */
        if (sas_ss_flags(esp) == 0)
            esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }

    /* This is the legacy signal stack switching. */
    else
    if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
        !(ka->sa_flags & TARGET_SA_RESTORER) &&
        ka->sa_restorer) {
        esp = (unsigned long) ka->sa_restorer;
    }
    return (esp - frame_size) & -8ul;
}
889
/* compare linux/arch/i386/kernel/signal.c:setup_frame() */
/* Build a classic (non-RT) signal frame on the guest stack and point
   the CPU at the guest handler.  On any fault writing the frame the
   process is killed with SIGSEGV. */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr;
    struct sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);

    setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
            frame_addr + offsetof(struct sigframe, fpstate));

    /* Words 1..N of the blocked set live in extramask (word 0 is in sc). */
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        abi_ulong retcode_addr;
        retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
        __put_user(retcode_addr, &frame->pretcode);
        /* This is popl %eax ; movl $,%eax ; int $0x80 */
        val16 = 0xb858;
        __put_user(val16, (uint16_t *)(frame->retcode+0));
        __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+6));
    }


    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    /* Clear the trap flag so single-stepping doesn't leak into the handler. */
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    /* Avoid recursing forever if the SIGSEGV handler itself faults. */
    if (sig == TARGET_SIGSEGV)
        ka->_sa_handler = TARGET_SIG_DFL;
    force_sig(TARGET_SIGSEGV /* , current */);
}
950
/* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
/* Build an RT signal frame (with siginfo and ucontext) on the guest
   stack and point the CPU at the guest handler.  On any fault writing
   the frame the process is killed with SIGSEGV. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr, addr;
    struct rt_sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);
    /* Guest-visible pointers to the in-frame siginfo and ucontext. */
    addr = frame_addr + offsetof(struct rt_sigframe, info);
    __put_user(addr, &frame->pinfo);
    addr = frame_addr + offsetof(struct rt_sigframe, uc);
    __put_user(addr, &frame->puc);
    tswap_siginfo(&frame->info, info);

    /* Create the ucontext.  */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
            set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        addr = frame_addr + offsetof(struct rt_sigframe, retcode);
        __put_user(addr, &frame->pretcode);
        /* This is movl $,%eax ; int $0x80 */
        __put_user(0xb8, (char *)(frame->retcode+0));
        __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+5));
    }

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    /* Clear the trap flag so single-stepping doesn't leak into the handler. */
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    /* Avoid recursing forever if the SIGSEGV handler itself faults. */
    if (sig == TARGET_SIGSEGV)
        ka->_sa_handler = TARGET_SIG_DFL;
    force_sig(TARGET_SIGSEGV /* , current */);
}
1022
/* Restore CPU state from a guest sigcontext.  The saved EAX is returned
   through *peax (it becomes the sigreturn syscall's return value).
   Returns 0 on success, 1 if the FPU state pointer is inaccessible. */
static int
restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc, int *peax)
{
    unsigned int err = 0;
    abi_ulong fpstate_addr;
    unsigned int tmpflags;

    cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
    cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
    cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
    cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));

    env->regs[R_EDI] = tswapl(sc->edi);
    env->regs[R_ESI] = tswapl(sc->esi);
    env->regs[R_EBP] = tswapl(sc->ebp);
    env->regs[R_ESP] = tswapl(sc->esp);
    env->regs[R_EBX] = tswapl(sc->ebx);
    env->regs[R_EDX] = tswapl(sc->edx);
    env->regs[R_ECX] = tswapl(sc->ecx);
    env->eip = tswapl(sc->eip);

    /* Force CPL 3 by setting the RPL bits of the selectors. */
    cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
    cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);

    /* Only restore the user-controllable eflags bits. */
    tmpflags = tswapl(sc->eflags);
    env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
    //          regs->orig_eax = -1;            /* disable syscall checks */

    fpstate_addr = tswapl(sc->fpstate);
    if (fpstate_addr != 0) {
        if (!access_ok(VERIFY_READ, fpstate_addr,
                       sizeof(struct target_fpstate)))
            goto badframe;
        cpu_x86_frstor(env, fpstate_addr, 1);
    }

    *peax = tswapl(sc->eax);
    return err;
badframe:
    return 1;
}
1064
/* Implement the i386 sigreturn syscall: restore the blocked-signal mask
   and CPU state saved by setup_frame().  Returns the restored EAX. */
long do_sigreturn(CPUX86State *env)
{
    struct sigframe *frame;
    /* ESP points just past pretcode+sig after the handler returned;
       the frame itself starts 8 bytes below. */
    abi_ulong frame_addr = env->regs[R_ESP] - 8;
    target_sigset_t target_set;
    sigset_t set;
    int eax, i;

    trace_user_do_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    /* set blocked signals */
    __get_user(target_set.sig[0], &frame->sc.oldmask);
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&set, &target_set);
    do_sigprocmask(SIG_SETMASK, &set, NULL);

    /* restore registers */
    if (restore_sigcontext(env, &frame->sc, &eax))
        goto badframe;
    unlock_user_struct(frame, frame_addr, 0);
    return eax;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return 0;
}
1096
/* Implement the i386 rt_sigreturn syscall: unwind an RT signal frame,
 * restoring signal mask, CPU state and the alternate signal stack.
 * Returns the restored EAX, or delivers SIGSEGV on a bad frame.
 */
long do_rt_sigreturn(CPUX86State *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe *frame;
    sigset_t set;
    int eax;

    /* Frame is 4 bytes below ESP — the trampoline's popped return
     * address; matches the kernel's (regs->sp - sizeof(long)). */
    frame_addr = env->regs[R_ESP] - 4;
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    /* RT frames carry the full sigset in the ucontext. */
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    do_sigprocmask(SIG_SETMASK, &set, NULL);

    if (restore_sigcontext(env, &frame->uc.tuc_mcontext, &eax))
        goto badframe;

    /* Re-install the guest's sigaltstack from the saved uc_stack. */
    if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
                       get_sp_from_cpustate(env)) == -EFAULT)
        goto badframe;

    unlock_user_struct(frame, frame_addr, 0);
    return eax;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return 0;
}
1126
1127 #elif defined(TARGET_AARCH64)
1128
/* AArch64 guest signal-frame layout.  These structures are guest ABI:
 * their layout must match the Linux arm64 kernel's uapi definitions
 * exactly, since guest code and glibc read them directly. */
struct target_sigcontext {
    uint64_t fault_address;
    /* AArch64 registers */
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
    /* 4K reserved for FP/SIMD state and future expansion */
    char __reserved[4096] __attribute__((__aligned__(16)));
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    /* glibc uses a 1024-bit sigset_t */
    char __unused[1024 / 8 - sizeof(target_sigset_t)];
    /* last for future expansion */
    struct target_sigcontext tuc_mcontext;
};

/*
 * Header to be used at the beginning of structures extending the user
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
 */
struct target_aarch64_ctx {
    uint32_t magic;
    uint32_t size;
};

#define TARGET_FPSIMD_MAGIC 0x46508001

/* FP/SIMD record stored inside sigcontext.__reserved, tagged with
 * TARGET_FPSIMD_MAGIC in its header. */
struct target_fpsimd_context {
    struct target_aarch64_ctx head;
    uint32_t fpsr;
    uint32_t fpcr;
    uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
};

/*
 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
 * user space as it will change with the addition of new context. User space
 * should check the magic/size information.
 */
struct target_aux_context {
    struct target_fpsimd_context fpsimd;
    /* additional context to be added before "end" */
    struct target_aarch64_ctx end;
};

/* Complete frame pushed on the guest stack for signal delivery:
 * siginfo + ucontext, an fp/lr pair for unwinders, and a two-insn
 * sigreturn trampoline. */
struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    uint64_t fp;
    uint64_t lr;
    uint32_t tramp[2];
};
1189
/* Fill in the guest-visible parts of an AArch64 rt sigframe: general
 * registers, pc/pstate, fault address, signal mask, and the FP/SIMD
 * record (with its magic/size header and "end" terminator) inside the
 * sigcontext's reserved area.  Always returns 0.
 */
static int target_setup_sigframe(struct target_rt_sigframe *sf,
                                 CPUARMState *env, target_sigset_t *set)
{
    int i;
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;

    /* set up the stack frame for unwinding */
    __put_user(env->xregs[29], &sf->fp);
    __put_user(env->xregs[30], &sf->lr);

    for (i = 0; i < 31; i++) {
        __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }
    __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);

    __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
    }

    /* Each 128-bit vreg is stored as two 64-bit halves; on a big-endian
     * target the halves must be swapped to keep the guest-visible
     * little-endian vreg layout. */
    for (i = 0; i < 32; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
        __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
        __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
#else
        __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
        __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
    __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
    __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
    __put_user(sizeof(struct target_fpsimd_context),
               &aux->fpsimd.head.size);

    /* set the "end" magic */
    __put_user(0, &aux->end.magic);
    __put_user(0, &aux->end.size);

    return 0;
}
1235
/* Inverse of target_setup_sigframe: restore signal mask, general
 * registers, pc/pstate and the FP/SIMD state from a guest rt sigframe.
 * Returns 0 on success, 1 if the FP/SIMD record's magic or size is
 * wrong (caller treats this as a bad frame).
 */
static int target_restore_sigframe(CPUARMState *env,
                                   struct target_rt_sigframe *sf)
{
    sigset_t set;
    int i;
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
    uint32_t magic, size, fpsr, fpcr;
    uint64_t pstate;

    target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
    do_sigprocmask(SIG_SETMASK, &set, NULL);

    for (i = 0; i < 31; i++) {
        __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }

    __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
    pstate_write(env, pstate);

    /* Validate the FP/SIMD record header before trusting its contents. */
    __get_user(magic, &aux->fpsimd.head.magic);
    __get_user(size, &aux->fpsimd.head.size);

    if (magic != TARGET_FPSIMD_MAGIC
        || size != sizeof(struct target_fpsimd_context)) {
        return 1;
    }

    /* Half-swap mirrors the store order in target_setup_sigframe. */
    for (i = 0; i < 32; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
        __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
        __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
#else
        __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
        __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __get_user(fpsr, &aux->fpsimd.fpsr);
    vfp_set_fpsr(env, fpsr);
    __get_user(fpcr, &aux->fpsimd.fpcr);
    vfp_set_fpcr(env, fpcr);

    return 0;
}
1282
1283 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
1284 {
1285 abi_ulong sp;
1286
1287 sp = env->xregs[31];
1288
1289 /*
1290 * This is the X/Open sanctioned signal stack switching.
1291 */
1292 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1293 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1294 }
1295
1296 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;
1297
1298 return sp;
1299 }
1300
/* Build an AArch64 signal frame on the guest stack and redirect the CPU
 * to the handler.  Used for both RT and non-RT delivery; info == NULL
 * means non-RT (no siginfo/ucontext pointers placed in x1/x2).  On any
 * failure to write the frame, forces SIGSEGV instead.
 */
static void target_setup_frame(int usig, struct target_sigaction *ka,
                               target_siginfo_t *info, target_sigset_t *set,
                               CPUARMState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr, return_addr;

    frame_addr = get_sigframe(ka, env);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);

    /* Record the current sigaltstack so sigreturn can re-install it. */
    __put_user(target_sigaltstack_used.ss_sp,
                      &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->xregs[31]),
                      &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
                      &frame->uc.tuc_stack.ss_size);
    target_setup_sigframe(frame, env, set);
    /* Return either via the app-supplied restorer or via an in-frame
     * trampoline that invokes rt_sigreturn. */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        return_addr = ka->sa_restorer;
    } else {
        /* mov x8,#__NR_rt_sigreturn; svc #0 */
        __put_user(0xd2801168, &frame->tramp[0]);
        __put_user(0xd4000001, &frame->tramp[1]);
        return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
    }
    /* Enter the handler: x0 = signal, sp = frame, x29 = saved fp pair,
     * pc = handler, x30 = return path. */
    env->xregs[0] = usig;
    env->xregs[31] = frame_addr;
    env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
    env->pc = ka->_sa_handler;
    env->xregs[30] = return_addr;
    if (info) {
        tswap_siginfo(&frame->info, info);
        env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
        env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    }

    unlock_user_struct(frame, frame_addr, 1);
    return;

 give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sig(TARGET_SIGSEGV);
}
1350
/* RT signal delivery on AArch64: the shared frame builder handles both
 * flavours, so just forward with siginfo attached. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info, target_sigset_t *set,
                           CPUARMState *env)
{
    target_setup_frame(sig, ka, info, set, env);
}
1357
1358 static void setup_frame(int sig, struct target_sigaction *ka,
1359 target_sigset_t *set, CPUARMState *env)
1360 {
1361 target_setup_frame(sig, ka, 0, set, env);
1362 }
1363
/* AArch64 rt_sigreturn: validate frame alignment, restore CPU state,
 * signal mask and sigaltstack, and resume the guest with the restored
 * x0.  Delivers SIGSEGV on any bad frame.
 */
long do_rt_sigreturn(CPUARMState *env)
{
    struct target_rt_sigframe *frame = NULL;
    abi_ulong frame_addr = env->xregs[31];

    trace_user_do_rt_sigreturn(env, frame_addr);
    /* The frame was pushed 16-byte aligned; anything else means the
     * guest tampered with sp. */
    if (frame_addr & 15) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (target_restore_sigframe(env, frame)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr +
            offsetof(struct target_rt_sigframe, uc.tuc_stack),
            0, get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return env->xregs[0];

 badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return 0;
}
1396
/* AArch64 has no legacy sigreturn; both entry points share the rt path. */
long do_sigreturn(CPUARMState *env)
{
    return do_rt_sigreturn(env);
}
1401
1402 #elif defined(TARGET_ARM)
1403
/* ARM (AArch32) guest signal-frame definitions.  Two ucontext/frame
 * generations exist: "v1" matches kernels before the v2 cutoff used in
 * the dispatch functions below, "v2" adds the coprocessor register
 * space.  All of these are guest ABI and must match the Linux ARM
 * kernel's uapi layouts byte for byte. */
struct target_sigcontext {
    abi_ulong trap_no;
    abi_ulong error_code;
    abi_ulong oldmask;
    abi_ulong arm_r0;
    abi_ulong arm_r1;
    abi_ulong arm_r2;
    abi_ulong arm_r3;
    abi_ulong arm_r4;
    abi_ulong arm_r5;
    abi_ulong arm_r6;
    abi_ulong arm_r7;
    abi_ulong arm_r8;
    abi_ulong arm_r9;
    abi_ulong arm_r10;
    abi_ulong arm_fp;
    abi_ulong arm_ip;
    abi_ulong arm_sp;
    abi_ulong arm_lr;
    abi_ulong arm_pc;
    abi_ulong arm_cpsr;
    abi_ulong fault_address;
};

struct target_ucontext_v1 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;	/* mask last for extensibility */
};

struct target_ucontext_v2 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t  tuc_sigmask;	/* mask last for extensibility */
    char __unused[128 - sizeof(target_sigset_t)];
    /* Space for magic/size-tagged coprocessor frames (VFP, iWMMXt),
     * terminated by a zero word. */
    abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
};

struct target_user_vfp {
    uint64_t fpregs[32];
    abi_ulong fpscr;
};

struct target_user_vfp_exc {
    abi_ulong fpexc;
    abi_ulong fpinst;
    abi_ulong fpinst2;
};

/* VFP record stored in tuc_regspace, tagged with TARGET_VFP_MAGIC. */
struct target_vfp_sigframe {
    abi_ulong magic;
    abi_ulong size;
    struct target_user_vfp ufp;
    struct target_user_vfp_exc ufp_exc;
} __attribute__((__aligned__(8)));

/* iWMMXt record stored in tuc_regspace, tagged with TARGET_IWMMXT_MAGIC. */
struct target_iwmmxt_sigframe {
    abi_ulong magic;
    abi_ulong size;
    uint64_t regs[16];
    /* Note that not all the coprocessor control registers are stored here */
    uint32_t wcssf;
    uint32_t wcasf;
    uint32_t wcgr0;
    uint32_t wcgr1;
    uint32_t wcgr2;
    uint32_t wcgr3;
} __attribute__((__aligned__(8)));

#define TARGET_VFP_MAGIC 0x56465001
#define TARGET_IWMMXT_MAGIC 0x12ef842a

struct sigframe_v1
{
    struct target_sigcontext sc;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    abi_ulong retcode;
};

struct sigframe_v2
{
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

struct rt_sigframe_v1
{
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext_v1 uc;
    abi_ulong retcode;
};

struct rt_sigframe_v2
{
    struct target_siginfo info;
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};
1508
#define TARGET_CONFIG_CPU_32 1

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN	(0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN	(0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))

/* Trampoline instruction table, indexed by (thumb + 2 * is_rt) in
 * setup_return(): [ARM sigreturn, Thumb sigreturn,
 *                  ARM rt_sigreturn, Thumb rt_sigreturn]. */
static const abi_ulong retcodes[4] = {
	SWI_SYS_SIGRETURN,	SWI_THUMB_SIGRETURN,
	SWI_SYS_RT_SIGRETURN,	SWI_THUMB_RT_SIGRETURN
};
1528
1529
1530 static inline int valid_user_regs(CPUARMState *regs)
1531 {
1532 return 1;
1533 }
1534
/* Write the guest's general registers, CPSR and the first signal-mask
 * word into a target sigcontext.  trap_no/error_code/fault_address are
 * zeroed — the emulator does not track them like the kernel does.
 */
static void
setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
                 CPUARMState *env, abi_ulong mask)
{
    __put_user(env->regs[0], &sc->arm_r0);
    __put_user(env->regs[1], &sc->arm_r1);
    __put_user(env->regs[2], &sc->arm_r2);
    __put_user(env->regs[3], &sc->arm_r3);
    __put_user(env->regs[4], &sc->arm_r4);
    __put_user(env->regs[5], &sc->arm_r5);
    __put_user(env->regs[6], &sc->arm_r6);
    __put_user(env->regs[7], &sc->arm_r7);
    __put_user(env->regs[8], &sc->arm_r8);
    __put_user(env->regs[9], &sc->arm_r9);
    __put_user(env->regs[10], &sc->arm_r10);
    __put_user(env->regs[11], &sc->arm_fp);
    __put_user(env->regs[12], &sc->arm_ip);
    __put_user(env->regs[13], &sc->arm_sp);
    __put_user(env->regs[14], &sc->arm_lr);
    __put_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    __put_user(cpsr_read(env), &sc->arm_cpsr);
#endif

    __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
    __put_user(/* current->thread.error_code */ 0, &sc->error_code);
    __put_user(/* current->thread.address */ 0, &sc->fault_address);
    __put_user(mask, &sc->oldmask);
}
1564
1565 static inline abi_ulong
1566 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
1567 {
1568 unsigned long sp = regs->regs[13];
1569
1570 /*
1571 * This is the X/Open sanctioned signal stack switching.
1572 */
1573 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1574 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1575 }
1576 /*
1577 * ATPCS B01 mandates 8-byte alignment
1578 */
1579 return (sp - framesize) & ~7;
1580 }
1581
/* Point the CPU at the signal handler and arrange its return path.
 * The return address is either the app-supplied sa_restorer or a
 * trampoline instruction written at rc/rc_addr (selected from
 * retcodes[] by Thumb-ness and SA_SIGINFO).  Bit 0 of the handler
 * address selects Thumb mode, mirrored into CPSR.T.
 */
static void
setup_return(CPUARMState *env, struct target_sigaction *ka,
             abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
{
    abi_ulong handler = ka->_sa_handler;
    abi_ulong retcode;
    int thumb = handler & 1;
    uint32_t cpsr = cpsr_read(env);

    /* Entering the handler starts a fresh instruction stream: clear any
     * pending IT state and set T to match the handler's ISA. */
    cpsr &= ~CPSR_IT;
    if (thumb) {
        cpsr |= CPSR_T;
    } else {
        cpsr &= ~CPSR_T;
    }

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        retcode = ka->sa_restorer;
    } else {
        unsigned int idx = thumb;

        if (ka->sa_flags & TARGET_SA_SIGINFO) {
            idx += 2;
        }

        __put_user(retcodes[idx], rc);

        /* +thumb keeps bit 0 set so the return stays in Thumb mode. */
        retcode = rc_addr + thumb;
    }

    env->regs[0] = usig;
    env->regs[13] = frame_addr;
    env->regs[14] = retcode;
    /* Mask off the mode bit(s) from the handler address for the PC. */
    env->regs[15] = handler & (thumb ? ~1 : ~3);
    cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
}
1618
/* Append a magic/size-tagged VFP register record to the v2 ucontext's
 * coprocessor regspace.  Returns a pointer just past the record, where
 * the next record (or the terminating zero word) goes.
 */
static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
{
    int i;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;
    __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
    __put_user(sizeof(*vfpframe), &vfpframe->size);
    for (i = 0; i < 32; i++) {
        __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
    }
    __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
    __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong*)(vfpframe+1);
}
1635
1636 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
1637 CPUARMState *env)
1638 {
1639 int i;
1640 struct target_iwmmxt_sigframe *iwmmxtframe;
1641 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1642 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
1643 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
1644 for (i = 0; i < 16; i++) {
1645 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1646 }
1647 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1648 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf);
1649 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1650 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1651 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1652 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1653 return (abi_ulong*)(iwmmxtframe+1);
1654 }
1655
/* Populate a v2 ucontext: sigaltstack info, sigcontext, any present
 * coprocessor records (VFP, iWMMXt) terminated by a zero word, and the
 * full signal mask.
 */
static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
                              target_sigset_t *set, CPUARMState *env)
{
    struct target_sigaltstack stack;
    int i;
    abi_ulong *regspace;

    /* Clear all the bits of the ucontext we don't use.  */
    memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&uc->tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
    /* Save coprocessor signal frame.  */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = setup_sigframe_v2_vfp(regspace, env);
    }
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        regspace = setup_sigframe_v2_iwmmxt(regspace, env);
    }

    /* Write terminating magic word */
    __put_user(0, regspace);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
    }
}
1689
/* compare linux/arch/arm/kernel/signal.c:setup_frame() */
/* Deliver a non-RT signal using the legacy v1 frame: sigcontext with
 * the first mask word in sc.oldmask and the rest in extramask[]. */
static void setup_frame_v1(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
    int i;

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        return;

    setup_sigcontext(&frame->sc, regs, set->sig[0]);

    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v1, retcode));

    unlock_user_struct(frame, frame_addr, 1);
}
1713
/* Deliver a non-RT signal using the v2 frame: a full ucontext with
 * coprocessor register space. */
static void setup_frame_v2(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v2 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        return;

    setup_sigframe_v2(&frame->uc, set, regs);

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v2, retcode));

    unlock_user_struct(frame, frame_addr, 1);
}
1731
1732 static void setup_frame(int usig, struct target_sigaction *ka,
1733 target_sigset_t *set, CPUARMState *regs)
1734 {
1735 if (get_osversion() >= 0x020612) {
1736 setup_frame_v2(usig, ka, set, regs);
1737 } else {
1738 setup_frame_v1(usig, ka, set, regs);
1739 }
1740 }
1741
/* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
/* Deliver an RT signal using the v1 layout: explicit pinfo/puc
 * pointers followed by siginfo and a v1 ucontext.  r1/r2 are loaded
 * with the siginfo/ucontext addresses for the three-argument handler. */
static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
                              target_siginfo_t *info,
                              target_sigset_t *set, CPUARMState *env)
{
    struct rt_sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
    struct target_sigaltstack stack;
    int i;
    abi_ulong info_addr, uc_addr;

    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        return /* 1 */;

    info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
    __put_user(info_addr, &frame->pinfo);
    uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
    __put_user(uc_addr, &frame->puc);
    tswap_siginfo(&frame->info, info);

    /* Clear all the bits of the ucontext we don't use.  */
    memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    setup_return(env, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct rt_sigframe_v1, retcode));

    env->regs[1] = info_addr;
    env->regs[2] = uc_addr;

    unlock_user_struct(frame, frame_addr, 1);
}
1785
/* Deliver an RT signal using the v2 layout: siginfo plus a v2 ucontext
 * (with coprocessor register space).  r1/r2 carry the siginfo and
 * ucontext addresses for the handler. */
static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
                              target_siginfo_t *info,
                              target_sigset_t *set, CPUARMState *env)
{
    struct rt_sigframe_v2 *frame;
    abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
    abi_ulong info_addr, uc_addr;

    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        return /* 1 */;

    info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
    uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
    tswap_siginfo(&frame->info, info);

    setup_sigframe_v2(&frame->uc, set, env);

    setup_return(env, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct rt_sigframe_v2, retcode));

    env->regs[1] = info_addr;
    env->regs[2] = uc_addr;

    unlock_user_struct(frame, frame_addr, 1);
}
1812
1813 static void setup_rt_frame(int usig, struct target_sigaction *ka,
1814 target_siginfo_t *info,
1815 target_sigset_t *set, CPUARMState *env)
1816 {
1817 if (get_osversion() >= 0x020612) {
1818 setup_rt_frame_v2(usig, ka, info, set, env);
1819 } else {
1820 setup_rt_frame_v1(usig, ka, info, set, env);
1821 }
1822 }
1823
/* Restore the ARM general registers and CPSR from a guest sigcontext.
 * Returns 0 on success, non-zero if the resulting register state is
 * rejected (valid_user_regs currently accepts everything).
 */
static int
restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
{
    int err = 0;
    uint32_t cpsr;

    __get_user(env->regs[0], &sc->arm_r0);
    __get_user(env->regs[1], &sc->arm_r1);
    __get_user(env->regs[2], &sc->arm_r2);
    __get_user(env->regs[3], &sc->arm_r3);
    __get_user(env->regs[4], &sc->arm_r4);
    __get_user(env->regs[5], &sc->arm_r5);
    __get_user(env->regs[6], &sc->arm_r6);
    __get_user(env->regs[7], &sc->arm_r7);
    __get_user(env->regs[8], &sc->arm_r8);
    __get_user(env->regs[9], &sc->arm_r9);
    __get_user(env->regs[10], &sc->arm_r10);
    __get_user(env->regs[11], &sc->arm_fp);
    __get_user(env->regs[12], &sc->arm_ip);
    __get_user(env->regs[13], &sc->arm_sp);
    __get_user(env->regs[14], &sc->arm_lr);
    __get_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    /* Only the user-writable CPSR fields are applied (mask
     * CPSR_USER | CPSR_EXEC). */
    __get_user(cpsr, &sc->arm_cpsr);
    cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
#endif

    err |= !valid_user_regs(env);

    return err;
}
1855
1856 static long do_sigreturn_v1(CPUARMState *env)
1857 {
1858 abi_ulong frame_addr;
1859 struct sigframe_v1 *frame = NULL;
1860 target_sigset_t set;
1861 sigset_t host_set;
1862 int i;
1863
1864 /*
1865 * Since we stacked the signal on a 64-bit boundary,
1866 * then 'sp' should be word aligned here. If it's
1867 * not, then the user is trying to mess with us.
1868 */
1869 frame_addr = env->regs[13];
1870 trace_user_do_sigreturn(env, frame_addr);
1871 if (frame_addr & 7) {
1872 goto badframe;
1873 }
1874
1875 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1876 goto badframe;
1877
1878 __get_user(set.sig[0], &frame->sc.oldmask);
1879 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1880 __get_user(set.sig[i], &frame->extramask[i - 1]);
1881 }
1882
1883 target_to_host_sigset_internal(&host_set, &set);
1884 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
1885
1886 if (restore_sigcontext(env, &frame->sc))
1887 goto badframe;
1888
1889 #if 0
1890 /* Send SIGTRAP if we're single-stepping */
1891 if (ptrace_cancel_bpt(current))
1892 send_sig(SIGTRAP, current, 1);
1893 #endif
1894 unlock_user_struct(frame, frame_addr, 0);
1895 return env->regs[0];
1896
1897 badframe:
1898 force_sig(TARGET_SIGSEGV /* , current */);
1899 return 0;
1900 }
1901
/* Restore VFP state from a v2 coprocessor record.  Returns a pointer
 * just past the record, or 0 (NULL) when the magic/size header does not
 * match, which the caller treats as a bad frame.
 */
static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
{
    int i;
    abi_ulong magic, sz;
    uint32_t fpscr, fpexc;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;

    __get_user(magic, &vfpframe->magic);
    __get_user(sz, &vfpframe->size);
    if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
        return 0;
    }
    for (i = 0; i < 32; i++) {
        /* float64_val() yields the raw-bits lvalue of the d-register. */
        __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
    }
    __get_user(fpscr, &vfpframe->ufp.fpscr);
    vfp_set_fpscr(env, fpscr);
    __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
    /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
     * and the exception flag is cleared
     */
    fpexc |= (1 << 30);
    fpexc &= ~((1 << 31) | (1 << 28));
    env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
    __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong*)(vfpframe + 1);
}
1931
1932 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
1933 abi_ulong *regspace)
1934 {
1935 int i;
1936 abi_ulong magic, sz;
1937 struct target_iwmmxt_sigframe *iwmmxtframe;
1938 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1939
1940 __get_user(magic, &iwmmxtframe->magic);
1941 __get_user(sz, &iwmmxtframe->size);
1942 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
1943 return 0;
1944 }
1945 for (i = 0; i < 16; i++) {
1946 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1947 }
1948 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1949 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf);
1950 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1951 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1952 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1953 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1954 return (abi_ulong*)(iwmmxtframe + 1);
1955 }
1956
/* Common v2 sigreturn body shared by do_sigreturn_v2 and
 * do_rt_sigreturn_v2: restore signal mask, CPU state, coprocessor
 * records in regspace order (VFP then iWMMXt), and the sigaltstack.
 * Returns 0 on success, 1 on a bad frame.
 */
static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
                                 struct target_ucontext_v2 *uc)
{
    sigset_t host_set;
    abi_ulong *regspace;

    target_to_host_sigset(&host_set, &uc->tuc_sigmask);
    do_sigprocmask(SIG_SETMASK, &host_set, NULL);

    if (restore_sigcontext(env, &uc->tuc_mcontext))
        return 1;

    /* Restore coprocessor signal frame */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = restore_sigframe_v2_vfp(env, regspace);
        if (!regspace) {
            return 1;
        }
    }
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        regspace = restore_sigframe_v2_iwmmxt(env, regspace);
        if (!regspace) {
            return 1;
        }
    }

    if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
        return 1;

#if 0
    /* Send SIGTRAP if we're single-stepping */
    if (ptrace_cancel_bpt(current))
        send_sig(SIGTRAP, current, 1);
#endif

    return 0;
}
1995
/* Handle sigreturn for the v2 frame layout.  Returns the guest's
 * restored r0, or delivers SIGSEGV on a bad frame. */
static long do_sigreturn_v2(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct sigframe_v2 *frame = NULL;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    if (do_sigframe_return_v2(env, frame_addr, &frame->uc))
        goto badframe;

    unlock_user_struct(frame, frame_addr, 0);
    return env->regs[0];

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV /* , current */);
    return 0;
}
2026
2027 long do_sigreturn(CPUARMState *env)
2028 {
2029 if (get_osversion() >= 0x020612) {
2030 return do_sigreturn_v2(env);
2031 } else {
2032 return do_sigreturn_v1(env);
2033 }
2034 }
2035
/* Handle rt_sigreturn for the legacy v1 frame layout: restore the
 * signal mask from the ucontext, the CPU state, and the sigaltstack.
 * Returns the guest's restored r0, or delivers SIGSEGV on a bad frame. */
static long do_rt_sigreturn_v1(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe_v1 *frame = NULL;
    sigset_t host_set;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
    do_sigprocmask(SIG_SETMASK, &host_set, NULL);

    if (restore_sigcontext(env, &frame->uc.tuc_mcontext))
        goto badframe;

    if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
        goto badframe;

#if 0
    /* Send SIGTRAP if we're single-stepping */
    if (ptrace_cancel_bpt(current))
        send_sig(SIGTRAP, current, 1);
#endif
    unlock_user_struct(frame, frame_addr, 0);
    return env->regs[0];

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV /* , current */);
    return 0;
}
2078
/* Handle rt_sigreturn for the v2 frame layout (shared restore logic in
 * do_sigframe_return_v2).  Returns the guest's restored r0, or delivers
 * SIGSEGV on a bad frame. */
static long do_rt_sigreturn_v2(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe_v2 *frame = NULL;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    if (do_sigframe_return_v2(env, frame_addr, &frame->uc))
        goto badframe;

    unlock_user_struct(frame, frame_addr, 0);
    return env->regs[0];

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV /* , current */);
    return 0;
}
2109
2110 long do_rt_sigreturn(CPUARMState *env)
2111 {
2112 if (get_osversion() >= 0x020612) {
2113 return do_rt_sigreturn_v2(env);
2114 } else {
2115 return do_rt_sigreturn_v1(env);
2116 }
2117 }
2118
2119 #elif defined(TARGET_SPARC)
2120
2121 #define __SUNOS_MAXWIN 31
2122
2123 /* This is what SunOS does, so shall I. */
2124 struct target_sigcontext {
2125 abi_ulong sigc_onstack; /* state to restore */
2126
2127 abi_ulong sigc_mask; /* sigmask to restore */
2128 abi_ulong sigc_sp; /* stack pointer */
2129 abi_ulong sigc_pc; /* program counter */
2130 abi_ulong sigc_npc; /* next program counter */
2131 abi_ulong sigc_psr; /* for condition codes etc */
2132 abi_ulong sigc_g1; /* User uses these two registers */
2133 abi_ulong sigc_o0; /* within the trampoline code. */
2134
2135 /* Now comes information regarding the users window set
2136 * at the time of the signal.
2137 */
2138 abi_ulong sigc_oswins; /* outstanding windows */
2139
2140 /* stack ptrs for each regwin buf */
2141 char *sigc_spbuf[__SUNOS_MAXWIN];
2142
2143 /* Windows to restore after signal */
2144 struct {
2145 abi_ulong locals[8];
2146 abi_ulong ins[8];
2147 } sigc_wbuf[__SUNOS_MAXWIN];
2148 };
2149 /* A Sparc stack frame */
2150 struct sparc_stackf {
2151 abi_ulong locals[8];
2152 abi_ulong ins[8];
2153 /* It's simpler to treat fp and callers_pc as elements of ins[]
2154 * since we never need to access them ourselves.
2155 */
2156 char *structptr;
2157 abi_ulong xargs[6];
2158 abi_ulong xxargs[1];
2159 };
2160
2161 typedef struct {
2162 struct {
2163 abi_ulong psr;
2164 abi_ulong pc;
2165 abi_ulong npc;
2166 abi_ulong y;
2167 abi_ulong u_regs[16]; /* globals and ins */
2168 } si_regs;
2169 int si_mask;
2170 } __siginfo_t;
2171
2172 typedef struct {
2173 abi_ulong si_float_regs[32];
2174 unsigned long si_fsr;
2175 unsigned long si_fpqdepth;
2176 struct {
2177 unsigned long *insn_addr;
2178 unsigned long insn;
2179 } si_fpqueue [16];
2180 } qemu_siginfo_fpu_t;
2181
2182
/* Non-RT signal frame pushed on the guest stack by setup_frame():
 * a normal SPARC stack frame, the saved CPU state, the trampoline
 * instructions and the extra words of the blocked-signal mask.
 */
struct target_signal_frame {
    struct sparc_stackf ss;
    __siginfo_t info;
    abi_ulong fpu_save;
    abi_ulong insns[2] __attribute__ ((aligned (8)));
    abi_ulong extramask[TARGET_NSIG_WORDS - 1];
    abi_ulong extra_size; /* Should be 0 */
    qemu_siginfo_fpu_t fpu_state;
};
/* RT signal frame layout; currently unused because setup_rt_frame()
 * is not implemented for 32-bit SPARC.
 */
struct target_rt_signal_frame {
    struct sparc_stackf ss;
    siginfo_t info;
    abi_ulong regs[20];
    sigset_t mask;
    abi_ulong fpu_save;
    unsigned int insns[2];
    stack_t stack;
    unsigned int extra_size; /* Should be 0 */
    qemu_siginfo_fpu_t fpu_state;
};
2203
2204 #define UREG_O0 16
2205 #define UREG_O6 22
2206 #define UREG_I0 0
2207 #define UREG_I1 1
2208 #define UREG_I2 2
2209 #define UREG_I3 3
2210 #define UREG_I4 4
2211 #define UREG_I5 5
2212 #define UREG_I6 6
2213 #define UREG_I7 7
2214 #define UREG_L0 8
2215 #define UREG_FP UREG_I6
2216 #define UREG_SP UREG_O6
2217
2218 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
2219 CPUSPARCState *env,
2220 unsigned long framesize)
2221 {
2222 abi_ulong sp;
2223
2224 sp = env->regwptr[UREG_FP];
2225
2226 /* This is the X/Open sanctioned signal stack switching. */
2227 if (sa->sa_flags & TARGET_SA_ONSTACK) {
2228 if (!on_sig_stack(sp)
2229 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7))
2230 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2231 }
2232 return sp - framesize;
2233 }
2234
2235 static int
2236 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
2237 {
2238 int err = 0, i;
2239
2240 __put_user(env->psr, &si->si_regs.psr);
2241 __put_user(env->pc, &si->si_regs.pc);
2242 __put_user(env->npc, &si->si_regs.npc);
2243 __put_user(env->y, &si->si_regs.y);
2244 for (i=0; i < 8; i++) {
2245 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
2246 }
2247 for (i=0; i < 8; i++) {
2248 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
2249 }
2250 __put_user(mask, &si->si_mask);
2251 return err;
2252 }
2253
#if 0
/* Dead code, never compiled in: sigcontext-based frame population,
 * kept for reference against the kernel's original implementation.
 */
static int
setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
                 CPUSPARCState *env, unsigned long mask)
{
    int err = 0;

    __put_user(mask, &sc->sigc_mask);
    __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
    __put_user(env->pc, &sc->sigc_pc);
    __put_user(env->npc, &sc->sigc_npc);
    __put_user(env->psr, &sc->sigc_psr);
    __put_user(env->gregs[1], &sc->sigc_g1);
    __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);

    return err;
}
#endif
/* Signal frame size rounded up to the next 8-byte multiple. */
#define NF_ALIGNEDSZ  (((sizeof(struct target_signal_frame) + 7) & (~7)))
2273
/* Build and deliver a non-RT SPARC signal frame on the guest stack
 * (cf. linux/arch/sparc/kernel/signal_32.c).  Saves CPU state and
 * the blocked-signal mask into the frame, then points pc/npc at the
 * handler with %i7 set to either sa_restorer or the sigreturn
 * trampoline written into the frame itself.
 */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUSPARCState *env)
{
    abi_ulong sf_addr;
    struct target_signal_frame *sf;
    int sigframe_size, err, i;

    /* 1. Make sure everything is clean */
    //synchronize_user_stack();

    sigframe_size = NF_ALIGNEDSZ;
    sf_addr = get_sigframe(ka, env, sigframe_size);
    trace_user_setup_frame(env, sf_addr);

    sf = lock_user(VERIFY_WRITE, sf_addr,
                   sizeof(struct target_signal_frame), 0);
    if (!sf)
        goto sigsegv;

#if 0
    if (invalid_frame_pointer(sf, sigframe_size))
        goto sigill_and_return;
#endif
    /* 2. Save the current process state */
    err = setup___siginfo(&sf->info, env, set->sig[0]);
    __put_user(0, &sf->extra_size);

    //save_fpu_state(regs, &sf->fpu_state);
    //__put_user(&sf->fpu_state, &sf->fpu_save);

    __put_user(set->sig[0], &sf->info.si_mask);
    for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
        __put_user(set->sig[i + 1], &sf->extramask[i]);
    }

    /* Spill the current window's locals and ins into the frame. */
    for (i = 0; i < 8; i++) {
        __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
    }
    for (i = 0; i < 8; i++) {
        __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
    }
    if (err)
        goto sigsegv;

    /* 3. signal handler back-trampoline and parameters */
    env->regwptr[UREG_FP] = sf_addr;
    env->regwptr[UREG_I0] = sig;
    env->regwptr[UREG_I1] = sf_addr +
            offsetof(struct target_signal_frame, info);
    /* NOTE(review): %i2 receives the same info pointer as %i1 -
     * confirm this matches the kernel's intended third argument. */
    env->regwptr[UREG_I2] = sf_addr +
            offsetof(struct target_signal_frame, info);

    /* 4. signal handler */
    env->pc = ka->_sa_handler;
    env->npc = (env->pc + 4);
    /* 5. return to kernel instructions */
    if (ka->sa_restorer)
        env->regwptr[UREG_I7] = ka->sa_restorer;
    else {
        uint32_t val32;

        /* %i7 is a return address: point it 8 bytes before the
         * trampoline so "ret" lands on insns[0]. */
        env->regwptr[UREG_I7] = sf_addr +
                offsetof(struct target_signal_frame, insns) - 2 * 4;

        /* mov __NR_sigreturn, %g1 */
        val32 = 0x821020d8;
        __put_user(val32, &sf->insns[0]);

        /* t 0x10 */
        val32 = 0x91d02010;
        __put_user(val32, &sf->insns[1]);
        if (err)
            goto sigsegv;

        /* Flush instruction space. */
        //flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
        // tb_flush(CPU(sparc_env_get_cpu(env)));
    }
    unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
    return;
#if 0
sigill_and_return:
    force_sig(TARGET_SIGILL);
#endif
sigsegv:
    unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
    force_sig(TARGET_SIGSEGV);
}
2362
/* RT signal delivery is not implemented for 32-bit SPARC; log a
 * diagnostic and return without touching guest state.
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUSPARCState *env)
{
    fprintf(stderr, "setup_rt_frame: not implemented\n");
}
2369
2370 long do_sigreturn(CPUSPARCState *env)
2371 {
2372 abi_ulong sf_addr;
2373 struct target_signal_frame *sf;
2374 uint32_t up_psr, pc, npc;
2375 target_sigset_t set;
2376 sigset_t host_set;
2377 int err=0, i;
2378
2379 sf_addr = env->regwptr[UREG_FP];
2380 trace_user_do_sigreturn(env, sf_addr);
2381 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1))
2382 goto segv_and_exit;
2383
2384 /* 1. Make sure we are not getting garbage from the user */
2385
2386 if (sf_addr & 3)
2387 goto segv_and_exit;
2388
2389 __get_user(pc, &sf->info.si_regs.pc);
2390 __get_user(npc, &sf->info.si_regs.npc);
2391
2392 if ((pc | npc) & 3)
2393 goto segv_and_exit;
2394
2395 /* 2. Restore the state */
2396 __get_user(up_psr, &sf->info.si_regs.psr);
2397
2398 /* User can only change condition codes and FPU enabling in %psr. */
2399 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2400 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2401
2402 env->pc = pc;
2403 env->npc = npc;
2404 __get_user(env->y, &sf->info.si_regs.y);
2405 for (i=0; i < 8; i++) {
2406 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2407 }
2408 for (i=0; i < 8; i++) {
2409 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2410 }
2411
2412 /* FIXME: implement FPU save/restore:
2413 * __get_user(fpu_save, &sf->fpu_save);
2414 * if (fpu_save)
2415 * err |= restore_fpu_state(env, fpu_save);
2416 */
2417
2418 /* This is pretty much atomic, no amount locking would prevent
2419 * the races which exist anyways.
2420 */
2421 __get_user(set.sig[0], &sf->info.si_mask);
2422 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2423 __get_user(set.sig[i], &sf->extramask[i - 1]);
2424 }
2425
2426 target_to_host_sigset_internal(&host_set, &set);
2427 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
2428
2429 if (err)
2430 goto segv_and_exit;
2431 unlock_user_struct(sf, sf_addr, 0);
2432 return env->regwptr[0];
2433
2434 segv_and_exit:
2435 unlock_user_struct(sf, sf_addr, 0);
2436 force_sig(TARGET_SIGSEGV);
2437 }
2438
2439 long do_rt_sigreturn(CPUSPARCState *env)
2440 {
2441 trace_user_do_rt_sigreturn(env, 0);
2442 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
2443 return -TARGET_ENOSYS;
2444 }
2445
2446 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
2447 #define MC_TSTATE 0
2448 #define MC_PC 1
2449 #define MC_NPC 2
2450 #define MC_Y 3
2451 #define MC_G1 4
2452 #define MC_G2 5
2453 #define MC_G3 6
2454 #define MC_G4 7
2455 #define MC_G5 8
2456 #define MC_G6 9
2457 #define MC_G7 10
2458 #define MC_O0 11
2459 #define MC_O1 12
2460 #define MC_O2 13
2461 #define MC_O3 14
2462 #define MC_O4 15
2463 #define MC_O5 16
2464 #define MC_O6 17
2465 #define MC_O7 18
2466 #define MC_NGREG 19
2467
2468 typedef abi_ulong target_mc_greg_t;
2469 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG];
2470
2471 struct target_mc_fq {
2472 abi_ulong *mcfq_addr;
2473 uint32_t mcfq_insn;
2474 };
2475
2476 struct target_mc_fpu {
2477 union {
2478 uint32_t sregs[32];
2479 uint64_t dregs[32];
2480 //uint128_t qregs[16];
2481 } mcfpu_fregs;
2482 abi_ulong mcfpu_fsr;
2483 abi_ulong mcfpu_fprs;
2484 abi_ulong mcfpu_gsr;
2485 struct target_mc_fq *mcfpu_fq;
2486 unsigned char mcfpu_qcnt;
2487 unsigned char mcfpu_qentsz;
2488 unsigned char mcfpu_enab;
2489 };
2490 typedef struct target_mc_fpu target_mc_fpu_t;
2491
2492 typedef struct {
2493 target_mc_gregset_t mc_gregs;
2494 target_mc_greg_t mc_fp;
2495 target_mc_greg_t mc_i7;
2496 target_mc_fpu_t mc_fpregs;
2497 } target_mcontext_t;
2498
2499 struct target_ucontext {
2500 struct target_ucontext *tuc_link;
2501 abi_ulong tuc_flags;
2502 target_sigset_t tuc_sigmask;
2503 target_mcontext_t tuc_mcontext;
2504 };
2505
2506 /* A V9 register window */
2507 struct target_reg_window {
2508 abi_ulong locals[8];
2509 abi_ulong ins[8];
2510 };
2511
2512 #define TARGET_STACK_BIAS 2047
2513
2514 /* {set, get}context() needed for 64-bit SparcLinux userland. */
/* Implement the SPARC64 setcontext trap: from the target_ucontext at
 * %i0 restore pc/npc, the signal mask (only when %i1 is non-zero),
 * ccr/asi/cwp unpacked from tstate, the global and out registers,
 * the saved %fp/%i7 spilled into the register window on the stack,
 * and the FPU state.  Any bad pointer raises SIGSEGV.
 */
void sparc64_set_context(CPUSPARCState *env)
{
    abi_ulong ucp_addr;
    struct target_ucontext *ucp;
    target_mc_gregset_t *grp;
    abi_ulong pc, npc, tstate;
    abi_ulong fp, i7, w_addr;
    unsigned int i;

    ucp_addr = env->regwptr[UREG_I0];
    if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1))
        goto do_sigsegv;
    grp = &ucp->tuc_mcontext.mc_gregs;
    __get_user(pc, &((*grp)[MC_PC]));
    __get_user(npc, &((*grp)[MC_NPC]));
    /* Both program counters must be 4-byte aligned. */
    if ((pc | npc) & 3)
        goto do_sigsegv;
    if (env->regwptr[UREG_I1]) {
        target_sigset_t target_set;
        sigset_t set;

        if (TARGET_NSIG_WORDS == 1) {
            __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
        } else {
            abi_ulong *src, *dst;
            src = ucp->tuc_sigmask.sig;
            dst = target_set.sig;
            for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
                __get_user(*dst, src);
            }
        }
        target_to_host_sigset_internal(&set, &target_set);
        do_sigprocmask(SIG_SETMASK, &set, NULL);
    }
    env->pc = pc;
    env->npc = npc;
    __get_user(env->y, &((*grp)[MC_Y]));
    __get_user(tstate, &((*grp)[MC_TSTATE]));
    /* tstate packs asi in bits 31:24, ccr above bit 32, cwp in 4:0. */
    env->asi = (tstate >> 24) & 0xff;
    cpu_put_ccr(env, tstate >> 32);
    cpu_put_cwp64(env, tstate & 0x1f);
    __get_user(env->gregs[1], (&(*grp)[MC_G1]));
    __get_user(env->gregs[2], (&(*grp)[MC_G2]));
    __get_user(env->gregs[3], (&(*grp)[MC_G3]));
    __get_user(env->gregs[4], (&(*grp)[MC_G4]));
    __get_user(env->gregs[5], (&(*grp)[MC_G5]));
    __get_user(env->gregs[6], (&(*grp)[MC_G6]));
    __get_user(env->gregs[7], (&(*grp)[MC_G7]));
    __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0]));
    __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1]));
    __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2]));
    __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3]));
    __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4]));
    __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5]));
    __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6]));
    __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7]));

    __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
    __get_user(i7, &(ucp->tuc_mcontext.mc_i7));

    /* Write the saved %fp/%i7 into the register window spilled at the
     * (biased) stack pointer. */
    w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
    if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
                 abi_ulong) != 0)
        goto do_sigsegv;
    if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
                 abi_ulong) != 0)
        goto do_sigsegv;
    /* FIXME this does not match how the kernel handles the FPU in
     * its sparc64_set_context implementation. In particular the FPU
     * is only restored if fenab is non-zero in:
     *     __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
     */
    __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
    {
        /* 64 single words map onto 32 upper/lower halves of fpr[]. */
        uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
        for (i = 0; i < 64; i++, src++) {
            if (i & 1) {
                __get_user(env->fpr[i/2].l.lower, src);
            } else {
                __get_user(env->fpr[i/2].l.upper, src);
            }
        }
    }
    __get_user(env->fsr,
               &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
    __get_user(env->gsr,
               &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
    unlock_user_struct(ucp, ucp_addr, 0);
    return;
do_sigsegv:
    unlock_user_struct(ucp, ucp_addr, 0);
    force_sig(TARGET_SIGSEGV);
}
2608
2609 void sparc64_get_context(CPUSPARCState *env)
2610 {
2611 abi_ulong ucp_addr;
2612 struct target_ucontext *ucp;
2613 target_mc_gregset_t *grp;
2614 target_mcontext_t *mcp;
2615 abi_ulong fp, i7, w_addr;
2616 int err;
2617 unsigned int i;
2618 target_sigset_t target_set;
2619 sigset_t set;
2620
2621 ucp_addr = env->regwptr[UREG_I0];
2622 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0))
2623 goto do_sigsegv;
2624
2625 mcp = &ucp->tuc_mcontext;
2626 grp = &mcp->mc_gregs;
2627
2628 /* Skip over the trap instruction, first. */
2629 env->pc = env->npc;
2630 env->npc += 4;
2631
2632 err = 0;
2633
2634 do_sigprocmask(0, NULL, &set);
2635 host_to_target_sigset_internal(&target_set, &set);
2636 if (TARGET_NSIG_WORDS == 1) {
2637 __put_user(target_set.sig[0],
2638 (abi_ulong *)&ucp->tuc_sigmask);
2639 } else {
2640 abi_ulong *src, *dst;
2641 src = target_set.sig;
2642 dst = ucp->tuc_sigmask.sig;
2643 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2644 __put_user(*src, dst);
2645 }
2646 if (err)
2647 goto do_sigsegv;
2648 }
2649
2650 /* XXX: tstate must be saved properly */
2651 // __put_user(env->tstate, &((*grp)[MC_TSTATE]));
2652 __put_user(env->pc, &((*grp)[MC_PC]));
2653 __put_user(env->npc, &((*grp)[MC_NPC]));
2654 __put_user(env->y, &((*grp)[MC_Y]));
2655 __put_user(env->gregs[1], &((*grp)[MC_G1]));
2656 __put_user(env->gregs[2], &((*grp)[MC_G2]));
2657 __put_user(env->gregs[3], &((*grp)[MC_G3]));
2658 __put_user(env->gregs[4], &((*grp)[MC_G4]));
2659 __put_user(env->gregs[5], &((*grp)[MC_G5]));
2660 __put_user(env->gregs[6], &((*grp)[MC_G6]));
2661 __put_user(env->gregs[7], &((*grp)[MC_G7]));
2662 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0]));
2663 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1]));
2664 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2]));
2665 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3]));
2666 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4]));
2667 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5]));
2668 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6]));
2669 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7]));
2670
2671 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2672 fp = i7 = 0;
2673 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2674 abi_ulong) != 0)
2675 goto do_sigsegv;
2676 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2677 abi_ulong) != 0)
2678 goto do_sigsegv;
2679 __put_user(fp, &(mcp->mc_fp));
2680 __put_user(i7, &(mcp->mc_i7));
2681
2682 {
2683 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2684 for (i = 0; i < 64; i++, dst++) {
2685 if (i & 1) {
2686 __put_user(env->fpr[i/2].l.lower, dst);
2687 } else {
2688 __put_user(env->fpr[i/2].l.upper, dst);
2689 }
2690 }
2691 }
2692 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
2693 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
2694 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
2695
2696 if (err)
2697 goto do_sigsegv;
2698 unlock_user_struct(ucp, ucp_addr, 1);
2699 return;
2700 do_sigsegv:
2701 unlock_user_struct(ucp, ucp_addr, 1);
2702 force_sig(TARGET_SIGSEGV);
2703 }
2704 #endif
2705 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
2706
2707 # if defined(TARGET_ABI_MIPSO32)
2708 struct target_sigcontext {
2709 uint32_t sc_regmask; /* Unused */
2710 uint32_t sc_status;
2711 uint64_t sc_pc;
2712 uint64_t sc_regs[32];
2713 uint64_t sc_fpregs[32];
2714 uint32_t sc_ownedfp; /* Unused */
2715 uint32_t sc_fpc_csr;
2716 uint32_t sc_fpc_eir; /* Unused */
2717 uint32_t sc_used_math;
2718 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
2719 uint32_t pad0;
2720 uint64_t sc_mdhi;
2721 uint64_t sc_mdlo;
2722 target_ulong sc_hi1; /* Was sc_cause */
2723 target_ulong sc_lo1; /* Was sc_badvaddr */
2724 target_ulong sc_hi2; /* Was sc_sigset[4] */
2725 target_ulong sc_lo2;
2726 target_ulong sc_hi3;
2727 target_ulong sc_lo3;
2728 };
2729 # else /* N32 || N64 */
2730 struct target_sigcontext {
2731 uint64_t sc_regs[32];
2732 uint64_t sc_fpregs[32];
2733 uint64_t sc_mdhi;
2734 uint64_t sc_hi1;
2735 uint64_t sc_hi2;
2736 uint64_t sc_hi3;
2737 uint64_t sc_mdlo;
2738 uint64_t sc_lo1;
2739 uint64_t sc_lo2;
2740 uint64_t sc_lo3;
2741 uint64_t sc_pc;
2742 uint32_t sc_fpc_csr;
2743 uint32_t sc_used_math;
2744 uint32_t sc_dsp;
2745 uint32_t sc_reserved;
2746 };
2747 # endif /* O32 */
2748
/* Non-RT MIPS signal frame pushed by setup_frame(). */
struct sigframe {
    uint32_t sf_ass[4];		/* argument save space for o32 */
    uint32_t sf_code[2];		/* signal trampoline */
    struct target_sigcontext sf_sc;
    target_sigset_t sf_mask;
};

/* Guest-visible ucontext embedded in the RT signal frame. */
struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;
    target_stack_t tuc_stack;
    target_ulong pad0;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;
};

/* RT MIPS signal frame pushed by setup_rt_frame(). */
struct target_rt_sigframe {
    uint32_t rs_ass[4];               /* argument save space for o32 */
    uint32_t rs_code[2];              /* signal trampoline */
    struct target_siginfo rs_info;
    struct target_ucontext rs_uc;
};
2771
2772 /* Install trampoline to jump back from signal handler */
static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
{
    /*
     * Set up the return code ...
     *
     *         li      v0, __NR__foo_sigreturn
     *         syscall
     *
     * 0x24020000 is "addiu v0, zero, 0" with the syscall number as
     * immediate; 0x0000000c is the "syscall" opcode.
     */
    __put_user(0x24020000 + syscall, tramp + 0);
    __put_user(0x0000000c , tramp + 1);
    /* Always succeeds; the old never-assigned 'err' local is gone. */
    return 0;
}
2788
/* Save the MIPS CPU state into a target_sigcontext: resume pc, all
 * 32 GPRs, the four HI/LO accumulator pairs, the DSP control word
 * and the 32 FPU registers.
 */
static inline void setup_sigcontext(CPUMIPSState *regs,
                                    struct target_sigcontext *sc)
{
    int i;

    __put_user(exception_resume_pc(regs), &sc->sc_pc);
    /* Delivery terminates any pending branch-delay-slot state. */
    regs->hflags &= ~MIPS_HFLAG_BMASK;

    __put_user(0, &sc->sc_regs[0]);
    for (i = 1; i < 32; ++i) {
        __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
    }

    __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
    __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);

    /* Rather than checking for dsp existence, always copy.  The storage
       would just be garbage otherwise. */
    __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
    __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
    __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
    __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
    __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
    __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
    {
        uint32_t dsp = cpu_rddsp(0x3ff, regs);
        __put_user(dsp, &sc->sc_dsp);
    }

    __put_user(1, &sc->sc_used_math);

    for (i = 0; i < 32; ++i) {
        __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
    }
}
2824
/* Restore MIPS CPU state from a target_sigcontext: the saved pc goes
 * into CP0_EPC (the caller copies it to PC), followed by the GPRs,
 * HI/LO pairs, DSP control word and FPU registers.
 */
static inline void
restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
{
    int i;

    __get_user(regs->CP0_EPC, &sc->sc_pc);

    __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
    __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);

    /* $zero (gpr[0]) is never restored. */
    for (i = 1; i < 32; ++i) {
        __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
    }

    __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
    __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
    __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
    __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
    __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
    __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
    {
        uint32_t dsp;
        __get_user(dsp, &sc->sc_dsp);
        cpu_wrdsp(dsp, 0x3ff, regs);
    }

    for (i = 0; i < 32; ++i) {
        __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
    }
}
2855
2856 /*
2857 * Determine which stack to use..
2858 */
2859 static inline abi_ulong
2860 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
2861 {
2862 unsigned long sp;
2863
2864 /* Default to using normal stack */
2865 sp = regs->active_tc.gpr[29];
2866
2867 /*
2868 * FPU emulator may have its own trampoline active just
2869 * above the user stack, 16-bytes before the next lowest
2870 * 16 byte boundary. Try to avoid trashing it.
2871 */
2872 sp -= 32;
2873
2874 /* This is the X/Open sanctioned signal stack switching. */
2875 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
2876 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2877 }
2878
2879 return (sp - frame_size) & ~7;
2880 }
2881
2882 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
2883 {
2884 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
2885 env->hflags &= ~MIPS_HFLAG_M16;
2886 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
2887 env->active_tc.PC &= ~(target_ulong) 1;
2888 }
2889 }
2890
2891 # if defined(TARGET_ABI_MIPSO32)
2892 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
/* Build and deliver a non-RT MIPS o32 signal frame: trampoline,
 * sigcontext and blocked mask go on the guest stack; a0/a1/a2, $29,
 * $31, $25 and PC are set up for the handler.
 */
static void setup_frame(int sig, struct target_sigaction * ka,
                        target_sigset_t *set, CPUMIPSState *regs)
{
    struct sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, regs, sizeof(*frame));
    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);

    setup_sigcontext(regs, &frame->sf_sc);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->sf_mask.sig[i]);
    }

    /*
    * Arguments to signal handler:
    *
    *   a0 = signal number
    *   a1 = 0 (should be cause)
    *   a2 = pointer to struct sigcontext
    *
    * $25 and PC point to the signal handler, $29 points to the
    * struct sigframe.
    */
    regs->active_tc.gpr[ 4] = sig;
    regs->active_tc.gpr[ 5] = 0;
    regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
    regs->active_tc.gpr[29] = frame_addr;
    regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
    /* The original kernel code sets CP0_EPC to the handler
    * since it returns to userland using eret
    * we cannot do this here, and we must set PC directly */
    regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
    mips_set_hflags_isa_mode_from_pc(regs);
    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    force_sig(TARGET_SIGSEGV/*, current*/);
}
2939
/* sigreturn(2) for MIPS o32: read the frame back from $29, restore
 * the blocked-signal mask and CPU state, and resume at the saved pc.
 * Returns -TARGET_QEMU_ESIGRETURN so the caller leaves v0 untouched,
 * or forces SIGSEGV on a bad frame.
 */
long do_sigreturn(CPUMIPSState *regs)
{
    struct sigframe *frame;
    abi_ulong frame_addr;
    sigset_t blocked;
    target_sigset_t target_set;
    int i;

    frame_addr = regs->active_tc.gpr[29];
    trace_user_do_sigreturn(regs, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
    }

    target_to_host_sigset_internal(&blocked, &target_set);
    do_sigprocmask(SIG_SETMASK, &blocked, NULL);

    restore_sigcontext(regs, &frame->sf_sc);

#if 0
    /*
     * Don't let your children do this ...
     */
    __asm__ __volatile__(
   	"move\t$29, %0\n\t"
   	"j\tsyscall_exit"
   	:/* no outputs */
   	:"r" (&regs));
    /* Unreached */
#endif

    regs->active_tc.PC = regs->CP0_EPC;
    mips_set_hflags_isa_mode_from_pc(regs);
    /* I am not sure this is right, but it seems to work
    * maybe a problem with nested signals ? */
    regs->CP0_EPC = 0;
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV/*, current*/);
    return 0;
}
2985 # endif /* O32 */
2986
/* Build and deliver an RT MIPS signal frame: trampoline, siginfo and
 * a full ucontext (flags, link, sigaltstack, mcontext, sigmask) go
 * on the guest stack; a0/a1/a2, $29, $31, $25 and PC are set up for
 * the three-argument handler.
 */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUMIPSState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);

    tswap_siginfo(&frame->rs_info, info);

    __put_user(0, &frame->rs_uc.tuc_flags);
    __put_user(0, &frame->rs_uc.tuc_link);
    /* Record the current sigaltstack configuration for sigreturn. */
    __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->rs_uc.tuc_stack.ss_flags);

    setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
    }

    /*
    * Arguments to signal handler:
    *
    *   a0 = signal number
    *   a1 = pointer to siginfo_t
    *   a2 = pointer to struct ucontext
    *
    * $25 and PC point to the signal handler, $29 points to the
    * struct sigframe.
    */
    env->active_tc.gpr[ 4] = sig;
    env->active_tc.gpr[ 5] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_info);
    env->active_tc.gpr[ 6] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_uc);
    env->active_tc.gpr[29] = frame_addr;
    env->active_tc.gpr[31] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_code);
    /* The original kernel code sets CP0_EPC to the handler
    * since it returns to userland using eret
    * we cannot do this here, and we must set PC directly */
    env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
    mips_set_hflags_isa_mode_from_pc(env);
    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sig(TARGET_SIGSEGV/*, current*/);
}
3047
/* rt_sigreturn(2) for MIPS: restore signal mask, CPU state and the
 * sigaltstack settings from the RT frame at $29, then resume at the
 * saved pc.  Returns -TARGET_QEMU_ESIGRETURN so v0 is left alone, or
 * forces SIGSEGV on a bad frame.
 */
long do_rt_sigreturn(CPUMIPSState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    sigset_t blocked;

    frame_addr = env->active_tc.gpr[29];
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
    do_sigprocmask(SIG_SETMASK, &blocked, NULL);

    restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);

    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT)
        goto badframe;

    env->active_tc.PC = env->CP0_EPC;
    mips_set_hflags_isa_mode_from_pc(env);
    /* I am not sure this is right, but it seems to work
    * maybe a problem with nested signals ? */
    env->CP0_EPC = 0;
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV/*, current*/);
    return 0;
}
3080
3081 #elif defined(TARGET_SH4)
3082
3083 /*
3084 * code and data structures from linux kernel:
3085 * include/asm-sh/sigcontext.h
3086 * arch/sh/kernel/signal.c
3087 */
3088
/* Guest-visible SH4 sigcontext (include/asm-sh/sigcontext.h). */
struct target_sigcontext {
    target_ulong  oldmask;

    /* CPU registers */
    target_ulong  sc_gregs[16];
    target_ulong  sc_pc;
    target_ulong  sc_pr;
    target_ulong  sc_sr;
    target_ulong  sc_gbr;
    target_ulong  sc_mach;
    target_ulong  sc_macl;

    /* FPU registers */
    target_ulong  sc_fpregs[16];
    target_ulong  sc_xfpregs[16];
    unsigned int sc_fpscr;
    unsigned int sc_fpul;
    unsigned int sc_ownedfp;
};

/* Non-RT SH4 signal frame: sigcontext, extra mask words, trampoline. */
struct target_sigframe
{
    struct target_sigcontext sc;
    target_ulong extramask[TARGET_NSIG_WORDS-1];
    uint16_t retcode[3];
};

/* Guest-visible ucontext used by the RT frame. */
struct target_ucontext {
    target_ulong tuc_flags;
    struct target_ucontext *tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;	/* mask last for extensibility */
};

/* RT SH4 signal frame: siginfo, ucontext, trampoline. */
struct target_rt_sigframe
{
    struct target_siginfo info;
    struct target_ucontext uc;
    uint16_t retcode[3];
};
3131
3132
3133 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3134 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
3135
3136 static abi_ulong get_sigframe(struct target_sigaction *ka,
3137 unsigned long sp, size_t frame_size)
3138 {
3139 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3140 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3141 }
3142
3143 return (sp - frame_size) & -8ul;
3144 }
3145
/* Save the SH4 CPU state into a target_sigcontext: the 16 general
 * registers plus gbr/mach/macl/pr/sr/pc, the 16 FP registers,
 * fpscr/fpul, and the first word of the blocked mask in oldmask.
 */
static void setup_sigcontext(struct target_sigcontext *sc,
                             CPUSH4State *regs, unsigned long mask)
{
    int i;

/* COPY pairs each CPU field with the identically named sc_ field. */
#define COPY(x)         __put_user(regs->x, &sc->sc_##x)
    COPY(gregs[0]); COPY(gregs[1]);
    COPY(gregs[2]); COPY(gregs[3]);
    COPY(gregs[4]); COPY(gregs[5]);
    COPY(gregs[6]); COPY(gregs[7]);
    COPY(gregs[8]); COPY(gregs[9]);
    COPY(gregs[10]); COPY(gregs[11]);
    COPY(gregs[12]); COPY(gregs[13]);
    COPY(gregs[14]); COPY(gregs[15]);
    COPY(gbr); COPY(mach);
    COPY(macl); COPY(pr);
    COPY(sr); COPY(pc);
#undef COPY

    for (i=0; i<16; i++) {
        __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
    }
    __put_user(regs->fpscr, &sc->sc_fpscr);
    __put_user(regs->fpul, &sc->sc_fpul);

    /* non-iBCS2 extensions.. */
    __put_user(mask, &sc->oldmask);
}
3174
3175 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc,
3176 target_ulong *r0_p)
3177 {
3178 int i;
3179
3180 #define COPY(x) __get_user(regs->x, &sc->sc_##x)
3181 COPY(gregs[1]);
3182 COPY(gregs[2]); COPY(gregs[3]);
3183 COPY(gregs[4]); COPY(gregs[5]);
3184 COPY(gregs[6]); COPY(gregs[7]);
3185 COPY(gregs[8]); COPY(gregs[9]);
3186 COPY(gregs[10]); COPY(gregs[11]);
3187 COPY(gregs[12]); COPY(gregs[13]);
3188 COPY(gregs[14]); COPY(gregs[15]);
3189 COPY(gbr); COPY(mach);
3190 COPY(macl); COPY(pr);
3191 COPY(sr); COPY(pc);
3192 #undef COPY
3193
3194 for (i=0; i<16; i++) {
3195 __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
3196 }
3197 __get_user(regs->fpscr, &sc->sc_fpscr);
3198 __get_user(regs->fpul, &sc->sc_fpul);
3199
3200 regs->tra = -1; /* disable syscall checks */
3201 __get_user(*r0_p, &sc->sc_gregs[0]);
3202 }
3203
3204 static void setup_frame(int sig, struct target_sigaction *ka,
3205 target_sigset_t *set, CPUSH4State *regs)
3206 {
3207 struct target_sigframe *frame;
3208 abi_ulong frame_addr;
3209 int i;
3210
3211 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3212 trace_user_setup_frame(regs, frame_addr);
3213 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3214 goto give_sigsegv;
3215
3216 setup_sigcontext(&frame->sc, regs, set->sig[0]);
3217
3218 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
3219 __put_user(set->sig[i + 1], &frame->extramask[i]);
3220 }
3221
3222 /* Set up to return from userspace. If provided, use a stub
3223 already in userspace. */
3224 if (ka->sa_flags & TARGET_SA_RESTORER) {
3225 regs->pr = (unsigned long) ka->sa_restorer;
3226 } else {
3227 /* Generate return code (system call to sigreturn) */
3228 abi_ulong retcode_addr = frame_addr +
3229 offsetof(struct target_sigframe, retcode);
3230 __put_user(MOVW(2), &frame->retcode[0]);
3231 __put_user(TRAP_NOARG, &frame->retcode[1]);
3232 __put_user((TARGET_NR_sigreturn