/* qemu.git: linux-user/signal.c */
1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include <sys/ucontext.h>
22 #include <sys/resource.h>
23
24 #include "qemu.h"
25 #include "qemu-common.h"
26 #include "target_signal.h"
27 #include "trace.h"
28
/* The guest's registered sigaltstack; starts out disabled (no stack). */
static struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

/* Per-signal guest sigaction state, indexed by (target signal number - 1). */
static struct target_sigaction sigact_table[TARGET_NSIG];

/* Host-side SA_SIGINFO handler installed by signal_init()/do_sigaction(). */
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);
/* Host -> target signal number mapping.  Entries left at 0 here are
 * filled in identity-mapped by signal_init(). */
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
/* Inverse of the table above; populated at startup by signal_init(). */
static uint8_t target_to_host_signal_table[_NSIG];
84
85 static inline int on_sig_stack(unsigned long sp)
86 {
87 return (sp - target_sigaltstack_used.ss_sp
88 < target_sigaltstack_used.ss_size);
89 }
90
91 static inline int sas_ss_flags(unsigned long sp)
92 {
93 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
94 : on_sig_stack(sp) ? SS_ONSTACK : 0);
95 }
96
97 int host_to_target_signal(int sig)
98 {
99 if (sig < 0 || sig >= _NSIG)
100 return sig;
101 return host_to_target_signal_table[sig];
102 }
103
104 int target_to_host_signal(int sig)
105 {
106 if (sig < 0 || sig >= _NSIG)
107 return sig;
108 return target_to_host_signal_table[sig];
109 }
110
111 static inline void target_sigemptyset(target_sigset_t *set)
112 {
113 memset(set, 0, sizeof(*set));
114 }
115
116 static inline void target_sigaddset(target_sigset_t *set, int signum)
117 {
118 signum--;
119 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
120 set->sig[signum / TARGET_NSIG_BPW] |= mask;
121 }
122
123 static inline int target_sigismember(const target_sigset_t *set, int signum)
124 {
125 signum--;
126 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
127 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
128 }
129
130 static void host_to_target_sigset_internal(target_sigset_t *d,
131 const sigset_t *s)
132 {
133 int i;
134 target_sigemptyset(d);
135 for (i = 1; i <= TARGET_NSIG; i++) {
136 if (sigismember(s, i)) {
137 target_sigaddset(d, host_to_target_signal(i));
138 }
139 }
140 }
141
142 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
143 {
144 target_sigset_t d1;
145 int i;
146
147 host_to_target_sigset_internal(&d1, s);
148 for(i = 0;i < TARGET_NSIG_WORDS; i++)
149 d->sig[i] = tswapal(d1.sig[i]);
150 }
151
152 static void target_to_host_sigset_internal(sigset_t *d,
153 const target_sigset_t *s)
154 {
155 int i;
156 sigemptyset(d);
157 for (i = 1; i <= TARGET_NSIG; i++) {
158 if (target_sigismember(s, i)) {
159 sigaddset(d, target_to_host_signal(i));
160 }
161 }
162 }
163
164 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
165 {
166 target_sigset_t s1;
167 int i;
168
169 for(i = 0;i < TARGET_NSIG_WORDS; i++)
170 s1.sig[i] = tswapal(s->sig[i]);
171 target_to_host_sigset_internal(d, &s1);
172 }
173
174 void host_to_target_old_sigset(abi_ulong *old_sigset,
175 const sigset_t *sigset)
176 {
177 target_sigset_t d;
178 host_to_target_sigset(&d, sigset);
179 *old_sigset = d.sig[0];
180 }
181
182 void target_to_host_old_sigset(sigset_t *sigset,
183 const abi_ulong *old_sigset)
184 {
185 target_sigset_t d;
186 int i;
187
188 d.sig[0] = *old_sigset;
189 for(i = 1;i < TARGET_NSIG_WORDS; i++)
190 d.sig[i] = 0;
191 target_to_host_sigset(sigset, &d);
192 }
193
/* Block all host signals for the current thread and mark guest signal
 * delivery pending.  Returns the previous value of signal_pending, i.e.
 * nonzero when a signal was already pending before this call. */
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;
    int pending;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    /* Atomically set the pending flag and fetch its prior value;
     * must happen after the host mask is in place. */
    pending = atomic_xchg(&ts->signal_pending, 1);

    return pending;
}
211
/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    /* Reading the old mask cannot fail, so do it before any early return. */
    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        /* Serialize against signal delivery; restart the syscall if a
         * signal was already queued. */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}
258
#if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
    !defined(TARGET_X86_64)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
static void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif
271
272 /* siginfo conversion */
273
/* Convert a host siginfo_t into the guest layout WITHOUT byte-swapping;
 * tswap_siginfo() does the swap later.  Records which union member was
 * filled in (the QEMU_SI_* type) in the top 16 bits of si_code. */
static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    /* Stash the union-member tag in the high half of si_code. */
    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
342
/* Byte-swap a target_siginfo_t for the guest.  Uses the QEMU_SI_* tag
 * stored in the top 16 bits of si_code by host_to_target_siginfo_noswap()
 * to know which union member is valid, then strips the tag (the low
 * 16 bits are sign-extended back into si_code). */
static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}
400
401 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
402 {
403 host_to_target_siginfo_noswap(tinfo, info);
404 tswap_siginfo(tinfo, tinfo);
405 }
406
/* XXX: we support only POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
/* Convert a guest siginfo to host layout.  Only used by rt_sigqueueinfo,
 * so only the _rt union member is converted. */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    /* Truncation possible on 64-bit hosts, see XXX above. */
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}
424
425 static int fatal_signal (int sig)
426 {
427 switch (sig) {
428 case TARGET_SIGCHLD:
429 case TARGET_SIGURG:
430 case TARGET_SIGWINCH:
431 /* Ignored by default. */
432 return 0;
433 case TARGET_SIGCONT:
434 case TARGET_SIGSTOP:
435 case TARGET_SIGTSTP:
436 case TARGET_SIGTTIN:
437 case TARGET_SIGTTOU:
438 /* Job control signals. */
439 return 0;
440 default:
441 return 1;
442 }
443 }
444
445 /* returns 1 if given signal should dump core if not handled */
446 static int core_dump_signal(int sig)
447 {
448 switch (sig) {
449 case TARGET_SIGABRT:
450 case TARGET_SIGFPE:
451 case TARGET_SIGILL:
452 case TARGET_SIGQUIT:
453 case TARGET_SIGSEGV:
454 case TARGET_SIGTRAP:
455 case TARGET_SIGBUS:
456 return (1);
457 default:
458 return (0);
459 }
460 }
461
/* One-time startup: complete the signal translation tables, capture the
 * inherited host signal mask, and install host_signal_handler() for all
 * default-fatal signals so the emulator can intercept them. */
void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for(i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for(i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for(i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        /* Record whatever disposition we inherited from exec. */
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use. We need at least
           SIGSEGV and SIGBUS, to detect exceptions. We can not just
           trap all signals because it affects syscall interrupt
           behavior. But do trap all default-fatal signals. */
        if (fatal_signal (i))
            sigaction(host_sig, &act, NULL);
    }
}
508
509
/* abort execution with signal */
/* Dumps a guest core (when the binfmt supports it), then re-raises the
 * host signal with the default disposition so the process dies with the
 * proper wait status (-<signal>).  Never returns. */
static void QEMU_NORETURN force_sig(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur=0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourself a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
    of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}
563
/* queue a signal so that it will be send to the virtual CPU as soon
   as possible */
/* Records @info in the task's sync_signal slot and flags signal_pending.
 * Always returns 1 (signal queued). */
int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    /* Currently all callers define siginfo structures which
     * use the _sifields._sigfault union member, so we can
     * set the type here. If that changes we should push this
     * out so the si_type is passed in by callers.
     */
    info->si_code = deposit32(info->si_code, 16, 16, QEMU_SI_FAULT);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}
586
#ifndef HAVE_SAFE_SYSCALL
/* Fallback used when the host has no safe-syscall support. */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif
593
/* The host SA_SIGINFO handler: forwards CPU exceptions to the emulator,
 * otherwise records the signal in the per-task sigtab for delivery by
 * process_pending_signals(), and kicks the CPU out of the guest. */
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* the CPU emulator uses some host signals to detect exceptions,
       we forward to it some signals */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     */
    sigfillset(&uc->uc_sigmask);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}
641
/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    /* Snapshot the current settings first so the value returned in
     * uoss reflects the state before any change below. */
    if(uoss_addr)
    {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if(uss_addr)
    {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        /* Cannot change the stack we are currently executing on. */
        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}
714
/* do_sigaction() return target values and host errnos */
/* Updates the guest's sigaction table and mirrors the change into the
 * host kernel (except for SIGSEGV/SIGBUS, which must stay with the
 * emulator's handler for exception detection). */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#if !defined(TARGET_MIPS)
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped. */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe. */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#if !defined(TARGET_MIPS)
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset. */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal (sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
777
778 #if defined(TARGET_I386) && TARGET_ABI_BITS == 32
779
780 /* from the Linux kernel */
781
/* i386 FPU register image as laid out in the signal frame (from the
 * Linux kernel).  Layouts are guest-ABI; do not reorder fields. */
struct target_fpreg {
    uint16_t significand[4];
    uint16_t exponent;
};

/* FXSR-format x87 register (padded to 16 bytes). */
struct target_fpxreg {
    uint16_t significand[4];
    uint16_t exponent;
    uint16_t padding[3];
};

struct target_xmmreg {
    abi_ulong element[4];
};

/* FPU state saved in the signal frame: classic FSAVE area followed by
 * the FXSR extension. */
struct target_fpstate {
    /* Regular FPU environment */
    abi_ulong cw;
    abi_ulong sw;
    abi_ulong tag;
    abi_ulong ipoff;
    abi_ulong cssel;
    abi_ulong dataoff;
    abi_ulong datasel;
    struct target_fpreg _st[8];
    uint16_t  status;
    uint16_t  magic;          /* 0xffff = regular FPU data only */

    /* FXSR FPU environment */
    abi_ulong _fxsr_env[6];   /* FXSR FPU env is ignored */
    abi_ulong mxcsr;
    abi_ulong reserved;
    struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
    struct target_xmmreg _xmm[8];
    abi_ulong padding[56];
};
818
#define X86_FXSR_MAGIC          0x0000

/* i386 sigcontext as seen by the guest (mirrors the kernel layout). */
struct target_sigcontext {
    uint16_t gs, __gsh;
    uint16_t fs, __fsh;
    uint16_t es, __esh;
    uint16_t ds, __dsh;
    abi_ulong edi;
    abi_ulong esi;
    abi_ulong ebp;
    abi_ulong esp;
    abi_ulong ebx;
    abi_ulong edx;
    abi_ulong ecx;
    abi_ulong eax;
    abi_ulong trapno;
    abi_ulong err;
    abi_ulong eip;
    uint16_t cs, __csh;
    abi_ulong eflags;
    abi_ulong esp_at_signal;
    uint16_t ss, __ssh;
    abi_ulong fpstate; /* pointer */
    abi_ulong oldmask;
    abi_ulong cr2;
};
845
struct target_ucontext {
    abi_ulong         tuc_flags;
    abi_ulong         tuc_link;
    target_stack_t    tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t   tuc_sigmask;  /* mask last for extensibility */
};

/* Classic (non-RT) signal frame pushed on the guest stack by
 * setup_frame().  retcode holds the sigreturn trampoline. */
struct sigframe
{
    abi_ulong pretcode;
    int sig;
    struct target_sigcontext sc;
    struct target_fpstate fpstate;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    char retcode[8];
};

/* RT signal frame pushed by setup_rt_frame(): adds siginfo + ucontext. */
struct rt_sigframe
{
    abi_ulong pretcode;
    int sig;
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext uc;
    struct target_fpstate fpstate;
    char retcode[8];
};
875
/*
 * Set up a signal frame.
 */

/* XXX: save x87 state */
/* Fill @sc with the current CPU state and save the FPU image at
 * @fpstate_addr (guest address of @fpstate, already locked by caller). */
static void setup_sigcontext(struct target_sigcontext *sc,
        struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
        abi_ulong fpstate_addr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint16_t magic;

    /* already locked in setup_frame() */
    __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
    __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
    __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
    __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
    __put_user(env->regs[R_EDI], &sc->edi);
    __put_user(env->regs[R_ESI], &sc->esi);
    __put_user(env->regs[R_EBP], &sc->ebp);
    __put_user(env->regs[R_ESP], &sc->esp);
    __put_user(env->regs[R_EBX], &sc->ebx);
    __put_user(env->regs[R_EDX], &sc->edx);
    __put_user(env->regs[R_ECX], &sc->ecx);
    __put_user(env->regs[R_EAX], &sc->eax);
    __put_user(cs->exception_index, &sc->trapno);
    __put_user(env->error_code, &sc->err);
    __put_user(env->eip, &sc->eip);
    __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
    __put_user(env->eflags, &sc->eflags);
    __put_user(env->regs[R_ESP], &sc->esp_at_signal);
    __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);

    /* Write the x87 image to guest memory, then tag it as FSAVE-only. */
    cpu_x86_fsave(env, fpstate_addr, 1);
    fpstate->status = fpstate->sw;
    magic = 0xffff;
    __put_user(magic, &fpstate->magic);
    __put_user(fpstate_addr, &sc->fpstate);

    /* non-iBCS2 extensions.. */
    __put_user(mask, &sc->oldmask);
    __put_user(env->cr[2], &sc->cr2);
}
919
/*
 * Determine which stack to use..
 */

/* Pick the guest stack pointer for the new signal frame: the alternate
 * stack for SA_ONSTACK handlers not already on it, otherwise the current
 * stack (with a legacy sa_restorer fallback).  Result is 8-byte aligned. */
static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
{
    unsigned long esp;

    /* Default to using normal stack */
    esp = env->regs[R_ESP];
    /* This is the X/Open sanctioned signal stack switching.  */
    if (ka->sa_flags & TARGET_SA_ONSTACK) {
        if (sas_ss_flags(esp) == 0) {
            esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
        }
    } else {

        /* This is the legacy signal stack switching. */
        if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
                !(ka->sa_flags & TARGET_SA_RESTORER) &&
                ka->sa_restorer) {
            esp = (unsigned long) ka->sa_restorer;
        }
    }
    return (esp - frame_size) & -8ul;
}
947
/* compare linux/arch/i386/kernel/signal.c:setup_frame() */
/* Build a classic (non-RT) signal frame on the guest stack and point the
 * CPU at the handler.  On an unwritable frame, falls through to SIGSEGV. */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr;
    struct sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);

    setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
            frame_addr + offsetof(struct sigframe, fpstate));

    /* Words 1..N of the old mask live after the sigcontext. */
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        abi_ulong retcode_addr;
        retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
        __put_user(retcode_addr, &frame->pretcode);
        /* This is popl %eax ; movl $,%eax ; int $0x80 */
        val16 = 0xb858;
        __put_user(val16, (uint16_t *)(frame->retcode+0));
        __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+6));
    }


    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    /* Force default disposition so the forced SIGSEGV can't recurse. */
    if (sig == TARGET_SIGSEGV) {
        ka->_sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV /* , current */);
}
1009
/* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
/* Build an RT signal frame (siginfo + ucontext) on the guest stack and
 * point the CPU at the handler.  Unwritable frame => SIGSEGV. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr, addr;
    struct rt_sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);
    /* Guest addresses of the in-frame siginfo and ucontext, passed as
     * the handler's 2nd and 3rd arguments. */
    addr = frame_addr + offsetof(struct rt_sigframe, info);
    __put_user(addr, &frame->pinfo);
    addr = frame_addr + offsetof(struct rt_sigframe, uc);
    __put_user(addr, &frame->puc);
    tswap_siginfo(&frame->info, info);

    /* Create the ucontext.  */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
            set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        addr = frame_addr + offsetof(struct rt_sigframe, retcode);
        __put_user(addr, &frame->pretcode);
        /* This is movl $,%eax ; int $0x80 */
        __put_user(0xb8, (char *)(frame->retcode+0));
        __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+5));
    }

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    /* Force default disposition so the forced SIGSEGV can't recurse. */
    if (sig == TARGET_SIGSEGV) {
        ka->_sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV /* , current */);
}
1082
/*
 * Restore guest CPU state from an i386 sigcontext previously written by
 * setup_sigcontext().  Returns 0 on success, 1 if the saved FP state
 * lies in unreadable guest memory.
 */
static int
restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
{
    unsigned int err = 0;
    abi_ulong fpstate_addr;
    unsigned int tmpflags;

    cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
    cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
    cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
    cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));

    env->regs[R_EDI] = tswapl(sc->edi);
    env->regs[R_ESI] = tswapl(sc->esi);
    env->regs[R_EBP] = tswapl(sc->ebp);
    env->regs[R_ESP] = tswapl(sc->esp);
    env->regs[R_EBX] = tswapl(sc->ebx);
    env->regs[R_EDX] = tswapl(sc->edx);
    env->regs[R_ECX] = tswapl(sc->ecx);
    env->regs[R_EAX] = tswapl(sc->eax);
    env->eip = tswapl(sc->eip);

    /* Force RPL 3 so the guest stays in user mode. */
    cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
    cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);

    /* Only let the guest alter eflags bits in the 0x40DD5 mask;
       everything else keeps its current value. */
    tmpflags = tswapl(sc->eflags);
    env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
    //		regs->orig_eax = -1;		/* disable syscall checks */

    /* Reload the FPU state, if any was saved. */
    fpstate_addr = tswapl(sc->fpstate);
    if (fpstate_addr != 0) {
        if (!access_ok(VERIFY_READ, fpstate_addr,
                       sizeof(struct target_fpstate)))
            goto badframe;
        cpu_x86_frstor(env, fpstate_addr, 1);
    }

    return err;
badframe:
    return 1;
}
1124
/*
 * Guest sigreturn for a non-rt i386 frame: recover the blocked-signal
 * mask and CPU state from the frame that setup_frame() pushed at
 * ESP - 8, then resume the interrupted context.  On a bad frame the
 * guest gets a SIGSEGV instead.
 */
long do_sigreturn(CPUX86State *env)
{
    struct sigframe *frame;
    abi_ulong frame_addr = env->regs[R_ESP] - 8;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    trace_user_do_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    /* set blocked signals */
    __get_user(target_set.sig[0], &frame->sc.oldmask);
    /* Remaining mask words live in the extramask[] array. */
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    /* restore registers */
    if (restore_sigcontext(env, &frame->sc))
        goto badframe;
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return 0;
}
1156
/*
 * Guest rt_sigreturn for i386: restore signal mask, CPU state and the
 * signal-stack settings from the rt frame that setup_rt_frame() pushed
 * at ESP - 4.  A bad frame raises SIGSEGV.
 */
long do_rt_sigreturn(CPUX86State *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe *frame;
    sigset_t set;

    frame_addr = env->regs[R_ESP] - 4;
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
        goto badframe;
    }

    /* Re-apply the sigaltstack state saved in the ucontext. */
    if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
                       get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return 0;
}
1187
1188 #elif defined(TARGET_AARCH64)
1189
/* Guest-visible AArch64 sigcontext; layout must match the Linux kernel. */
struct target_sigcontext {
    uint64_t fault_address;
    /* AArch64 registers */
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
    /* 4K reserved for FP/SIMD state and future expansion */
    char __reserved[4096] __attribute__((__aligned__(16)));
};

/* Guest-visible AArch64 ucontext. */
struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    /* glibc uses a 1024-bit sigset_t */
    char __unused[1024 / 8 - sizeof(target_sigset_t)];
    /* last for future expansion */
    struct target_sigcontext tuc_mcontext;
};

/*
 * Header to be used at the beginning of structures extending the user
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
 */
struct target_aarch64_ctx {
    uint32_t magic;
    uint32_t size;
};

#define TARGET_FPSIMD_MAGIC 0x46508001

/* FP/SIMD record stored inside sigcontext.__reserved, tagged with
   TARGET_FPSIMD_MAGIC. */
struct target_fpsimd_context {
    struct target_aarch64_ctx head;
    uint32_t fpsr;
    uint32_t fpcr;
    uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
};

/*
 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
 * user space as it will change with the addition of new context. User space
 * should check the magic/size information.
 */
struct target_aux_context {
    struct target_fpsimd_context fpsimd;
    /* additional context to be added before "end" */
    struct target_aarch64_ctx end;
};

/* Complete signal frame pushed on the guest stack for AArch64. */
struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    uint64_t fp;
    uint64_t lr;
    uint32_t tramp[2];
};
1250
/*
 * Fill in an already-locked AArch64 rt_sigframe from current CPU state:
 * general registers, SP/PC/PSTATE, fault address, the blocked-signal
 * mask, and the FP/SIMD record (plus the terminating "end" record)
 * inside the sigcontext reserved area.  Always returns 0.
 */
static int target_setup_sigframe(struct target_rt_sigframe *sf,
                                 CPUARMState *env, target_sigset_t *set)
{
    int i;
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;

    /* set up the stack frame for unwinding */
    __put_user(env->xregs[29], &sf->fp);
    __put_user(env->xregs[30], &sf->lr);

    for (i = 0; i < 31; i++) {
        __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }
    __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);

    __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
    }

    /* Each 128-bit vector register is stored as two 64-bit halves;
       swap the halves when the target is big-endian. */
    for (i = 0; i < 32; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
        __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
        __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
#else
        __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
        __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
    __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
    __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
    __put_user(sizeof(struct target_fpsimd_context),
               &aux->fpsimd.head.size);

    /* set the "end" magic */
    __put_user(0, &aux->end.magic);
    __put_user(0, &aux->end.size);

    return 0;
}
1296
/*
 * Inverse of target_setup_sigframe(): reload signal mask, general
 * registers, SP/PC/PSTATE and FP/SIMD state from a guest frame.
 * Returns 0 on success, 1 if the FP/SIMD record's magic or size does
 * not match (corrupt or foreign frame).
 */
static int target_restore_sigframe(CPUARMState *env,
                                   struct target_rt_sigframe *sf)
{
    sigset_t set;
    int i;
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
    uint32_t magic, size, fpsr, fpcr;
    uint64_t pstate;

    target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
    set_sigmask(&set);

    for (i = 0; i < 31; i++) {
        __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }

    __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
    pstate_write(env, pstate);

    /* Validate the FP/SIMD record before trusting its contents. */
    __get_user(magic, &aux->fpsimd.head.magic);
    __get_user(size, &aux->fpsimd.head.size);

    if (magic != TARGET_FPSIMD_MAGIC
        || size != sizeof(struct target_fpsimd_context)) {
        return 1;
    }

    /* Vector registers were saved as two 64-bit halves, swapped on
       big-endian targets; undo that here. */
    for (i = 0; i < 32; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
        __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
        __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
#else
        __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
        __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __get_user(fpsr, &aux->fpsimd.fpsr);
    vfp_set_fpsr(env, fpsr);
    __get_user(fpcr, &aux->fpsimd.fpcr);
    vfp_set_fpcr(env, fpcr);

    return 0;
}
1343
1344 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
1345 {
1346 abi_ulong sp;
1347
1348 sp = env->xregs[31];
1349
1350 /*
1351 * This is the X/Open sanctioned signal stack switching.
1352 */
1353 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1354 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1355 }
1356
1357 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;
1358
1359 return sp;
1360 }
1361
1362 static void target_setup_frame(int usig, struct target_sigaction *ka,
1363 target_siginfo_t *info, target_sigset_t *set,
1364 CPUARMState *env)
1365 {
1366 struct target_rt_sigframe *frame;
1367 abi_ulong frame_addr, return_addr;
1368
1369 frame_addr = get_sigframe(ka, env);
1370 trace_user_setup_frame(env, frame_addr);
1371 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1372 goto give_sigsegv;
1373 }
1374
1375 __put_user(0, &frame->uc.tuc_flags);
1376 __put_user(0, &frame->uc.tuc_link);
1377
1378 __put_user(target_sigaltstack_used.ss_sp,
1379 &frame->uc.tuc_stack.ss_sp);
1380 __put_user(sas_ss_flags(env->xregs[31]),
1381 &frame->uc.tuc_stack.ss_flags);
1382 __put_user(target_sigaltstack_used.ss_size,
1383 &frame->uc.tuc_stack.ss_size);
1384 target_setup_sigframe(frame, env, set);
1385 if (ka->sa_flags & TARGET_SA_RESTORER) {
1386 return_addr = ka->sa_restorer;
1387 } else {
1388 /* mov x8,#__NR_rt_sigreturn; svc #0 */
1389 __put_user(0xd2801168, &frame->tramp[0]);
1390 __put_user(0xd4000001, &frame->tramp[1]);
1391 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
1392 }
1393 env->xregs[0] = usig;
1394 env->xregs[31] = frame_addr;
1395 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
1396 env->pc = ka->_sa_handler;
1397 env->xregs[30] = return_addr;
1398 if (info) {
1399 tswap_siginfo(&frame->info, info);
1400 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
1401 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
1402 }
1403
1404 unlock_user_struct(frame, frame_addr, 1);
1405 return;
1406
1407 give_sigsegv:
1408 unlock_user_struct(frame, frame_addr, 1);
1409 force_sig(TARGET_SIGSEGV);
1410 }
1411
/* rt signal delivery on AArch64 is just the common frame builder. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info, target_sigset_t *set,
                           CPUARMState *env)
{
    target_setup_frame(sig, ka, info, set, env);
}
1418
1419 static void setup_frame(int sig, struct target_sigaction *ka,
1420 target_sigset_t *set, CPUARMState *env)
1421 {
1422 target_setup_frame(sig, ka, 0, set, env);
1423 }
1424
/*
 * Guest rt_sigreturn for AArch64: validate frame alignment, restore the
 * saved context and re-apply the sigaltstack settings.  A bad frame
 * raises SIGSEGV.
 */
long do_rt_sigreturn(CPUARMState *env)
{
    struct target_rt_sigframe *frame = NULL;
    abi_ulong frame_addr = env->xregs[31];

    trace_user_do_rt_sigreturn(env, frame_addr);
    /* The frame was pushed 16-byte aligned; anything else means the
       guest tampered with SP. */
    if (frame_addr & 15) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (target_restore_sigframe(env, frame)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return 0;
}
1457
/* AArch64 has no separate non-rt sigreturn; both paths share a frame. */
long do_sigreturn(CPUARMState *env)
{
    return do_rt_sigreturn(env);
}
1462
1463 #elif defined(TARGET_ARM)
1464
/* Guest-visible ARM sigcontext; layout must match the Linux kernel. */
struct target_sigcontext {
    abi_ulong trap_no;
    abi_ulong error_code;
    abi_ulong oldmask;
    abi_ulong arm_r0;
    abi_ulong arm_r1;
    abi_ulong arm_r2;
    abi_ulong arm_r3;
    abi_ulong arm_r4;
    abi_ulong arm_r5;
    abi_ulong arm_r6;
    abi_ulong arm_r7;
    abi_ulong arm_r8;
    abi_ulong arm_r9;
    abi_ulong arm_r10;
    abi_ulong arm_fp;
    abi_ulong arm_ip;
    abi_ulong arm_sp;
    abi_ulong arm_lr;
    abi_ulong arm_pc;
    abi_ulong arm_cpsr;
    abi_ulong fault_address;
};

/* ucontext layout used by kernels before 2.6.18. */
struct target_ucontext_v1 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;	/* mask last for extensibility */
};

/* ucontext layout from kernel 2.6.18 on; adds coprocessor regspace. */
struct target_ucontext_v2 {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;	/* mask last for extensibility */
    char __unused[128 - sizeof(target_sigset_t)];
    abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
};

struct target_user_vfp {
    uint64_t fpregs[32];
    abi_ulong fpscr;
};

struct target_user_vfp_exc {
    abi_ulong fpexc;
    abi_ulong fpinst;
    abi_ulong fpinst2;
};

/* VFP record stored in tuc_regspace, tagged with TARGET_VFP_MAGIC. */
struct target_vfp_sigframe {
    abi_ulong magic;
    abi_ulong size;
    struct target_user_vfp ufp;
    struct target_user_vfp_exc ufp_exc;
} __attribute__((__aligned__(8)));

/* iWMMXt record stored in tuc_regspace, tagged with TARGET_IWMMXT_MAGIC. */
struct target_iwmmxt_sigframe {
    abi_ulong magic;
    abi_ulong size;
    uint64_t regs[16];
    /* Note that not all the coprocessor control registers are stored here */
    uint32_t wcssf;
    uint32_t wcasf;
    uint32_t wcgr0;
    uint32_t wcgr1;
    uint32_t wcgr2;
    uint32_t wcgr3;
} __attribute__((__aligned__(8)));

#define TARGET_VFP_MAGIC 0x56465001
#define TARGET_IWMMXT_MAGIC 0x12ef842a

/* Non-rt frame, pre-2.6.18 layout. */
struct sigframe_v1
{
    struct target_sigcontext sc;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    abi_ulong retcode;
};

/* Non-rt frame, 2.6.18+ layout (ucontext-based). */
struct sigframe_v2
{
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

/* rt frame, pre-2.6.18 layout. */
struct rt_sigframe_v1
{
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext_v1 uc;
    abi_ulong retcode;
};

/* rt frame, 2.6.18+ layout. */
struct rt_sigframe_v2
{
    struct target_siginfo info;
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};
1569
#define TARGET_CONFIG_CPU_32 1

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN	(0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN	(0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))

/* Return trampolines, indexed by (rt ? 2 : 0) + (thumb ? 1 : 0);
   see setup_return(). */
static const abi_ulong retcodes[4] = {
	SWI_SYS_SIGRETURN,	SWI_THUMB_SIGRETURN,
	SWI_SYS_RT_SIGRETURN,	SWI_THUMB_RT_SIGRETURN
};
1589
1590
/* Placeholder for the kernel's register-sanity check; QEMU accepts all
   restored register states, so this always reports success. */
static inline int valid_user_regs(CPUARMState *regs)
{
    return 1;
}
1595
/*
 * Write the current ARM CPU state (r0-r15, CPSR) plus trap bookkeeping
 * and the first word of the blocked-signal mask into a guest
 * sigcontext.
 */
static void
setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
                 CPUARMState *env, abi_ulong mask)
{
    __put_user(env->regs[0], &sc->arm_r0);
    __put_user(env->regs[1], &sc->arm_r1);
    __put_user(env->regs[2], &sc->arm_r2);
    __put_user(env->regs[3], &sc->arm_r3);
    __put_user(env->regs[4], &sc->arm_r4);
    __put_user(env->regs[5], &sc->arm_r5);
    __put_user(env->regs[6], &sc->arm_r6);
    __put_user(env->regs[7], &sc->arm_r7);
    __put_user(env->regs[8], &sc->arm_r8);
    __put_user(env->regs[9], &sc->arm_r9);
    __put_user(env->regs[10], &sc->arm_r10);
    __put_user(env->regs[11], &sc->arm_fp);
    __put_user(env->regs[12], &sc->arm_ip);
    __put_user(env->regs[13], &sc->arm_sp);
    __put_user(env->regs[14], &sc->arm_lr);
    __put_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    __put_user(cpsr_read(env), &sc->arm_cpsr);
#endif

    /* QEMU does not track these; the kernel would fill them from the
       faulting thread's state. */
    __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
    __put_user(/* current->thread.error_code */ 0, &sc->error_code);
    __put_user(/* current->thread.address */ 0, &sc->fault_address);
    __put_user(mask, &sc->oldmask);
}
1625
/*
 * Compute the guest address for a new ARM signal frame: current SP (or
 * the top of the alternate signal stack when SA_ONSTACK applies) minus
 * the frame size, 8-byte aligned.
 */
static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
{
    unsigned long sp = regs->regs[13];

    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }
    /*
     * ATPCS B01 mandates 8-byte alignment
     */
    return (sp - framesize) & ~7;
}
1642
/*
 * Arrange for the CPU to enter the guest signal handler: r0 = signal
 * number, sp = frame, lr = return trampoline (or SA_RESTORER stub) and
 * pc = handler, switching between ARM and Thumb state according to bit
 * 0 of the handler address.  *rc is the frame's retcode slot; rc_addr
 * is its guest address.
 */
static void
setup_return(CPUARMState *env, struct target_sigaction *ka,
             abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
{
    abi_ulong handler = ka->_sa_handler;
    abi_ulong retcode;
    int thumb = handler & 1;
    uint32_t cpsr = cpsr_read(env);

    /* Clear any pending IT state and select the handler's instruction
       set via the T bit. */
    cpsr &= ~CPSR_IT;
    if (thumb) {
        cpsr |= CPSR_T;
    } else {
        cpsr &= ~CPSR_T;
    }

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        retcode = ka->sa_restorer;
    } else {
        /* Index into retcodes[]: +1 for Thumb, +2 for rt (SA_SIGINFO). */
        unsigned int idx = thumb;

        if (ka->sa_flags & TARGET_SA_SIGINFO) {
            idx += 2;
        }

        __put_user(retcodes[idx], rc);

        /* Bit 0 of lr selects Thumb state on return. */
        retcode = rc_addr + thumb;
    }

    env->regs[0] = usig;
    env->regs[13] = frame_addr;
    env->regs[14] = retcode;
    env->regs[15] = handler & (thumb ? ~1 : ~3);
    cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
}
1679
/*
 * Append a VFP coprocessor record (magic, size, d0-d31, FPSCR and the
 * exception registers) to the ucontext regspace and return a pointer to
 * the next free slot.
 */
static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
{
    int i;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;
    __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
    __put_user(sizeof(*vfpframe), &vfpframe->size);
    for (i = 0; i < 32; i++) {
        __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
    }
    __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
    __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong*)(vfpframe+1);
}
1696
1697 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
1698 CPUARMState *env)
1699 {
1700 int i;
1701 struct target_iwmmxt_sigframe *iwmmxtframe;
1702 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1703 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
1704 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
1705 for (i = 0; i < 16; i++) {
1706 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1707 }
1708 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1709 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf);
1710 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1711 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1712 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1713 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1714 return (abi_ulong*)(iwmmxtframe+1);
1715 }
1716
/*
 * Fill in a v2 (2.6.18+) ucontext: sigaltstack state, CPU sigcontext,
 * optional VFP/iWMMXt coprocessor records, a terminating zero magic,
 * and the full blocked-signal mask.
 */
static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
                              target_sigset_t *set, CPUARMState *env)
{
    struct target_sigaltstack stack;
    int i;
    abi_ulong *regspace;

    /* Clear all the bits of the ucontext we don't use.  */
    memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&uc->tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
    /* Save coprocessor signal frame.  */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = setup_sigframe_v2_vfp(regspace, env);
    }
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        regspace = setup_sigframe_v2_iwmmxt(regspace, env);
    }

    /* Write terminating magic word */
    __put_user(0, regspace);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
    }
}
1750
/* compare linux/arch/arm/kernel/signal.c:setup_frame() */
/*
 * Push a pre-2.6.18 non-rt signal frame: sigcontext, the remaining
 * mask words and a return trampoline, then redirect the CPU to the
 * handler.  Failure to map the frame silently aborts delivery.
 */
static void setup_frame_v1(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
    int i;

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        return;
    }

    setup_sigcontext(&frame->sc, regs, set->sig[0]);

    /* Mask words beyond the first go into extramask[]. */
    for(i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v1, retcode));

    unlock_user_struct(frame, frame_addr, 1);
}
1775
/*
 * Push a 2.6.18+ non-rt signal frame (ucontext-based) and redirect the
 * CPU to the handler.  Failure to map the frame silently aborts
 * delivery.
 */
static void setup_frame_v2(int usig, struct target_sigaction *ka,
                           target_sigset_t *set, CPUARMState *regs)
{
    struct sigframe_v2 *frame;
    abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));

    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        return;
    }

    setup_sigframe_v2(&frame->uc, set, regs);

    setup_return(regs, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct sigframe_v2, retcode));

    unlock_user_struct(frame, frame_addr, 1);
}
1794
1795 static void setup_frame(int usig, struct target_sigaction *ka,
1796 target_sigset_t *set, CPUARMState *regs)
1797 {
1798 if (get_osversion() >= 0x020612) {
1799 setup_frame_v2(usig, ka, set, regs);
1800 } else {
1801 setup_frame_v1(usig, ka, set, regs);
1802 }
1803 }
1804
/* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
/*
 * Push a pre-2.6.18 rt signal frame: siginfo/ucontext pointers, the
 * siginfo copy, a v1 ucontext with sigaltstack state, sigcontext and
 * full mask, plus a return trampoline.  r1/r2 are loaded with the
 * siginfo/ucontext addresses for the three-argument handler.
 */
static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
                              target_siginfo_t *info,
                              target_sigset_t *set, CPUARMState *env)
{
    struct rt_sigframe_v1 *frame;
    abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
    struct target_sigaltstack stack;
    int i;
    abi_ulong info_addr, uc_addr;

    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        return /* 1 */;
    }

    info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
    __put_user(info_addr, &frame->pinfo);
    uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
    __put_user(uc_addr, &frame->puc);
    tswap_siginfo(&frame->info, info);

    /* Clear all the bits of the ucontext we don't use.  */
    memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    setup_return(env, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct rt_sigframe_v1, retcode));

    /* Handler arguments: r1 = &siginfo, r2 = &ucontext. */
    env->regs[1] = info_addr;
    env->regs[2] = uc_addr;

    unlock_user_struct(frame, frame_addr, 1);
}
1849
/*
 * Push a 2.6.18+ rt signal frame: siginfo copy plus v2 ucontext
 * (including coprocessor records) and a return trampoline.  r1/r2 are
 * loaded with the siginfo/ucontext addresses for the handler.
 */
static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
                              target_siginfo_t *info,
                              target_sigset_t *set, CPUARMState *env)
{
    struct rt_sigframe_v2 *frame;
    abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
    abi_ulong info_addr, uc_addr;

    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        return /* 1 */;
    }

    info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
    uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
    tswap_siginfo(&frame->info, info);

    setup_sigframe_v2(&frame->uc, set, env);

    setup_return(env, ka, &frame->retcode, frame_addr, usig,
                 frame_addr + offsetof(struct rt_sigframe_v2, retcode));

    /* Handler arguments: r1 = &siginfo, r2 = &ucontext. */
    env->regs[1] = info_addr;
    env->regs[2] = uc_addr;

    unlock_user_struct(frame, frame_addr, 1);
}
1877
1878 static void setup_rt_frame(int usig, struct target_sigaction *ka,
1879 target_siginfo_t *info,
1880 target_sigset_t *set, CPUARMState *env)
1881 {
1882 if (get_osversion() >= 0x020612) {
1883 setup_rt_frame_v2(usig, ka, info, set, env);
1884 } else {
1885 setup_rt_frame_v1(usig, ka, info, set, env);
1886 }
1887 }
1888
/*
 * Restore ARM CPU state (r0-r15, CPSR) from a guest sigcontext written
 * by setup_sigcontext().  Returns 0 on success; non-zero if the
 * restored register state is rejected by valid_user_regs().
 */
static int
restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
{
    int err = 0;
    uint32_t cpsr;

    __get_user(env->regs[0], &sc->arm_r0);
    __get_user(env->regs[1], &sc->arm_r1);
    __get_user(env->regs[2], &sc->arm_r2);
    __get_user(env->regs[3], &sc->arm_r3);
    __get_user(env->regs[4], &sc->arm_r4);
    __get_user(env->regs[5], &sc->arm_r5);
    __get_user(env->regs[6], &sc->arm_r6);
    __get_user(env->regs[7], &sc->arm_r7);
    __get_user(env->regs[8], &sc->arm_r8);
    __get_user(env->regs[9], &sc->arm_r9);
    __get_user(env->regs[10], &sc->arm_r10);
    __get_user(env->regs[11], &sc->arm_fp);
    __get_user(env->regs[12], &sc->arm_ip);
    __get_user(env->regs[13], &sc->arm_sp);
    __get_user(env->regs[14], &sc->arm_lr);
    __get_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    /* Only the user-writable and execution-state CPSR bits are taken
       from the frame. */
    __get_user(cpsr, &sc->arm_cpsr);
    cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
#endif

    err |= !valid_user_regs(env);

    return err;
}
1920
1921 static long do_sigreturn_v1(CPUARMState *env)
1922 {
1923 abi_ulong frame_addr;
1924 struct sigframe_v1 *frame = NULL;
1925 target_sigset_t set;
1926 sigset_t host_set;
1927 int i;
1928
1929 /*
1930 * Since we stacked the signal on a 64-bit boundary,
1931 * then 'sp' should be word aligned here. If it's
1932 * not, then the user is trying to mess with us.
1933 */
1934 frame_addr = env->regs[13];
1935 trace_user_do_sigreturn(env, frame_addr);
1936 if (frame_addr & 7) {
1937 goto badframe;
1938 }
1939
1940 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1941 goto badframe;
1942 }
1943
1944 __get_user(set.sig[0], &frame->sc.oldmask);
1945 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1946 __get_user(set.sig[i], &frame->extramask[i - 1]);
1947 }
1948
1949 target_to_host_sigset_internal(&host_set, &set);
1950 set_sigmask(&host_set);
1951
1952 if (restore_sigcontext(env, &frame->sc)) {
1953 goto badframe;
1954 }
1955
1956 #if 0
1957 /* Send SIGTRAP if we're single-stepping */
1958 if (ptrace_cancel_bpt(current))
1959 send_sig(SIGTRAP, current, 1);
1960 #endif
1961 unlock_user_struct(frame, frame_addr, 0);
1962 return -TARGET_QEMU_ESIGRETURN;
1963
1964 badframe:
1965 force_sig(TARGET_SIGSEGV /* , current */);
1966 return 0;
1967 }
1968
/*
 * Reload VFP state from a coprocessor record in the ucontext regspace.
 * Returns a pointer past the record on success, or 0 if the magic or
 * size does not match (corrupt frame).
 */
static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
{
    int i;
    abi_ulong magic, sz;
    uint32_t fpscr, fpexc;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;

    __get_user(magic, &vfpframe->magic);
    __get_user(sz, &vfpframe->size);
    if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
        return 0;
    }
    for (i = 0; i < 32; i++) {
        __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
    }
    __get_user(fpscr, &vfpframe->ufp.fpscr);
    vfp_set_fpscr(env, fpscr);
    __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
    /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
     * and the exception flag is cleared
     */
    fpexc |= (1 << 30);
    fpexc &= ~((1 << 31) | (1 << 28));
    env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
    __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong*)(vfpframe + 1);
}
1998
1999 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
2000 abi_ulong *regspace)
2001 {
2002 int i;
2003 abi_ulong magic, sz;
2004 struct target_iwmmxt_sigframe *iwmmxtframe;
2005 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
2006
2007 __get_user(magic, &iwmmxtframe->magic);
2008 __get_user(sz, &iwmmxtframe->size);
2009 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
2010 return 0;
2011 }
2012 for (i = 0; i < 16; i++) {
2013 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
2014 }
2015 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
2016 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf);
2017 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
2018 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
2019 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
2020 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
2021 return (abi_ulong*)(iwmmxtframe + 1);
2022 }
2023
/*
 * Common v2-frame restore path shared by sigreturn and rt_sigreturn:
 * restore the signal mask, CPU state, any coprocessor records, and the
 * sigaltstack settings from the given ucontext.  Returns 0 on success,
 * 1 on any validation failure.
 */
static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
                                 struct target_ucontext_v2 *uc)
{
    sigset_t host_set;
    abi_ulong *regspace;

    target_to_host_sigset(&host_set, &uc->tuc_sigmask);
    set_sigmask(&host_set);

    if (restore_sigcontext(env, &uc->tuc_mcontext))
        return 1;

    /* Restore coprocessor signal frame */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = restore_sigframe_v2_vfp(env, regspace);
        if (!regspace) {
            return 1;
        }
    }
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        regspace = restore_sigframe_v2_iwmmxt(env, regspace);
        if (!regspace) {
            return 1;
        }
    }

    if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
        return 1;

#if 0
    /* Send SIGTRAP if we're single-stepping */
    if (ptrace_cancel_bpt(current))
        send_sig(SIGTRAP, current, 1);
#endif

    return 0;
}
2062
/*
 * Guest sigreturn for a 2.6.18+ non-rt frame: validate alignment, then
 * delegate the full ucontext restore to do_sigframe_return_v2().  A bad
 * frame raises SIGSEGV.
 */
static long do_sigreturn_v2(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct sigframe_v2 *frame = NULL;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here.  If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV /* , current */);
    return 0;
}
2095
2096 long do_sigreturn(CPUARMState *env)
2097 {
2098 if (get_osversion() >= 0x020612) {
2099 return do_sigreturn_v2(env);
2100 } else {
2101 return do_sigreturn_v1(env);
2102 }
2103 }
2104
/* rt_sigreturn for the v1 (pre-2.6.12) ARM frame layout: restore
 * signal mask, integer state and the alternate signal stack from
 * the rt frame on the guest stack.  On any failure the frame is
 * treated as hostile and SIGSEGV is delivered.
 */
static long do_rt_sigreturn_v1(CPUARMState *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe_v1 *frame = NULL;
    sigset_t host_set;

    /*
     * Since we stacked the signal on a 64-bit boundary,
     * then 'sp' should be word aligned here. If it's
     * not, then the user is trying to mess with us.
     */
    frame_addr = env->regs[13];
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 7) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    /* Reinstate the signal mask saved when the frame was built. */
    target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
    set_sigmask(&host_set);

    if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
        goto badframe;

#if 0
    /* Send SIGTRAP if we're single-stepping */
    if (ptrace_cancel_bpt(current))
        send_sig(SIGTRAP, current, 1);
#endif
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV /* , current */);
    return 0;
}
2149
2150 static long do_rt_sigreturn_v2(CPUARMState *env)
2151 {
2152 abi_ulong frame_addr;
2153 struct rt_sigframe_v2 *frame = NULL;
2154
2155 /*
2156 * Since we stacked the signal on a 64-bit boundary,
2157 * then 'sp' should be word aligned here. If it's
2158 * not, then the user is trying to mess with us.
2159 */
2160 frame_addr = env->regs[13];
2161 trace_user_do_rt_sigreturn(env, frame_addr);
2162 if (frame_addr & 7) {
2163 goto badframe;
2164 }
2165
2166 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2167 goto badframe;
2168 }
2169
2170 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
2171 goto badframe;
2172 }
2173
2174 unlock_user_struct(frame, frame_addr, 0);
2175 return -TARGET_QEMU_ESIGRETURN;
2176
2177 badframe:
2178 unlock_user_struct(frame, frame_addr, 0);
2179 force_sig(TARGET_SIGSEGV /* , current */);
2180 return 0;
2181 }
2182
2183 long do_rt_sigreturn(CPUARMState *env)
2184 {
2185 if (get_osversion() >= 0x020612) {
2186 return do_rt_sigreturn_v2(env);
2187 } else {
2188 return do_rt_sigreturn_v1(env);
2189 }
2190 }
2191
2192 #elif defined(TARGET_SPARC)
2193
#define __SUNOS_MAXWIN 31

/* This is what SunOS does, so shall I.
 * Guest-visible sigcontext layout for sparc32; field order is ABI
 * and must not change. */
struct target_sigcontext {
    abi_ulong sigc_onstack;      /* state to restore */

    abi_ulong sigc_mask;         /* sigmask to restore */
    abi_ulong sigc_sp;           /* stack pointer */
    abi_ulong sigc_pc;           /* program counter */
    abi_ulong sigc_npc;          /* next program counter */
    abi_ulong sigc_psr;          /* for condition codes etc */
    abi_ulong sigc_g1;           /* User uses these two registers */
    abi_ulong sigc_o0;           /* within the trampoline code. */

    /* Now comes information regarding the users window set
     * at the time of the signal.
     */
    abi_ulong sigc_oswins;       /* outstanding windows */

    /* stack ptrs for each regwin buf */
    char *sigc_spbuf[__SUNOS_MAXWIN];

    /* Windows to restore after signal */
    struct {
        abi_ulong locals[8];
        abi_ulong ins[8];
    } sigc_wbuf[__SUNOS_MAXWIN];
};
/* A Sparc stack frame */
struct sparc_stackf {
    abi_ulong locals[8];
    abi_ulong ins[8];
    /* It's simpler to treat fp and callers_pc as elements of ins[]
     * since we never need to access them ourselves.
     */
    char *structptr;
    abi_ulong xargs[6];
    abi_ulong xxargs[1];
};

/* Register snapshot embedded in the signal frame (kernel's
 * __siginfo_t for sparc32, not POSIX siginfo_t). */
typedef struct {
    struct {
        abi_ulong psr;
        abi_ulong pc;
        abi_ulong npc;
        abi_ulong y;
        abi_ulong u_regs[16]; /* globals and ins */
    } si_regs;
    int si_mask;
} __siginfo_t;

/* FPU state area of the signal frame (currently unused here —
 * see the FIXME in do_sigreturn below). */
typedef struct {
    abi_ulong si_float_regs[32];
    unsigned long si_fsr;
    unsigned long si_fpqdepth;
    struct {
        unsigned long *insn_addr;
        unsigned long insn;
    } si_fpqueue [16];
} qemu_siginfo_fpu_t;


/* Non-RT signal frame pushed on the guest stack by setup_frame(). */
struct target_signal_frame {
    struct sparc_stackf ss;
    __siginfo_t info;
    abi_ulong fpu_save;
    abi_ulong insns[2] __attribute__ ((aligned (8)));  /* sigreturn trampoline */
    abi_ulong extramask[TARGET_NSIG_WORDS - 1];
    abi_ulong extra_size; /* Should be 0 */
    qemu_siginfo_fpu_t fpu_state;
};
struct target_rt_signal_frame {
    struct sparc_stackf ss;
    siginfo_t info;
    abi_ulong regs[20];
    sigset_t mask;
    abi_ulong fpu_save;
    unsigned int insns[2];
    stack_t stack;
    unsigned int extra_size; /* Should be 0 */
    qemu_siginfo_fpu_t fpu_state;
};

/* Indices into env->regwptr.  From the usage below: ins live at
 * 0..7, locals from 8, outs from 16 (so %o6/%sp is slot 22). */
#define UREG_O0        16
#define UREG_O6        22
#define UREG_I0        0
#define UREG_I1        1
#define UREG_I2        2
#define UREG_I3        3
#define UREG_I4        4
#define UREG_I5        5
#define UREG_I6        6
#define UREG_I7        7
#define UREG_L0        8
#define UREG_FP        UREG_I6
#define UREG_SP        UREG_O6
2290
/* Pick the address for a new sparc32 signal frame: normally just
 * below the interrupted frame pointer, or on the alternate signal
 * stack when SA_ONSTACK is set, we are not already on it, and the
 * top of the alternate stack is 8-byte aligned.
 * Note: no SS_DISABLE check here — assumed handled by on_sig_stack();
 * TODO confirm. */
static inline abi_ulong get_sigframe(struct target_sigaction *sa,
                                     CPUSPARCState *env,
                                     unsigned long framesize)
{
    abi_ulong sp;

    sp = env->regwptr[UREG_FP];

    /* This is the X/Open sanctioned signal stack switching. */
    if (sa->sa_flags & TARGET_SA_ONSTACK) {
        if (!on_sig_stack(sp)
            && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
            sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
        }
    }
    return sp - framesize;
}
2308
2309 static int
2310 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
2311 {
2312 int err = 0, i;
2313
2314 __put_user(env->psr, &si->si_regs.psr);
2315 __put_user(env->pc, &si->si_regs.pc);
2316 __put_user(env->npc, &si->si_regs.npc);
2317 __put_user(env->y, &si->si_regs.y);
2318 for (i=0; i < 8; i++) {
2319 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
2320 }
2321 for (i=0; i < 8; i++) {
2322 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
2323 }
2324 __put_user(mask, &si->si_mask);
2325 return err;
2326 }
2327
2328 #if 0
2329 static int
2330 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
2331 CPUSPARCState *env, unsigned long mask)
2332 {
2333 int err = 0;
2334
2335 __put_user(mask, &sc->sigc_mask);
2336 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
2337 __put_user(env->pc, &sc->sigc_pc);
2338 __put_user(env->npc, &sc->sigc_npc);
2339 __put_user(env->psr, &sc->sigc_psr);
2340 __put_user(env->gregs[1], &sc->sigc_g1);
2341 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
2342
2343 return err;
2344 }
2345 #endif
2346 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
2347
/* Build a sparc32 non-RT signal frame on the guest stack and point
 * the CPU at the handler.  Mirrors the kernel's setup_frame(): save
 * register state + signal mask into the frame, set up handler
 * arguments in %i0-%i2, and arrange the return path either through
 * sa_restorer or an on-stack sigreturn trampoline. */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUSPARCState *env)
{
    abi_ulong sf_addr;
    struct target_signal_frame *sf;
    int sigframe_size, err, i;

    /* 1. Make sure everything is clean */
    //synchronize_user_stack();

    sigframe_size = NF_ALIGNEDSZ;
    sf_addr = get_sigframe(ka, env, sigframe_size);
    trace_user_setup_frame(env, sf_addr);

    sf = lock_user(VERIFY_WRITE, sf_addr,
                   sizeof(struct target_signal_frame), 0);
    if (!sf) {
        goto sigsegv;
    }
#if 0
    if (invalid_frame_pointer(sf, sigframe_size))
        goto sigill_and_return;
#endif
    /* 2. Save the current process state */
    err = setup___siginfo(&sf->info, env, set->sig[0]);
    __put_user(0, &sf->extra_size);

    //save_fpu_state(regs, &sf->fpu_state);
    //__put_user(&sf->fpu_state, &sf->fpu_save);

    __put_user(set->sig[0], &sf->info.si_mask);
    for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
        __put_user(set->sig[i + 1], &sf->extramask[i]);
    }

    /* Save the caller's register window (locals + ins) in the frame. */
    for (i = 0; i < 8; i++) {
        __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
    }
    for (i = 0; i < 8; i++) {
        __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
    }
    if (err)
        goto sigsegv;

    /* 3. signal handler back-trampoline and parameters */
    env->regwptr[UREG_FP] = sf_addr;
    env->regwptr[UREG_I0] = sig;
    env->regwptr[UREG_I1] = sf_addr +
            offsetof(struct target_signal_frame, info);
    env->regwptr[UREG_I2] = sf_addr +
            offsetof(struct target_signal_frame, info);

    /* 4. signal handler */
    env->pc = ka->_sa_handler;
    env->npc = (env->pc + 4);
    /* 5. return to kernel instructions */
    if (ka->sa_restorer) {
        env->regwptr[UREG_I7] = ka->sa_restorer;
    } else {
        uint32_t val32;

        /* %i7 is set 8 bytes before insns[] so that the handler's
         * "ret" (jump to %i7 + 8) lands on the trampoline. */
        env->regwptr[UREG_I7] = sf_addr +
                offsetof(struct target_signal_frame, insns) - 2 * 4;

        /* mov __NR_sigreturn, %g1 */
        val32 = 0x821020d8;
        __put_user(val32, &sf->insns[0]);

        /* t 0x10 */
        val32 = 0x91d02010;
        __put_user(val32, &sf->insns[1]);
        if (err)
            goto sigsegv;

        /* Flush instruction space. */
        // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
        // tb_flush(env);
    }
    unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
    return;
#if 0
sigill_and_return:
    force_sig(TARGET_SIGILL);
#endif
sigsegv:
    unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
    force_sig(TARGET_SIGSEGV);
}
2436
/* TODO: SA_SIGINFO delivery is not implemented for sparc32; the
 * signal is effectively dropped apart from this diagnostic. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUSPARCState *env)
{
    fprintf(stderr, "setup_rt_frame: not implemented\n");
}
2443
2444 long do_sigreturn(CPUSPARCState *env)
2445 {
2446 abi_ulong sf_addr;
2447 struct target_signal_frame *sf;
2448 uint32_t up_psr, pc, npc;
2449 target_sigset_t set;
2450 sigset_t host_set;
2451 int err=0, i;
2452
2453 sf_addr = env->regwptr[UREG_FP];
2454 trace_user_do_sigreturn(env, sf_addr);
2455 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
2456 goto segv_and_exit;
2457 }
2458
2459 /* 1. Make sure we are not getting garbage from the user */
2460
2461 if (sf_addr & 3)
2462 goto segv_and_exit;
2463
2464 __get_user(pc, &sf->info.si_regs.pc);
2465 __get_user(npc, &sf->info.si_regs.npc);
2466
2467 if ((pc | npc) & 3) {
2468 goto segv_and_exit;
2469 }
2470
2471 /* 2. Restore the state */
2472 __get_user(up_psr, &sf->info.si_regs.psr);
2473
2474 /* User can only change condition codes and FPU enabling in %psr. */
2475 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2476 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2477
2478 env->pc = pc;
2479 env->npc = npc;
2480 __get_user(env->y, &sf->info.si_regs.y);
2481 for (i=0; i < 8; i++) {
2482 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2483 }
2484 for (i=0; i < 8; i++) {
2485 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2486 }
2487
2488 /* FIXME: implement FPU save/restore:
2489 * __get_user(fpu_save, &sf->fpu_save);
2490 * if (fpu_save)
2491 * err |= restore_fpu_state(env, fpu_save);
2492 */
2493
2494 /* This is pretty much atomic, no amount locking would prevent
2495 * the races which exist anyways.
2496 */
2497 __get_user(set.sig[0], &sf->info.si_mask);
2498 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2499 __get_user(set.sig[i], &sf->extramask[i - 1]);
2500 }
2501
2502 target_to_host_sigset_internal(&host_set, &set);
2503 set_sigmask(&host_set);
2504
2505 if (err) {
2506 goto segv_and_exit;
2507 }
2508 unlock_user_struct(sf, sf_addr, 0);
2509 return -TARGET_QEMU_ESIGRETURN;
2510
2511 segv_and_exit:
2512 unlock_user_struct(sf, sf_addr, 0);
2513 force_sig(TARGET_SIGSEGV);
2514 }
2515
/* rt_sigreturn is not implemented for sparc32 (setup_rt_frame above
 * never builds an rt frame); report ENOSYS to the guest. */
long do_rt_sigreturn(CPUSPARCState *env)
{
    trace_user_do_rt_sigreturn(env, 0);
    fprintf(stderr, "do_rt_sigreturn: not implemented\n");
    return -TARGET_ENOSYS;
}
2522
2523 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
/* Indices into the sparc64 mcontext general-register array. */
#define MC_TSTATE 0
#define MC_PC 1
#define MC_NPC 2
#define MC_Y 3
#define MC_G1 4
#define MC_G2 5
#define MC_G3 6
#define MC_G4 7
#define MC_G5 8
#define MC_G6 9
#define MC_G7 10
#define MC_O0 11
#define MC_O1 12
#define MC_O2 13
#define MC_O3 14
#define MC_O4 15
#define MC_O5 16
#define MC_O6 17
#define MC_O7 18
#define MC_NGREG 19

typedef abi_ulong target_mc_greg_t;
typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG];

struct target_mc_fq {
    abi_ulong *mcfq_addr;
    uint32_t mcfq_insn;
};

/* FPU portion of the sparc64 mcontext. */
struct target_mc_fpu {
    union {
        uint32_t sregs[32];
        uint64_t dregs[32];
        //uint128_t qregs[16];
    } mcfpu_fregs;
    abi_ulong mcfpu_fsr;
    abi_ulong mcfpu_fprs;
    abi_ulong mcfpu_gsr;
    struct target_mc_fq *mcfpu_fq;
    unsigned char mcfpu_qcnt;
    unsigned char mcfpu_qentsz;
    unsigned char mcfpu_enab;
};
typedef struct target_mc_fpu target_mc_fpu_t;

typedef struct {
    target_mc_gregset_t mc_gregs;
    target_mc_greg_t mc_fp;
    target_mc_greg_t mc_i7;
    target_mc_fpu_t mc_fpregs;
} target_mcontext_t;

/* sparc64 ucontext — note tuc_link comes first, matching the
 * kernel's unusual sparc64 layout. */
struct target_ucontext {
    struct target_ucontext *tuc_link;
    abi_ulong tuc_flags;
    target_sigset_t tuc_sigmask;
    target_mcontext_t tuc_mcontext;
};

/* A V9 register window */
struct target_reg_window {
    abi_ulong locals[8];
    abi_ulong ins[8];
};

/* V9 stack pointers carry a -2047 bias; add this to %sp to get the
 * true address of the register-save area. */
#define TARGET_STACK_BIAS 2047
2590
2591 /* {set, get}context() needed for 64-bit SparcLinux userland. */
/* Implements the setcontext fast trap for 64-bit SparcLinux:
 * restore CPU state from the target_ucontext whose guest address
 * is in %o0.  %o1 non-zero additionally restores the signal mask.
 * Any fault or misaligned PC raises SIGSEGV. */
void sparc64_set_context(CPUSPARCState *env)
{
    abi_ulong ucp_addr;
    struct target_ucontext *ucp;
    target_mc_gregset_t *grp;
    abi_ulong pc, npc, tstate;
    abi_ulong fp, i7, w_addr;
    unsigned int i;

    ucp_addr = env->regwptr[UREG_I0];
    if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
        goto do_sigsegv;
    }
    grp = &ucp->tuc_mcontext.mc_gregs;
    __get_user(pc, &((*grp)[MC_PC]));
    __get_user(npc, &((*grp)[MC_NPC]));
    /* Both PCs must be 4-byte aligned. */
    if ((pc | npc) & 3) {
        goto do_sigsegv;
    }
    if (env->regwptr[UREG_I1]) {
        target_sigset_t target_set;
        sigset_t set;

        if (TARGET_NSIG_WORDS == 1) {
            __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
        } else {
            abi_ulong *src, *dst;
            src = ucp->tuc_sigmask.sig;
            dst = target_set.sig;
            for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
                __get_user(*dst, src);
            }
        }
        target_to_host_sigset_internal(&set, &target_set);
        set_sigmask(&set);
    }
    env->pc = pc;
    env->npc = npc;
    __get_user(env->y, &((*grp)[MC_Y]));
    /* Unpack tstate: asi from bits 31:24, ccr from bits above 32,
     * cwp from the low 5 bits. */
    __get_user(tstate, &((*grp)[MC_TSTATE]));
    env->asi = (tstate >> 24) & 0xff;
    cpu_put_ccr(env, tstate >> 32);
    cpu_put_cwp64(env, tstate & 0x1f);
    __get_user(env->gregs[1], (&(*grp)[MC_G1]));
    __get_user(env->gregs[2], (&(*grp)[MC_G2]));
    __get_user(env->gregs[3], (&(*grp)[MC_G3]));
    __get_user(env->gregs[4], (&(*grp)[MC_G4]));
    __get_user(env->gregs[5], (&(*grp)[MC_G5]));
    __get_user(env->gregs[6], (&(*grp)[MC_G6]));
    __get_user(env->gregs[7], (&(*grp)[MC_G7]));
    /* The MC_Ox slots land in regwptr[UREG_Ix] — the out registers
     * of the restored window. */
    __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0]));
    __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1]));
    __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2]));
    __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3]));
    __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4]));
    __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5]));
    __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6]));
    __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7]));

    __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
    __get_user(i7, &(ucp->tuc_mcontext.mc_i7));

    /* Write the saved fp/i7 into the register-save area of the
     * (biased) stack frame, as the kernel does. */
    w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
    if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    /* FIXME this does not match how the kernel handles the FPU in
     * its sparc64_set_context implementation. In particular the FPU
     * is only restored if fenab is non-zero in:
     *   __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
     */
    __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
    {
        uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
        for (i = 0; i < 64; i++, src++) {
            if (i & 1) {
                __get_user(env->fpr[i/2].l.lower, src);
            } else {
                __get_user(env->fpr[i/2].l.upper, src);
            }
        }
    }
    __get_user(env->fsr,
               &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
    __get_user(env->gsr,
               &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
    unlock_user_struct(ucp, ucp_addr, 0);
    return;
do_sigsegv:
    unlock_user_struct(ucp, ucp_addr, 0);
    force_sig(TARGET_SIGSEGV);
}
2689
/* Implements the getcontext fast trap for 64-bit SparcLinux:
 * write the current CPU state into the target_ucontext whose guest
 * address is in %o0, then skip over the trap instruction. */
void sparc64_get_context(CPUSPARCState *env)
{
    abi_ulong ucp_addr;
    struct target_ucontext *ucp;
    target_mc_gregset_t *grp;
    target_mcontext_t *mcp;
    abi_ulong fp, i7, w_addr;
    int err;
    unsigned int i;
    target_sigset_t target_set;
    sigset_t set;

    ucp_addr = env->regwptr[UREG_I0];
    if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
        goto do_sigsegv;
    }

    mcp = &ucp->tuc_mcontext;
    grp = &mcp->mc_gregs;

    /* Skip over the trap instruction, first. */
    env->pc = env->npc;
    env->npc += 4;

    /* If we're only reading the signal mask then do_sigprocmask()
     * is guaranteed not to fail, which is important because we don't
     * have any way to signal a failure or restart this operation since
     * this is not a normal syscall.
     */
    err = do_sigprocmask(0, NULL, &set);
    assert(err == 0);
    host_to_target_sigset_internal(&target_set, &set);
    if (TARGET_NSIG_WORDS == 1) {
        __put_user(target_set.sig[0],
                   (abi_ulong *)&ucp->tuc_sigmask);
    } else {
        abi_ulong *src, *dst;
        src = target_set.sig;
        dst = ucp->tuc_sigmask.sig;
        for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
            __put_user(*src, dst);
        }
        /* NOTE(review): dead code — err is asserted zero above and
         * never set again before this point. */
        if (err)
            goto do_sigsegv;
    }

    /* XXX: tstate must be saved properly */
    //    __put_user(env->tstate, &((*grp)[MC_TSTATE]));
    __put_user(env->pc, &((*grp)[MC_PC]));
    __put_user(env->npc, &((*grp)[MC_NPC]));
    __put_user(env->y, &((*grp)[MC_Y]));
    __put_user(env->gregs[1], &((*grp)[MC_G1]));
    __put_user(env->gregs[2], &((*grp)[MC_G2]));
    __put_user(env->gregs[3], &((*grp)[MC_G3]));
    __put_user(env->gregs[4], &((*grp)[MC_G4]));
    __put_user(env->gregs[5], &((*grp)[MC_G5]));
    __put_user(env->gregs[6], &((*grp)[MC_G6]));
    __put_user(env->gregs[7], &((*grp)[MC_G7]));
    __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0]));
    __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1]));
    __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2]));
    __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3]));
    __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4]));
    __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5]));
    __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6]));
    __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7]));

    /* Read fp/i7 back out of the register-save area of the (biased)
     * stack frame. */
    w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
    fp = i7 = 0;
    if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
                 abi_ulong) != 0) {
        goto do_sigsegv;
    }
    __put_user(fp, &(mcp->mc_fp));
    __put_user(i7, &(mcp->mc_i7));

    {
        uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
        for (i = 0; i < 64; i++, dst++) {
            if (i & 1) {
                __put_user(env->fpr[i/2].l.lower, dst);
            } else {
                __put_user(env->fpr[i/2].l.upper, dst);
            }
        }
    }
    __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
    __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
    __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));

    if (err)
        goto do_sigsegv;
    unlock_user_struct(ucp, ucp_addr, 1);
    return;
do_sigsegv:
    unlock_user_struct(ucp, ucp_addr, 1);
    force_sig(TARGET_SIGSEGV);
}
2792 #endif
2793 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
2794
# if defined(TARGET_ABI_MIPSO32)
/* Guest-visible sigcontext for the o32 ABI; layout is kernel ABI
 * and must not change. */
struct target_sigcontext {
    uint32_t sc_regmask; /* Unused */
    uint32_t sc_status;
    uint64_t sc_pc;
    uint64_t sc_regs[32];
    uint64_t sc_fpregs[32];
    uint32_t sc_ownedfp; /* Unused */
    uint32_t sc_fpc_csr;
    uint32_t sc_fpc_eir; /* Unused */
    uint32_t sc_used_math;
    uint32_t sc_dsp; /* dsp status, was sc_ssflags */
    uint32_t pad0;
    uint64_t sc_mdhi;
    uint64_t sc_mdlo;
    target_ulong sc_hi1; /* Was sc_cause */
    target_ulong sc_lo1; /* Was sc_badvaddr */
    target_ulong sc_hi2; /* Was sc_sigset[4] */
    target_ulong sc_lo2;
    target_ulong sc_hi3;
    target_ulong sc_lo3;
};
# else /* N32 || N64 */
struct target_sigcontext {
    uint64_t sc_regs[32];
    uint64_t sc_fpregs[32];
    uint64_t sc_mdhi;
    uint64_t sc_hi1;
    uint64_t sc_hi2;
    uint64_t sc_hi3;
    uint64_t sc_mdlo;
    uint64_t sc_lo1;
    uint64_t sc_lo2;
    uint64_t sc_lo3;
    uint64_t sc_pc;
    uint32_t sc_fpc_csr;
    uint32_t sc_used_math;
    uint32_t sc_dsp;
    uint32_t sc_reserved;
};
# endif /* O32 */

/* Non-RT signal frame pushed on the guest stack (o32 only). */
struct sigframe {
    uint32_t sf_ass[4]; /* argument save space for o32 */
    uint32_t sf_code[2]; /* signal trampoline */
    struct target_sigcontext sf_sc;
    target_sigset_t sf_mask;
};

struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;
    target_stack_t tuc_stack;
    target_ulong pad0;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t tuc_sigmask;
};

/* RT signal frame pushed on the guest stack. */
struct target_rt_sigframe {
    uint32_t rs_ass[4]; /* argument save space for o32 */
    uint32_t rs_code[2]; /* signal trampoline */
    struct target_siginfo rs_info;
    struct target_ucontext rs_uc;
};
2859
2860 /* Install trampoline to jump back from signal handler */
/* Install trampoline to jump back from signal handler.
 *
 * Writes the two-instruction return sequence into the frame:
 *
 *     li      v0, syscall      (addiu $v0, $zero, syscall)
 *     syscall
 *
 * Always returns 0 (kept for signature compatibility).
 */
static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
{
    __put_user(0x24020000 + syscall, &tramp[0]);
    __put_user(0x0000000c, &tramp[1]);
    return 0;
}
2876
/* Save the current MIPS CPU state into a guest sigcontext:
 * resume PC, GPRs, HI/LO (including the DSP accumulator pairs),
 * DSP control and the FPU registers. */
static inline void setup_sigcontext(CPUMIPSState *regs,
                                    struct target_sigcontext *sc)
{
    int i;

    /* exception_resume_pc() accounts for a branch-delay slot; the
     * branch state is then cleared from hflags. */
    __put_user(exception_resume_pc(regs), &sc->sc_pc);
    regs->hflags &= ~MIPS_HFLAG_BMASK;

    /* $zero is architecturally 0; store it explicitly. */
    __put_user(0, &sc->sc_regs[0]);
    for (i = 1; i < 32; ++i) {
        __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
    }

    __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
    __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);

    /* Rather than checking for dsp existence, always copy.  The storage
       would just be garbage otherwise. */
    __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
    __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
    __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
    __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
    __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
    __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
    {
        uint32_t dsp = cpu_rddsp(0x3ff, regs);
        __put_user(dsp, &sc->sc_dsp);
    }

    __put_user(1, &sc->sc_used_math);

    for (i = 0; i < 32; ++i) {
        __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
    }
}
2912
/* Restore MIPS CPU state from a guest sigcontext (inverse of
 * setup_sigcontext): resume PC goes into CP0_EPC, then GPRs,
 * HI/LO pairs, DSP control and FPU registers. */
static inline void
restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
{
    int i;

    __get_user(regs->CP0_EPC, &sc->sc_pc);

    __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
    __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);

    /* gpr[0] ($zero) is intentionally skipped. */
    for (i = 1; i < 32; ++i) {
        __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
    }

    __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
    __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
    __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
    __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
    __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
    __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
    {
        uint32_t dsp;
        __get_user(dsp, &sc->sc_dsp);
        cpu_wrdsp(dsp, 0x3ff, regs);
    }

    for (i = 0; i < 32; ++i) {
        __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
    }
}
2943
2944 /*
2945 * Determine which stack to use..
2946 */
2947 static inline abi_ulong
2948 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
2949 {
2950 unsigned long sp;
2951
2952 /* Default to using normal stack */
2953 sp = regs->active_tc.gpr[29];
2954
2955 /*
2956 * FPU emulator may have its own trampoline active just
2957 * above the user stack, 16-bytes before the next lowest
2958 * 16 byte boundary. Try to avoid trashing it.
2959 */
2960 sp -= 32;
2961
2962 /* This is the X/Open sanctioned signal stack switching. */
2963 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
2964 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2965 }
2966
2967 return (sp - frame_size) & ~7;
2968 }
2969
2970 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
2971 {
2972 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
2973 env->hflags &= ~MIPS_HFLAG_M16;
2974 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
2975 env->active_tc.PC &= ~(target_ulong) 1;
2976 }
2977 }
2978
2979 # if defined(TARGET_ABI_MIPSO32)
2980 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
/* compare linux/arch/mips/kernel/signal.c:setup_frame()
 * Build an o32 non-RT signal frame on the guest stack: trampoline,
 * sigcontext and signal mask, then point the CPU at the handler. */
static void setup_frame(int sig, struct target_sigaction * ka,
                        target_sigset_t *set, CPUMIPSState *regs)
{
    struct sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, regs, sizeof(*frame));
    trace_user_setup_frame(regs, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);

    setup_sigcontext(regs, &frame->sf_sc);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->sf_mask.sig[i]);
    }

    /*
     * Arguments to signal handler:
     *
     *   a0 = signal number
     *   a1 = 0 (should be cause)
     *   a2 = pointer to struct sigcontext
     *
     * $25 and PC point to the signal handler, $29 points to the
     * struct sigframe.
     */
    regs->active_tc.gpr[ 4] = sig;
    regs->active_tc.gpr[ 5] = 0;
    regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
    regs->active_tc.gpr[29] = frame_addr;
    regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
    /* The original kernel code sets CP0_EPC to the handler
     * since it returns to userland using eret
     * we cannot do this here, and we must set PC directly */
    regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
    mips_set_hflags_isa_mode_from_pc(regs);
    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    force_sig(TARGET_SIGSEGV/*, current*/);
}
3028
/* o32 sigreturn: restore the signal mask and CPU state from the
 * sigframe pointed to by $29.  An unreadable frame raises SIGSEGV.
 * NOTE(review): the locked frame is never unlocked on the success
 * path — harmless in the common build where unlock is a no-op, but
 * worth confirming. */
long do_sigreturn(CPUMIPSState *regs)
{
    struct sigframe *frame;
    abi_ulong frame_addr;
    sigset_t blocked;
    target_sigset_t target_set;
    int i;

    frame_addr = regs->active_tc.gpr[29];
    trace_user_do_sigreturn(regs, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
    }

    target_to_host_sigset_internal(&blocked, &target_set);
    set_sigmask(&blocked);

    restore_sigcontext(regs, &frame->sf_sc);

#if 0
    /*
     * Don't let your children do this ...
     */
    __asm__ __volatile__(
        "move\t$29, %0\n\t"
        "j\tsyscall_exit"
        :/* no outputs */
        :"r" (&regs));
    /* Unreached */
#endif

    regs->active_tc.PC = regs->CP0_EPC;
    mips_set_hflags_isa_mode_from_pc(regs);
    /* I am not sure this is right, but it seems to work
     * maybe a problem with nested signals ? */
    regs->CP0_EPC = 0;
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV/*, current*/);
    return 0;
}
3074 # endif /* O32 */
3075
/* Build a MIPS RT signal frame on the guest stack: trampoline,
 * siginfo, full ucontext (flags, alt-stack, sigcontext, mask), then
 * point the CPU at the handler with the SA_SIGINFO argument triple. */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUMIPSState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);

    tswap_siginfo(&frame->rs_info, info);

    __put_user(0, &frame->rs_uc.tuc_flags);
    __put_user(0, &frame->rs_uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->rs_uc.tuc_stack.ss_flags);

    setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);

    for(i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
    }

    /*
     * Arguments to signal handler:
     *
     *   a0 = signal number
     *   a1 = pointer to siginfo_t
     *   a2 = pointer to struct ucontext
     *
     * $25 and PC point to the signal handler, $29 points to the
     * struct sigframe.
     */
    env->active_tc.gpr[ 4] = sig;
    env->active_tc.gpr[ 5] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_info);
    env->active_tc.gpr[ 6] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_uc);
    env->active_tc.gpr[29] = frame_addr;
    env->active_tc.gpr[31] = frame_addr
                             + offsetof(struct target_rt_sigframe, rs_code);
    /* The original kernel code sets CP0_EPC to the handler
     * since it returns to userland using eret
     * we cannot do this here, and we must set PC directly */
    env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
    mips_set_hflags_isa_mode_from_pc(env);
    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    /* NOTE(review): frame is NULL here (lock failed); the unlock is
     * a no-op on a NULL pointer. */
    unlock_user_struct(frame, frame_addr, 1);
    force_sig(TARGET_SIGSEGV/*, current*/);
}
3137
/* MIPS rt_sigreturn: restore the signal mask, CPU state and the
 * alternate signal stack from the RT frame pointed to by $29.
 * Any failure raises SIGSEGV. */
long do_rt_sigreturn(CPUMIPSState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr;
    sigset_t blocked;

    frame_addr = env->active_tc.gpr[29];
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
    set_sigmask(&blocked);

    restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);

    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT)
        goto badframe;

    env->active_tc.PC = env->CP0_EPC;
    mips_set_hflags_isa_mode_from_pc(env);
    /* I am not sure this is right, but it seems to work
     * maybe a problem with nested signals ? */
    env->CP0_EPC = 0;
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV/*, current*/);
    return 0;
}
3171
3172 #elif defined(TARGET_SH4)
3173
3174 /*
3175 * code and data structures from linux kernel:
3176 * include/asm-sh/sigcontext.h
3177 * arch/sh/kernel/signal.c
3178 */
3179
/* Guest-visible SH4 sigcontext; layout must match the kernel's
 * include/asm-sh/sigcontext.h, so fields may not be reordered.  */
struct target_sigcontext {
    target_ulong oldmask;       /* signal mask in effect before delivery */

    /* CPU registers */
    target_ulong sc_gregs[16];  /* general registers r0..r15 */
    target_ulong sc_pc;         /* program counter */
    target_ulong sc_pr;         /* procedure link register */
    target_ulong sc_sr;         /* status register */
    target_ulong sc_gbr;        /* global base register */
    target_ulong sc_mach;       /* multiply-accumulate high */
    target_ulong sc_macl;       /* multiply-accumulate low */

    /* FPU registers */
    target_ulong sc_fpregs[16];
    target_ulong sc_xfpregs[16]; /* second FP bank (not saved by setup_sigcontext) */
    unsigned int sc_fpscr;       /* FP status/control register */
    unsigned int sc_fpul;        /* FP communication register */
    unsigned int sc_ownedfp;     /* kernel lazy-FPU ownership flag */
};
3199
/* Non-RT signal frame pushed on the guest stack for SH4; mirrors
 * struct sigframe in the kernel's arch/sh/kernel/signal.c.  */
struct target_sigframe
{
    struct target_sigcontext sc;                 /* saved CPU state */
    target_ulong extramask[TARGET_NSIG_WORDS-1]; /* high words of the signal mask */
    uint16_t retcode[3];                         /* sigreturn trampoline code */
};
3206
3207
/* Guest view of struct ucontext for SH4 rt signal frames.  */
struct target_ucontext {
    target_ulong tuc_flags;
    struct target_ucontext *tuc_link;     /* context to resume (unused here) */
    target_stack_t tuc_stack;             /* sigaltstack state at delivery */
    struct target_sigcontext tuc_mcontext; /* saved machine state */
    target_sigset_t tuc_sigmask;  /* mask last for extensibility */
};
3215
/* RT signal frame pushed on the guest stack for SH4; mirrors
 * struct rt_sigframe in the kernel's arch/sh/kernel/signal.c.  */
struct target_rt_sigframe
{
    struct target_siginfo info;  /* siginfo passed to the handler (a1) */
    struct target_ucontext uc;   /* ucontext passed to the handler (a2) */
    uint16_t retcode[3];         /* rt_sigreturn trampoline code */
};
3222
3223
3224 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3225 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
3226
3227 static abi_ulong get_sigframe(struct target_sigaction *ka,
3228 unsigned long sp, size_t frame_size)
3229 {
3230 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3231 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3232 }
3233
3234 return (sp - frame_size) & -8ul;
3235 }
3236
3237 static void setup_sigcontext(struct target_sigcontext *sc,
3238 CPUSH4State *regs, unsigned long mask)
3239 {
3240 int i;
3241
3242 #define COPY(x) __put_user(regs->x, &sc->sc_##x)
3243 COPY(gregs[0]); COPY(gregs[1]);
3244 COPY(gregs[2]); COPY(gregs[3]);
3245 COPY(gregs[4]); COPY(gregs[5]);
3246 COPY(gregs[6]); COPY(gregs[7]);
3247 COPY(gregs[8]); COPY(gregs[9]);
3248 COPY(gregs[10]); COPY(gregs[11]);
3249 COPY(gregs[12]); COPY(gregs[13]);
3250 COPY(gregs[14]); COPY(gregs[15]);
3251 COPY(gbr); COPY(mach);
3252 COPY(macl); COPY(pr);
3253 COPY(sr); COPY(pc);
3254 #undef COPY
3255
3256 for (i=0; i<16; i++) {
3257 __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
3258 }
3259 __put_user(regs->fpscr, &sc->sc_fpscr);
3260 __put_user(regs->fpul, &sc->sc_fpul);
3261
3262 /* non-iBCS2 extensions.. */
3263 __put_user(mask, &sc->oldmask);
3264 }
3265
3266 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
3267 {
3268 int i;