4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
62 #include <sys/timerfd.h>
65 #include <sys/eventfd.h>
68 #include <sys/epoll.h>
71 #include "qemu/xattr.h"
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
93 #include <linux/mtio.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include "linux_loop.h"
118 #include "qemu/guest-random.h"
119 #include "user/syscall-trace.h"
120 #include "qapi/error.h"
121 #include "fd-trans.h"
#define CLONE_IO 0x80000000 /* Clone io context */

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 * * flag combinations that look like pthread_create()
 * * flag combinations that look like fork()
 * * flags we can implement within QEMU itself
 * * flags we can't support and will return an error for
 */

/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS \
    (CLONE_VM | CLONE_FS | CLONE_FILES | \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS \
    (CLONE_SETTLS | CLONE_PARENT_SETTID | \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS \
    (CLONE_SETTLS | CLONE_PARENT_SETTID | \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

/* Any fork flag bit outside this mask is rejected */
#define CLONE_INVALID_FORK_FLAGS \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

/* Any thread flag bit outside this mask is rejected */
#define CLONE_INVALID_THREAD_FLAGS \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
/* VFAT directory-read ioctls: return one entry in both long and short form */
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
192 #define _syscall0(type,name) \
193 static type name (void) \
195 return syscall(__NR_##name); \
198 #define _syscall1(type,name,type1,arg1) \
199 static type name (type1 arg1) \
201 return syscall(__NR_##name, arg1); \
204 #define _syscall2(type,name,type1,arg1,type2,arg2) \
205 static type name (type1 arg1,type2 arg2) \
207 return syscall(__NR_##name, arg1, arg2); \
210 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
211 static type name (type1 arg1,type2 arg2,type3 arg3) \
213 return syscall(__NR_##name, arg1, arg2, arg3); \
216 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
217 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
219 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
222 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
226 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
230 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
231 type5,arg5,type6,arg6) \
232 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
235 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* Aliases so the _syscallN() wrappers below can use sys_* names
 * without colliding with host libc declarations of the same calls.
 */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#define __NR_sys_statx __NR_statx
253 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
254 #define __NR__llseek __NR_lseek
257 /* Newer kernel ports have llseek() instead of _llseek() */
258 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
259 #define TARGET_NR__llseek TARGET_NR_llseek
262 #define __NR_sys_gettid __NR_gettid
263 _syscall0(int, sys_gettid
)
265 /* For the 64-bit guest on 32-bit host case we must emulate
266 * getdents using getdents64, because otherwise the host
267 * might hand us back more dirent records than we can fit
268 * into the guest buffer after structure format conversion.
269 * Otherwise we emulate getdents with getdents if the host has it.
271 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
272 #define EMULATE_GETDENTS_WITH_GETDENTS
275 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
276 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
278 #if (defined(TARGET_NR_getdents) && \
279 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
280 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
281 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
283 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
284 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
285 loff_t
*, res
, uint
, wh
);
287 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
288 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
290 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
291 #ifdef __NR_exit_group
292 _syscall1(int,exit_group
,int,error_code
)
294 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
295 _syscall1(int,set_tid_address
,int *,tidptr
)
297 #if defined(TARGET_NR_futex) && defined(__NR_futex)
298 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
299 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
301 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
302 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
303 unsigned long *, user_mask_ptr
);
304 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
305 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
306 unsigned long *, user_mask_ptr
);
307 #define __NR_sys_getcpu __NR_getcpu
308 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
309 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
311 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
312 struct __user_cap_data_struct
*, data
);
313 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
314 struct __user_cap_data_struct
*, data
);
315 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
316 _syscall2(int, ioprio_get
, int, which
, int, who
)
318 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
319 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
321 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
322 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
325 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
326 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
327 unsigned long, idx1
, unsigned long, idx2
)
331 * It is assumed that struct statx is architecture independent.
333 #if defined(TARGET_NR_statx) && defined(__NR_statx)
334 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
335 unsigned int, mask
, struct target_statx
*, statxbuf
)
338 static bitmask_transtbl fcntl_flags_tbl
[] = {
339 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
340 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
341 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
342 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
343 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
344 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
345 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
346 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
347 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
348 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
349 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
350 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
351 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
352 #if defined(O_DIRECT)
353 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
355 #if defined(O_NOATIME)
356 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
358 #if defined(O_CLOEXEC)
359 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
362 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
364 #if defined(O_TMPFILE)
365 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
367 /* Don't terminate the list prematurely on 64-bit host+guest. */
368 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
369 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
374 static int sys_getcwd1(char *buf
, size_t size
)
376 if (getcwd(buf
, size
) == NULL
) {
377 /* getcwd() sets errno */
380 return strlen(buf
)+1;
383 #ifdef TARGET_NR_utimensat
384 #if defined(__NR_utimensat)
385 #define __NR_sys_utimensat __NR_utimensat
386 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
387 const struct timespec
*,tsp
,int,flags
)
389 static int sys_utimensat(int dirfd
, const char *pathname
,
390 const struct timespec times
[2], int flags
)
396 #endif /* TARGET_NR_utimensat */
398 #ifdef TARGET_NR_renameat2
399 #if defined(__NR_renameat2)
400 #define __NR_sys_renameat2 __NR_renameat2
401 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
402 const char *, new, unsigned int, flags
)
404 static int sys_renameat2(int oldfd
, const char *old
,
405 int newfd
, const char *new, int flags
)
408 return renameat(oldfd
, old
, newfd
, new);
414 #endif /* TARGET_NR_renameat2 */
416 #ifdef CONFIG_INOTIFY
417 #include <sys/inotify.h>
419 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
420 static int sys_inotify_init(void)
422 return (inotify_init());
425 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
426 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
428 return (inotify_add_watch(fd
, pathname
, mask
));
431 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
432 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
434 return (inotify_rm_watch(fd
, wd
));
437 #ifdef CONFIG_INOTIFY1
438 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
439 static int sys_inotify_init1(int flags
)
441 return (inotify_init1(flags
));
446 /* Userspace can usually survive runtime without inotify */
447 #undef TARGET_NR_inotify_init
448 #undef TARGET_NR_inotify_init1
449 #undef TARGET_NR_inotify_add_watch
450 #undef TARGET_NR_inotify_rm_watch
451 #endif /* CONFIG_INOTIFY */
453 #if defined(TARGET_NR_prlimit64)
454 #ifndef __NR_prlimit64
455 # define __NR_prlimit64 -1
457 #define __NR_sys_prlimit64 __NR_prlimit64
458 /* The glibc rlimit structure may not be that used by the underlying syscall */
459 struct host_rlimit64
{
463 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
464 const struct host_rlimit64
*, new_limit
,
465 struct host_rlimit64
*, old_limit
)
469 #if defined(TARGET_NR_timer_create)
470 /* Maximum of 32 active POSIX timers allowed at any one time. */
471 static timer_t g_posix_timers
[32] = { 0, } ;
473 static inline int next_free_host_timer(void)
476 /* FIXME: Does finding the next free slot require a lock? */
477 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
478 if (g_posix_timers
[k
] == 0) {
479 g_posix_timers
[k
] = (timer_t
) 1;
487 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
489 static inline int regpairs_aligned(void *cpu_env
, int num
)
491 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
493 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
494 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
495 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
496 /* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
497 * of registers which translates to the same as ARM/MIPS, because we start with
499 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
500 #elif defined(TARGET_SH4)
501 /* SH4 doesn't align register pairs, except for p{read,write}64 */
502 static inline int regpairs_aligned(void *cpu_env
, int num
)
505 case TARGET_NR_pread64
:
506 case TARGET_NR_pwrite64
:
513 #elif defined(TARGET_XTENSA)
514 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
516 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 0; }
519 #define ERRNO_TABLE_SIZE 1200
521 /* target_to_host_errno_table[] is initialized from
522 * host_to_target_errno_table[] in syscall_init(). */
523 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
527 * This list is the union of errno values overridden in asm-<arch>/errno.h
528 * minus the errnos that are not actually generic to all archs.
530 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
531 [EAGAIN
] = TARGET_EAGAIN
,
532 [EIDRM
] = TARGET_EIDRM
,
533 [ECHRNG
] = TARGET_ECHRNG
,
534 [EL2NSYNC
] = TARGET_EL2NSYNC
,
535 [EL3HLT
] = TARGET_EL3HLT
,
536 [EL3RST
] = TARGET_EL3RST
,
537 [ELNRNG
] = TARGET_ELNRNG
,
538 [EUNATCH
] = TARGET_EUNATCH
,
539 [ENOCSI
] = TARGET_ENOCSI
,
540 [EL2HLT
] = TARGET_EL2HLT
,
541 [EDEADLK
] = TARGET_EDEADLK
,
542 [ENOLCK
] = TARGET_ENOLCK
,
543 [EBADE
] = TARGET_EBADE
,
544 [EBADR
] = TARGET_EBADR
,
545 [EXFULL
] = TARGET_EXFULL
,
546 [ENOANO
] = TARGET_ENOANO
,
547 [EBADRQC
] = TARGET_EBADRQC
,
548 [EBADSLT
] = TARGET_EBADSLT
,
549 [EBFONT
] = TARGET_EBFONT
,
550 [ENOSTR
] = TARGET_ENOSTR
,
551 [ENODATA
] = TARGET_ENODATA
,
552 [ETIME
] = TARGET_ETIME
,
553 [ENOSR
] = TARGET_ENOSR
,
554 [ENONET
] = TARGET_ENONET
,
555 [ENOPKG
] = TARGET_ENOPKG
,
556 [EREMOTE
] = TARGET_EREMOTE
,
557 [ENOLINK
] = TARGET_ENOLINK
,
558 [EADV
] = TARGET_EADV
,
559 [ESRMNT
] = TARGET_ESRMNT
,
560 [ECOMM
] = TARGET_ECOMM
,
561 [EPROTO
] = TARGET_EPROTO
,
562 [EDOTDOT
] = TARGET_EDOTDOT
,
563 [EMULTIHOP
] = TARGET_EMULTIHOP
,
564 [EBADMSG
] = TARGET_EBADMSG
,
565 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
566 [EOVERFLOW
] = TARGET_EOVERFLOW
,
567 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
568 [EBADFD
] = TARGET_EBADFD
,
569 [EREMCHG
] = TARGET_EREMCHG
,
570 [ELIBACC
] = TARGET_ELIBACC
,
571 [ELIBBAD
] = TARGET_ELIBBAD
,
572 [ELIBSCN
] = TARGET_ELIBSCN
,
573 [ELIBMAX
] = TARGET_ELIBMAX
,
574 [ELIBEXEC
] = TARGET_ELIBEXEC
,
575 [EILSEQ
] = TARGET_EILSEQ
,
576 [ENOSYS
] = TARGET_ENOSYS
,
577 [ELOOP
] = TARGET_ELOOP
,
578 [ERESTART
] = TARGET_ERESTART
,
579 [ESTRPIPE
] = TARGET_ESTRPIPE
,
580 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
581 [EUSERS
] = TARGET_EUSERS
,
582 [ENOTSOCK
] = TARGET_ENOTSOCK
,
583 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
584 [EMSGSIZE
] = TARGET_EMSGSIZE
,
585 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
586 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
587 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
588 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
589 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
590 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
591 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
592 [EADDRINUSE
] = TARGET_EADDRINUSE
,
593 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
594 [ENETDOWN
] = TARGET_ENETDOWN
,
595 [ENETUNREACH
] = TARGET_ENETUNREACH
,
596 [ENETRESET
] = TARGET_ENETRESET
,
597 [ECONNABORTED
] = TARGET_ECONNABORTED
,
598 [ECONNRESET
] = TARGET_ECONNRESET
,
599 [ENOBUFS
] = TARGET_ENOBUFS
,
600 [EISCONN
] = TARGET_EISCONN
,
601 [ENOTCONN
] = TARGET_ENOTCONN
,
602 [EUCLEAN
] = TARGET_EUCLEAN
,
603 [ENOTNAM
] = TARGET_ENOTNAM
,
604 [ENAVAIL
] = TARGET_ENAVAIL
,
605 [EISNAM
] = TARGET_EISNAM
,
606 [EREMOTEIO
] = TARGET_EREMOTEIO
,
607 [EDQUOT
] = TARGET_EDQUOT
,
608 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
609 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
610 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
611 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
612 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
613 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
614 [EALREADY
] = TARGET_EALREADY
,
615 [EINPROGRESS
] = TARGET_EINPROGRESS
,
616 [ESTALE
] = TARGET_ESTALE
,
617 [ECANCELED
] = TARGET_ECANCELED
,
618 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
619 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
621 [ENOKEY
] = TARGET_ENOKEY
,
624 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
627 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
630 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
633 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
635 #ifdef ENOTRECOVERABLE
636 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
639 [ENOMSG
] = TARGET_ENOMSG
,
642 [ERFKILL
] = TARGET_ERFKILL
,
645 [EHWPOISON
] = TARGET_EHWPOISON
,
649 static inline int host_to_target_errno(int err
)
651 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
652 host_to_target_errno_table
[err
]) {
653 return host_to_target_errno_table
[err
];
658 static inline int target_to_host_errno(int err
)
660 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
661 target_to_host_errno_table
[err
]) {
662 return target_to_host_errno_table
[err
];
667 static inline abi_long
get_errno(abi_long ret
)
670 return -host_to_target_errno(errno
);
675 const char *target_strerror(int err
)
677 if (err
== TARGET_ERESTARTSYS
) {
678 return "To be restarted";
680 if (err
== TARGET_QEMU_ESIGRETURN
) {
681 return "Successful exit from sigreturn";
684 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
687 return strerror(target_to_host_errno(err
));
690 #define safe_syscall0(type, name) \
691 static type safe_##name(void) \
693 return safe_syscall(__NR_##name); \
696 #define safe_syscall1(type, name, type1, arg1) \
697 static type safe_##name(type1 arg1) \
699 return safe_syscall(__NR_##name, arg1); \
702 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
703 static type safe_##name(type1 arg1, type2 arg2) \
705 return safe_syscall(__NR_##name, arg1, arg2); \
708 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
709 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
711 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
714 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
716 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
718 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
721 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
722 type4, arg4, type5, arg5) \
723 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
726 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
729 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
730 type4, arg4, type5, arg5, type6, arg6) \
731 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
732 type5 arg5, type6 arg6) \
734 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
737 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
738 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
739 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
740 int, flags
, mode_t
, mode
)
741 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
742 struct rusage
*, rusage
)
743 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
744 int, options
, struct rusage
*, rusage
)
745 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
746 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
747 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
748 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
749 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
751 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
752 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
754 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
755 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
756 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
757 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
758 safe_syscall2(int, tkill
, int, tid
, int, sig
)
759 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
760 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
761 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
762 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
763 unsigned long, pos_l
, unsigned long, pos_h
)
764 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
765 unsigned long, pos_l
, unsigned long, pos_h
)
766 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
768 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
769 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
770 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
771 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
772 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
773 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
774 safe_syscall2(int, flock
, int, fd
, int, operation
)
775 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
776 const struct timespec
*, uts
, size_t, sigsetsize
)
777 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
779 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
780 struct timespec
*, rem
)
781 #ifdef TARGET_NR_clock_nanosleep
782 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
783 const struct timespec
*, req
, struct timespec
*, rem
)
786 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
787 void *, ptr
, long, fifth
)
790 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
794 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
795 long, msgtype
, int, flags
)
797 #ifdef __NR_semtimedop
798 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
799 unsigned, nsops
, const struct timespec
*, timeout
)
801 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
802 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
803 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
804 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
805 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
807 /* We do ioctl like this rather than via safe_syscall3 to preserve the
808 * "third argument might be integer or pointer or not present" behaviour of
811 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
812 /* Similarly for fcntl. Note that callers must always:
813 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
814 * use the flock64 struct rather than unsuffixed flock
815 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
818 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
820 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
823 static inline int host_to_target_sock_type(int host_type
)
827 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
829 target_type
= TARGET_SOCK_DGRAM
;
832 target_type
= TARGET_SOCK_STREAM
;
835 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
839 #if defined(SOCK_CLOEXEC)
840 if (host_type
& SOCK_CLOEXEC
) {
841 target_type
|= TARGET_SOCK_CLOEXEC
;
845 #if defined(SOCK_NONBLOCK)
846 if (host_type
& SOCK_NONBLOCK
) {
847 target_type
|= TARGET_SOCK_NONBLOCK
;
854 static abi_ulong target_brk
;
855 static abi_ulong target_original_brk
;
856 static abi_ulong brk_page
;
858 void target_set_brk(abi_ulong new_brk
)
860 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
861 brk_page
= HOST_PAGE_ALIGN(target_brk
);
864 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
865 #define DEBUGF_BRK(message, args...)
867 /* do_brk() must return target values and target errnos. */
868 abi_long
do_brk(abi_ulong new_brk
)
870 abi_long mapped_addr
;
871 abi_ulong new_alloc_size
;
873 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
876 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
879 if (new_brk
< target_original_brk
) {
880 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
885 /* If the new brk is less than the highest page reserved to the
886 * target heap allocation, set it and we're almost done... */
887 if (new_brk
<= brk_page
) {
888 /* Heap contents are initialized to zero, as for anonymous
890 if (new_brk
> target_brk
) {
891 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
893 target_brk
= new_brk
;
894 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
898 /* We need to allocate more memory after the brk... Note that
899 * we don't use MAP_FIXED because that will map over the top of
900 * any existing mapping (like the one with the host libc or qemu
901 * itself); instead we treat "mapped but at wrong address" as
902 * a failure and unmap again.
904 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
905 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
906 PROT_READ
|PROT_WRITE
,
907 MAP_ANON
|MAP_PRIVATE
, 0, 0));
909 if (mapped_addr
== brk_page
) {
910 /* Heap contents are initialized to zero, as for anonymous
911 * mapped pages. Technically the new pages are already
912 * initialized to zero since they *are* anonymous mapped
913 * pages, however we have to take care with the contents that
914 * come from the remaining part of the previous page: it may
915 * contain garbage data due to a previous heap usage (grown
917 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
919 target_brk
= new_brk
;
920 brk_page
= HOST_PAGE_ALIGN(target_brk
);
921 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
924 } else if (mapped_addr
!= -1) {
925 /* Mapped but at wrong address, meaning there wasn't actually
926 * enough space for this brk.
928 target_munmap(mapped_addr
, new_alloc_size
);
930 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
933 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
936 #if defined(TARGET_ALPHA)
937 /* We (partially) emulate OSF/1 on Alpha, which requires we
938 return a proper errno, not an unchanged brk value. */
939 return -TARGET_ENOMEM
;
941 /* For everything else, return the previous break. */
945 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
946 abi_ulong target_fds_addr
,
950 abi_ulong b
, *target_fds
;
952 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
953 if (!(target_fds
= lock_user(VERIFY_READ
,
955 sizeof(abi_ulong
) * nw
,
957 return -TARGET_EFAULT
;
961 for (i
= 0; i
< nw
; i
++) {
962 /* grab the abi_ulong */
963 __get_user(b
, &target_fds
[i
]);
964 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
965 /* check the bit inside the abi_ulong */
972 unlock_user(target_fds
, target_fds_addr
, 0);
977 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
978 abi_ulong target_fds_addr
,
981 if (target_fds_addr
) {
982 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
983 return -TARGET_EFAULT
;
991 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
997 abi_ulong
*target_fds
;
999 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1000 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1002 sizeof(abi_ulong
) * nw
,
1004 return -TARGET_EFAULT
;
1007 for (i
= 0; i
< nw
; i
++) {
1009 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1010 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1013 __put_user(v
, &target_fds
[i
]);
1016 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1021 #if defined(__alpha__)
1022 #define HOST_HZ 1024
1027 static inline abi_long
host_to_target_clock_t(long ticks
)
1029 #if HOST_HZ == TARGET_HZ
1032 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1036 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1037 const struct rusage
*rusage
)
1039 struct target_rusage
*target_rusage
;
1041 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1042 return -TARGET_EFAULT
;
1043 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1044 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1045 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1046 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1047 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1048 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1049 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1050 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1051 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1052 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1053 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1054 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1055 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1056 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1057 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1058 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1059 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1060 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1061 unlock_user_struct(target_rusage
, target_addr
, 1);
1066 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1068 abi_ulong target_rlim_swap
;
1071 target_rlim_swap
= tswapal(target_rlim
);
1072 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1073 return RLIM_INFINITY
;
1075 result
= target_rlim_swap
;
1076 if (target_rlim_swap
!= (rlim_t
)result
)
1077 return RLIM_INFINITY
;
1082 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1084 abi_ulong target_rlim_swap
;
1087 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1088 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1090 target_rlim_swap
= rlim
;
1091 result
= tswapal(target_rlim_swap
);
1096 static inline int target_to_host_resource(int code
)
1099 case TARGET_RLIMIT_AS
:
1101 case TARGET_RLIMIT_CORE
:
1103 case TARGET_RLIMIT_CPU
:
1105 case TARGET_RLIMIT_DATA
:
1107 case TARGET_RLIMIT_FSIZE
:
1108 return RLIMIT_FSIZE
;
1109 case TARGET_RLIMIT_LOCKS
:
1110 return RLIMIT_LOCKS
;
1111 case TARGET_RLIMIT_MEMLOCK
:
1112 return RLIMIT_MEMLOCK
;
1113 case TARGET_RLIMIT_MSGQUEUE
:
1114 return RLIMIT_MSGQUEUE
;
1115 case TARGET_RLIMIT_NICE
:
1117 case TARGET_RLIMIT_NOFILE
:
1118 return RLIMIT_NOFILE
;
1119 case TARGET_RLIMIT_NPROC
:
1120 return RLIMIT_NPROC
;
1121 case TARGET_RLIMIT_RSS
:
1123 case TARGET_RLIMIT_RTPRIO
:
1124 return RLIMIT_RTPRIO
;
1125 case TARGET_RLIMIT_SIGPENDING
:
1126 return RLIMIT_SIGPENDING
;
1127 case TARGET_RLIMIT_STACK
:
1128 return RLIMIT_STACK
;
1134 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1135 abi_ulong target_tv_addr
)
1137 struct target_timeval
*target_tv
;
1139 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1140 return -TARGET_EFAULT
;
1143 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1144 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1146 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1151 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1152 const struct timeval
*tv
)
1154 struct target_timeval
*target_tv
;
1156 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1157 return -TARGET_EFAULT
;
1160 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1161 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1163 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1168 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1169 const struct timeval
*tv
)
1171 struct target__kernel_sock_timeval
*target_tv
;
1173 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1174 return -TARGET_EFAULT
;
1177 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1178 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1180 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1185 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
1186 abi_ulong target_addr
)
1188 struct target_timespec
*target_ts
;
1190 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1191 return -TARGET_EFAULT
;
1193 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1194 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1195 unlock_user_struct(target_ts
, target_addr
, 0);
1199 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1200 struct timespec
*host_ts
)
1202 struct target_timespec
*target_ts
;
1204 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1205 return -TARGET_EFAULT
;
1207 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1208 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1209 unlock_user_struct(target_ts
, target_addr
, 1);
1213 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1214 struct timespec
*host_ts
)
1216 struct target__kernel_timespec
*target_ts
;
1218 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1219 return -TARGET_EFAULT
;
1221 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1222 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1223 unlock_user_struct(target_ts
, target_addr
, 1);
1227 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1228 abi_ulong target_tz_addr
)
1230 struct target_timezone
*target_tz
;
1232 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1233 return -TARGET_EFAULT
;
1236 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1237 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1239 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1244 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1247 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1248 abi_ulong target_mq_attr_addr
)
1250 struct target_mq_attr
*target_mq_attr
;
1252 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1253 target_mq_attr_addr
, 1))
1254 return -TARGET_EFAULT
;
1256 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1257 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1258 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1259 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1261 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1266 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1267 const struct mq_attr
*attr
)
1269 struct target_mq_attr
*target_mq_attr
;
1271 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1272 target_mq_attr_addr
, 0))
1273 return -TARGET_EFAULT
;
1275 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1276 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1277 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1278 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1280 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1286 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1287 /* do_select() must return target values and target errnos. */
1288 static abi_long
do_select(int n
,
1289 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1290 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1292 fd_set rfds
, wfds
, efds
;
1293 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1295 struct timespec ts
, *ts_ptr
;
1298 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1302 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1306 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1311 if (target_tv_addr
) {
1312 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1313 return -TARGET_EFAULT
;
1314 ts
.tv_sec
= tv
.tv_sec
;
1315 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1321 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1324 if (!is_error(ret
)) {
1325 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1326 return -TARGET_EFAULT
;
1327 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1328 return -TARGET_EFAULT
;
1329 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1330 return -TARGET_EFAULT
;
1332 if (target_tv_addr
) {
1333 tv
.tv_sec
= ts
.tv_sec
;
1334 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1335 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1336 return -TARGET_EFAULT
;
#if defined(TARGET_WANT_OLD_SYS_SELECT)
/*
 * The pre-"new select" ABI passes a single guest pointer to a struct
 * holding all five select() arguments; unpack it and forward to
 * do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
1368 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1371 return pipe2(host_pipe
, flags
);
1377 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1378 int flags
, int is_pipe2
)
1382 ret
= flags ?
do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1385 return get_errno(ret
);
1387 /* Several targets have special calling conventions for the original
1388 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1390 #if defined(TARGET_ALPHA)
1391 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1392 return host_pipe
[0];
1393 #elif defined(TARGET_MIPS)
1394 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1395 return host_pipe
[0];
1396 #elif defined(TARGET_SH4)
1397 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1398 return host_pipe
[0];
1399 #elif defined(TARGET_SPARC)
1400 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1401 return host_pipe
[0];
1405 if (put_user_s32(host_pipe
[0], pipedes
)
1406 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1407 return -TARGET_EFAULT
;
1408 return get_errno(ret
);
1411 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1412 abi_ulong target_addr
,
1415 struct target_ip_mreqn
*target_smreqn
;
1417 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1419 return -TARGET_EFAULT
;
1420 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1421 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1422 if (len
== sizeof(struct target_ip_mreqn
))
1423 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1424 unlock_user(target_smreqn
, target_addr
, 0);
1429 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1430 abi_ulong target_addr
,
1433 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1434 sa_family_t sa_family
;
1435 struct target_sockaddr
*target_saddr
;
1437 if (fd_trans_target_to_host_addr(fd
)) {
1438 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1441 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1443 return -TARGET_EFAULT
;
1445 sa_family
= tswap16(target_saddr
->sa_family
);
1447 /* Oops. The caller might send a incomplete sun_path; sun_path
1448 * must be terminated by \0 (see the manual page), but
1449 * unfortunately it is quite common to specify sockaddr_un
1450 * length as "strlen(x->sun_path)" while it should be
1451 * "strlen(...) + 1". We'll fix that here if needed.
1452 * Linux kernel has a similar feature.
1455 if (sa_family
== AF_UNIX
) {
1456 if (len
< unix_maxlen
&& len
> 0) {
1457 char *cp
= (char*)target_saddr
;
1459 if ( cp
[len
-1] && !cp
[len
] )
1462 if (len
> unix_maxlen
)
1466 memcpy(addr
, target_saddr
, len
);
1467 addr
->sa_family
= sa_family
;
1468 if (sa_family
== AF_NETLINK
) {
1469 struct sockaddr_nl
*nladdr
;
1471 nladdr
= (struct sockaddr_nl
*)addr
;
1472 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1473 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1474 } else if (sa_family
== AF_PACKET
) {
1475 struct target_sockaddr_ll
*lladdr
;
1477 lladdr
= (struct target_sockaddr_ll
*)addr
;
1478 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1479 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1481 unlock_user(target_saddr
, target_addr
, 0);
1486 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1487 struct sockaddr
*addr
,
1490 struct target_sockaddr
*target_saddr
;
1497 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1499 return -TARGET_EFAULT
;
1500 memcpy(target_saddr
, addr
, len
);
1501 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1502 sizeof(target_saddr
->sa_family
)) {
1503 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1505 if (addr
->sa_family
== AF_NETLINK
&&
1506 len
>= sizeof(struct target_sockaddr_nl
)) {
1507 struct target_sockaddr_nl
*target_nl
=
1508 (struct target_sockaddr_nl
*)target_saddr
;
1509 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1510 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1511 } else if (addr
->sa_family
== AF_PACKET
) {
1512 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1513 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1514 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1515 } else if (addr
->sa_family
== AF_INET6
&&
1516 len
>= sizeof(struct target_sockaddr_in6
)) {
1517 struct target_sockaddr_in6
*target_in6
=
1518 (struct target_sockaddr_in6
*)target_saddr
;
1519 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1521 unlock_user(target_saddr
, target_addr
, len
);
1526 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1527 struct target_msghdr
*target_msgh
)
1529 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1530 abi_long msg_controllen
;
1531 abi_ulong target_cmsg_addr
;
1532 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1533 socklen_t space
= 0;
1535 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1536 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1538 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1539 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1540 target_cmsg_start
= target_cmsg
;
1542 return -TARGET_EFAULT
;
1544 while (cmsg
&& target_cmsg
) {
1545 void *data
= CMSG_DATA(cmsg
);
1546 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1548 int len
= tswapal(target_cmsg
->cmsg_len
)
1549 - sizeof(struct target_cmsghdr
);
1551 space
+= CMSG_SPACE(len
);
1552 if (space
> msgh
->msg_controllen
) {
1553 space
-= CMSG_SPACE(len
);
1554 /* This is a QEMU bug, since we allocated the payload
1555 * area ourselves (unlike overflow in host-to-target
1556 * conversion, which is just the guest giving us a buffer
1557 * that's too small). It can't happen for the payload types
1558 * we currently support; if it becomes an issue in future
1559 * we would need to improve our allocation strategy to
1560 * something more intelligent than "twice the size of the
1561 * target buffer we're reading from".
1563 gemu_log("Host cmsg overflow\n");
1567 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1568 cmsg
->cmsg_level
= SOL_SOCKET
;
1570 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1572 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1573 cmsg
->cmsg_len
= CMSG_LEN(len
);
1575 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1576 int *fd
= (int *)data
;
1577 int *target_fd
= (int *)target_data
;
1578 int i
, numfds
= len
/ sizeof(int);
1580 for (i
= 0; i
< numfds
; i
++) {
1581 __get_user(fd
[i
], target_fd
+ i
);
1583 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1584 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1585 struct ucred
*cred
= (struct ucred
*)data
;
1586 struct target_ucred
*target_cred
=
1587 (struct target_ucred
*)target_data
;
1589 __get_user(cred
->pid
, &target_cred
->pid
);
1590 __get_user(cred
->uid
, &target_cred
->uid
);
1591 __get_user(cred
->gid
, &target_cred
->gid
);
1593 gemu_log("Unsupported ancillary data: %d/%d\n",
1594 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1595 memcpy(data
, target_data
, len
);
1598 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1599 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1602 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1604 msgh
->msg_controllen
= space
;
1608 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1609 struct msghdr
*msgh
)
1611 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1612 abi_long msg_controllen
;
1613 abi_ulong target_cmsg_addr
;
1614 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1615 socklen_t space
= 0;
1617 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1618 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1620 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1621 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1622 target_cmsg_start
= target_cmsg
;
1624 return -TARGET_EFAULT
;
1626 while (cmsg
&& target_cmsg
) {
1627 void *data
= CMSG_DATA(cmsg
);
1628 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1630 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1631 int tgt_len
, tgt_space
;
1633 /* We never copy a half-header but may copy half-data;
1634 * this is Linux's behaviour in put_cmsg(). Note that
1635 * truncation here is a guest problem (which we report
1636 * to the guest via the CTRUNC bit), unlike truncation
1637 * in target_to_host_cmsg, which is a QEMU bug.
1639 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1640 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1644 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1645 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1647 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1649 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1651 /* Payload types which need a different size of payload on
1652 * the target must adjust tgt_len here.
1655 switch (cmsg
->cmsg_level
) {
1657 switch (cmsg
->cmsg_type
) {
1659 tgt_len
= sizeof(struct target_timeval
);
1669 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1670 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1671 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1674 /* We must now copy-and-convert len bytes of payload
1675 * into tgt_len bytes of destination space. Bear in mind
1676 * that in both source and destination we may be dealing
1677 * with a truncated value!
1679 switch (cmsg
->cmsg_level
) {
1681 switch (cmsg
->cmsg_type
) {
1684 int *fd
= (int *)data
;
1685 int *target_fd
= (int *)target_data
;
1686 int i
, numfds
= tgt_len
/ sizeof(int);
1688 for (i
= 0; i
< numfds
; i
++) {
1689 __put_user(fd
[i
], target_fd
+ i
);
1695 struct timeval
*tv
= (struct timeval
*)data
;
1696 struct target_timeval
*target_tv
=
1697 (struct target_timeval
*)target_data
;
1699 if (len
!= sizeof(struct timeval
) ||
1700 tgt_len
!= sizeof(struct target_timeval
)) {
1704 /* copy struct timeval to target */
1705 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1706 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1709 case SCM_CREDENTIALS
:
1711 struct ucred
*cred
= (struct ucred
*)data
;
1712 struct target_ucred
*target_cred
=
1713 (struct target_ucred
*)target_data
;
1715 __put_user(cred
->pid
, &target_cred
->pid
);
1716 __put_user(cred
->uid
, &target_cred
->uid
);
1717 __put_user(cred
->gid
, &target_cred
->gid
);
1726 switch (cmsg
->cmsg_type
) {
1729 uint32_t *v
= (uint32_t *)data
;
1730 uint32_t *t_int
= (uint32_t *)target_data
;
1732 if (len
!= sizeof(uint32_t) ||
1733 tgt_len
!= sizeof(uint32_t)) {
1736 __put_user(*v
, t_int
);
1742 struct sock_extended_err ee
;
1743 struct sockaddr_in offender
;
1745 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1746 struct errhdr_t
*target_errh
=
1747 (struct errhdr_t
*)target_data
;
1749 if (len
!= sizeof(struct errhdr_t
) ||
1750 tgt_len
!= sizeof(struct errhdr_t
)) {
1753 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1754 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1755 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1756 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1757 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1758 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1759 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1760 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1761 (void *) &errh
->offender
, sizeof(errh
->offender
));
1770 switch (cmsg
->cmsg_type
) {
1773 uint32_t *v
= (uint32_t *)data
;
1774 uint32_t *t_int
= (uint32_t *)target_data
;
1776 if (len
!= sizeof(uint32_t) ||
1777 tgt_len
!= sizeof(uint32_t)) {
1780 __put_user(*v
, t_int
);
1786 struct sock_extended_err ee
;
1787 struct sockaddr_in6 offender
;
1789 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1790 struct errhdr6_t
*target_errh
=
1791 (struct errhdr6_t
*)target_data
;
1793 if (len
!= sizeof(struct errhdr6_t
) ||
1794 tgt_len
!= sizeof(struct errhdr6_t
)) {
1797 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1798 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1799 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1800 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1801 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1802 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1803 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1804 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1805 (void *) &errh
->offender
, sizeof(errh
->offender
));
1815 gemu_log("Unsupported ancillary data: %d/%d\n",
1816 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1817 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1818 if (tgt_len
> len
) {
1819 memset(target_data
+ len
, 0, tgt_len
- len
);
1823 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1824 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1825 if (msg_controllen
< tgt_space
) {
1826 tgt_space
= msg_controllen
;
1828 msg_controllen
-= tgt_space
;
1830 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1831 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1834 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1836 target_msgh
->msg_controllen
= tswapal(space
);
1840 /* do_setsockopt() Must return target values and target errnos. */
1841 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1842 abi_ulong optval_addr
, socklen_t optlen
)
1846 struct ip_mreqn
*ip_mreq
;
1847 struct ip_mreq_source
*ip_mreq_source
;
1851 /* TCP options all take an 'int' value. */
1852 if (optlen
< sizeof(uint32_t))
1853 return -TARGET_EINVAL
;
1855 if (get_user_u32(val
, optval_addr
))
1856 return -TARGET_EFAULT
;
1857 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1864 case IP_ROUTER_ALERT
:
1868 case IP_MTU_DISCOVER
:
1875 case IP_MULTICAST_TTL
:
1876 case IP_MULTICAST_LOOP
:
1878 if (optlen
>= sizeof(uint32_t)) {
1879 if (get_user_u32(val
, optval_addr
))
1880 return -TARGET_EFAULT
;
1881 } else if (optlen
>= 1) {
1882 if (get_user_u8(val
, optval_addr
))
1883 return -TARGET_EFAULT
;
1885 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1887 case IP_ADD_MEMBERSHIP
:
1888 case IP_DROP_MEMBERSHIP
:
1889 if (optlen
< sizeof (struct target_ip_mreq
) ||
1890 optlen
> sizeof (struct target_ip_mreqn
))
1891 return -TARGET_EINVAL
;
1893 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1894 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1895 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1898 case IP_BLOCK_SOURCE
:
1899 case IP_UNBLOCK_SOURCE
:
1900 case IP_ADD_SOURCE_MEMBERSHIP
:
1901 case IP_DROP_SOURCE_MEMBERSHIP
:
1902 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1903 return -TARGET_EINVAL
;
1905 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1906 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1907 unlock_user (ip_mreq_source
, optval_addr
, 0);
1916 case IPV6_MTU_DISCOVER
:
1919 case IPV6_RECVPKTINFO
:
1920 case IPV6_UNICAST_HOPS
:
1921 case IPV6_MULTICAST_HOPS
:
1922 case IPV6_MULTICAST_LOOP
:
1924 case IPV6_RECVHOPLIMIT
:
1925 case IPV6_2292HOPLIMIT
:
1928 case IPV6_2292PKTINFO
:
1929 case IPV6_RECVTCLASS
:
1930 case IPV6_RECVRTHDR
:
1931 case IPV6_2292RTHDR
:
1932 case IPV6_RECVHOPOPTS
:
1933 case IPV6_2292HOPOPTS
:
1934 case IPV6_RECVDSTOPTS
:
1935 case IPV6_2292DSTOPTS
:
1937 #ifdef IPV6_RECVPATHMTU
1938 case IPV6_RECVPATHMTU
:
1940 #ifdef IPV6_TRANSPARENT
1941 case IPV6_TRANSPARENT
:
1943 #ifdef IPV6_FREEBIND
1946 #ifdef IPV6_RECVORIGDSTADDR
1947 case IPV6_RECVORIGDSTADDR
:
1950 if (optlen
< sizeof(uint32_t)) {
1951 return -TARGET_EINVAL
;
1953 if (get_user_u32(val
, optval_addr
)) {
1954 return -TARGET_EFAULT
;
1956 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1957 &val
, sizeof(val
)));
1961 struct in6_pktinfo pki
;
1963 if (optlen
< sizeof(pki
)) {
1964 return -TARGET_EINVAL
;
1967 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
1968 return -TARGET_EFAULT
;
1971 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
1973 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1974 &pki
, sizeof(pki
)));
1977 case IPV6_ADD_MEMBERSHIP
:
1978 case IPV6_DROP_MEMBERSHIP
:
1980 struct ipv6_mreq ipv6mreq
;
1982 if (optlen
< sizeof(ipv6mreq
)) {
1983 return -TARGET_EINVAL
;
1986 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
1987 return -TARGET_EFAULT
;
1990 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
1992 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1993 &ipv6mreq
, sizeof(ipv6mreq
)));
2004 struct icmp6_filter icmp6f
;
2006 if (optlen
> sizeof(icmp6f
)) {
2007 optlen
= sizeof(icmp6f
);
2010 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2011 return -TARGET_EFAULT
;
2014 for (val
= 0; val
< 8; val
++) {
2015 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2018 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2030 /* those take an u32 value */
2031 if (optlen
< sizeof(uint32_t)) {
2032 return -TARGET_EINVAL
;
2035 if (get_user_u32(val
, optval_addr
)) {
2036 return -TARGET_EFAULT
;
2038 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2039 &val
, sizeof(val
)));
2046 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2051 char *alg_key
= g_malloc(optlen
);
2054 return -TARGET_ENOMEM
;
2056 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2058 return -TARGET_EFAULT
;
2060 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2065 case ALG_SET_AEAD_AUTHSIZE
:
2067 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2076 case TARGET_SOL_SOCKET
:
2078 case TARGET_SO_RCVTIMEO
:
2082 optname
= SO_RCVTIMEO
;
2085 if (optlen
!= sizeof(struct target_timeval
)) {
2086 return -TARGET_EINVAL
;
2089 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2090 return -TARGET_EFAULT
;
2093 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2097 case TARGET_SO_SNDTIMEO
:
2098 optname
= SO_SNDTIMEO
;
2100 case TARGET_SO_ATTACH_FILTER
:
2102 struct target_sock_fprog
*tfprog
;
2103 struct target_sock_filter
*tfilter
;
2104 struct sock_fprog fprog
;
2105 struct sock_filter
*filter
;
2108 if (optlen
!= sizeof(*tfprog
)) {
2109 return -TARGET_EINVAL
;
2111 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2112 return -TARGET_EFAULT
;
2114 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2115 tswapal(tfprog
->filter
), 0)) {
2116 unlock_user_struct(tfprog
, optval_addr
, 1);
2117 return -TARGET_EFAULT
;
2120 fprog
.len
= tswap16(tfprog
->len
);
2121 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2122 if (filter
== NULL
) {
2123 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2124 unlock_user_struct(tfprog
, optval_addr
, 1);
2125 return -TARGET_ENOMEM
;
2127 for (i
= 0; i
< fprog
.len
; i
++) {
2128 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2129 filter
[i
].jt
= tfilter
[i
].jt
;
2130 filter
[i
].jf
= tfilter
[i
].jf
;
2131 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2133 fprog
.filter
= filter
;
2135 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2136 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2139 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2140 unlock_user_struct(tfprog
, optval_addr
, 1);
2143 case TARGET_SO_BINDTODEVICE
:
2145 char *dev_ifname
, *addr_ifname
;
2147 if (optlen
> IFNAMSIZ
- 1) {
2148 optlen
= IFNAMSIZ
- 1;
2150 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2152 return -TARGET_EFAULT
;
2154 optname
= SO_BINDTODEVICE
;
2155 addr_ifname
= alloca(IFNAMSIZ
);
2156 memcpy(addr_ifname
, dev_ifname
, optlen
);
2157 addr_ifname
[optlen
] = 0;
2158 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2159 addr_ifname
, optlen
));
2160 unlock_user (dev_ifname
, optval_addr
, 0);
2163 case TARGET_SO_LINGER
:
2166 struct target_linger
*tlg
;
2168 if (optlen
!= sizeof(struct target_linger
)) {
2169 return -TARGET_EINVAL
;
2171 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2172 return -TARGET_EFAULT
;
2174 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2175 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2176 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2178 unlock_user_struct(tlg
, optval_addr
, 0);
2181 /* Options with 'int' argument. */
2182 case TARGET_SO_DEBUG
:
2185 case TARGET_SO_REUSEADDR
:
2186 optname
= SO_REUSEADDR
;
2189 case TARGET_SO_REUSEPORT
:
2190 optname
= SO_REUSEPORT
;
2193 case TARGET_SO_TYPE
:
2196 case TARGET_SO_ERROR
:
2199 case TARGET_SO_DONTROUTE
:
2200 optname
= SO_DONTROUTE
;
2202 case TARGET_SO_BROADCAST
:
2203 optname
= SO_BROADCAST
;
2205 case TARGET_SO_SNDBUF
:
2206 optname
= SO_SNDBUF
;
2208 case TARGET_SO_SNDBUFFORCE
:
2209 optname
= SO_SNDBUFFORCE
;
2211 case TARGET_SO_RCVBUF
:
2212 optname
= SO_RCVBUF
;
2214 case TARGET_SO_RCVBUFFORCE
:
2215 optname
= SO_RCVBUFFORCE
;
2217 case TARGET_SO_KEEPALIVE
:
2218 optname
= SO_KEEPALIVE
;
2220 case TARGET_SO_OOBINLINE
:
2221 optname
= SO_OOBINLINE
;
2223 case TARGET_SO_NO_CHECK
:
2224 optname
= SO_NO_CHECK
;
2226 case TARGET_SO_PRIORITY
:
2227 optname
= SO_PRIORITY
;
2230 case TARGET_SO_BSDCOMPAT
:
2231 optname
= SO_BSDCOMPAT
;
2234 case TARGET_SO_PASSCRED
:
2235 optname
= SO_PASSCRED
;
2237 case TARGET_SO_PASSSEC
:
2238 optname
= SO_PASSSEC
;
2240 case TARGET_SO_TIMESTAMP
:
2241 optname
= SO_TIMESTAMP
;
2243 case TARGET_SO_RCVLOWAT
:
2244 optname
= SO_RCVLOWAT
;
2249 if (optlen
< sizeof(uint32_t))
2250 return -TARGET_EINVAL
;
2252 if (get_user_u32(val
, optval_addr
))
2253 return -TARGET_EFAULT
;
2254 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2259 case NETLINK_PKTINFO
:
2260 case NETLINK_ADD_MEMBERSHIP
:
2261 case NETLINK_DROP_MEMBERSHIP
:
2262 case NETLINK_BROADCAST_ERROR
:
2263 case NETLINK_NO_ENOBUFS
:
2264 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2265 case NETLINK_LISTEN_ALL_NSID
:
2266 case NETLINK_CAP_ACK
:
2267 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2268 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2269 case NETLINK_EXT_ACK
:
2270 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2271 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2272 case NETLINK_GET_STRICT_CHK
:
2273 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2279 if (optlen
< sizeof(uint32_t)) {
2280 return -TARGET_EINVAL
;
2282 if (get_user_u32(val
, optval_addr
)) {
2283 return -TARGET_EFAULT
;
2285 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2288 #endif /* SOL_NETLINK */
2291 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2292 ret
= -TARGET_ENOPROTOOPT
;
2297 /* do_getsockopt() Must return target values and target errnos. */
2298 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2299 abi_ulong optval_addr
, abi_ulong optlen
)
2306 case TARGET_SOL_SOCKET
:
2309 /* These don't just return a single integer */
2310 case TARGET_SO_RCVTIMEO
:
2311 case TARGET_SO_SNDTIMEO
:
2312 case TARGET_SO_PEERNAME
:
2314 case TARGET_SO_PEERCRED
: {
2317 struct target_ucred
*tcr
;
2319 if (get_user_u32(len
, optlen
)) {
2320 return -TARGET_EFAULT
;
2323 return -TARGET_EINVAL
;
2327 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2335 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2336 return -TARGET_EFAULT
;
2338 __put_user(cr
.pid
, &tcr
->pid
);
2339 __put_user(cr
.uid
, &tcr
->uid
);
2340 __put_user(cr
.gid
, &tcr
->gid
);
2341 unlock_user_struct(tcr
, optval_addr
, 1);
2342 if (put_user_u32(len
, optlen
)) {
2343 return -TARGET_EFAULT
;
2347 case TARGET_SO_LINGER
:
2351 struct target_linger
*tlg
;
2353 if (get_user_u32(len
, optlen
)) {
2354 return -TARGET_EFAULT
;
2357 return -TARGET_EINVAL
;
2361 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2369 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2370 return -TARGET_EFAULT
;
2372 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2373 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2374 unlock_user_struct(tlg
, optval_addr
, 1);
2375 if (put_user_u32(len
, optlen
)) {
2376 return -TARGET_EFAULT
;
2380 /* Options with 'int' argument. */
2381 case TARGET_SO_DEBUG
:
2384 case TARGET_SO_REUSEADDR
:
2385 optname
= SO_REUSEADDR
;
2388 case TARGET_SO_REUSEPORT
:
2389 optname
= SO_REUSEPORT
;
2392 case TARGET_SO_TYPE
:
2395 case TARGET_SO_ERROR
:
2398 case TARGET_SO_DONTROUTE
:
2399 optname
= SO_DONTROUTE
;
2401 case TARGET_SO_BROADCAST
:
2402 optname
= SO_BROADCAST
;
2404 case TARGET_SO_SNDBUF
:
2405 optname
= SO_SNDBUF
;
2407 case TARGET_SO_RCVBUF
:
2408 optname
= SO_RCVBUF
;
2410 case TARGET_SO_KEEPALIVE
:
2411 optname
= SO_KEEPALIVE
;
2413 case TARGET_SO_OOBINLINE
:
2414 optname
= SO_OOBINLINE
;
2416 case TARGET_SO_NO_CHECK
:
2417 optname
= SO_NO_CHECK
;
2419 case TARGET_SO_PRIORITY
:
2420 optname
= SO_PRIORITY
;
2423 case TARGET_SO_BSDCOMPAT
:
2424 optname
= SO_BSDCOMPAT
;
2427 case TARGET_SO_PASSCRED
:
2428 optname
= SO_PASSCRED
;
2430 case TARGET_SO_TIMESTAMP
:
2431 optname
= SO_TIMESTAMP
;
2433 case TARGET_SO_RCVLOWAT
:
2434 optname
= SO_RCVLOWAT
;
2436 case TARGET_SO_ACCEPTCONN
:
2437 optname
= SO_ACCEPTCONN
;
2444 /* TCP options all take an 'int' value. */
2446 if (get_user_u32(len
, optlen
))
2447 return -TARGET_EFAULT
;
2449 return -TARGET_EINVAL
;
2451 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2454 if (optname
== SO_TYPE
) {
2455 val
= host_to_target_sock_type(val
);
2460 if (put_user_u32(val
, optval_addr
))
2461 return -TARGET_EFAULT
;
2463 if (put_user_u8(val
, optval_addr
))
2464 return -TARGET_EFAULT
;
2466 if (put_user_u32(len
, optlen
))
2467 return -TARGET_EFAULT
;
2474 case IP_ROUTER_ALERT
:
2478 case IP_MTU_DISCOVER
:
2484 case IP_MULTICAST_TTL
:
2485 case IP_MULTICAST_LOOP
:
2486 if (get_user_u32(len
, optlen
))
2487 return -TARGET_EFAULT
;
2489 return -TARGET_EINVAL
;
2491 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2494 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2496 if (put_user_u32(len
, optlen
)
2497 || put_user_u8(val
, optval_addr
))
2498 return -TARGET_EFAULT
;
2500 if (len
> sizeof(int))
2502 if (put_user_u32(len
, optlen
)
2503 || put_user_u32(val
, optval_addr
))
2504 return -TARGET_EFAULT
;
2508 ret
= -TARGET_ENOPROTOOPT
;
2514 case IPV6_MTU_DISCOVER
:
2517 case IPV6_RECVPKTINFO
:
2518 case IPV6_UNICAST_HOPS
:
2519 case IPV6_MULTICAST_HOPS
:
2520 case IPV6_MULTICAST_LOOP
:
2522 case IPV6_RECVHOPLIMIT
:
2523 case IPV6_2292HOPLIMIT
:
2526 case IPV6_2292PKTINFO
:
2527 case IPV6_RECVTCLASS
:
2528 case IPV6_RECVRTHDR
:
2529 case IPV6_2292RTHDR
:
2530 case IPV6_RECVHOPOPTS
:
2531 case IPV6_2292HOPOPTS
:
2532 case IPV6_RECVDSTOPTS
:
2533 case IPV6_2292DSTOPTS
:
2535 #ifdef IPV6_RECVPATHMTU
2536 case IPV6_RECVPATHMTU
:
2538 #ifdef IPV6_TRANSPARENT
2539 case IPV6_TRANSPARENT
:
2541 #ifdef IPV6_FREEBIND
2544 #ifdef IPV6_RECVORIGDSTADDR
2545 case IPV6_RECVORIGDSTADDR
:
2547 if (get_user_u32(len
, optlen
))
2548 return -TARGET_EFAULT
;
2550 return -TARGET_EINVAL
;
2552 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2555 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2557 if (put_user_u32(len
, optlen
)
2558 || put_user_u8(val
, optval_addr
))
2559 return -TARGET_EFAULT
;
2561 if (len
> sizeof(int))
2563 if (put_user_u32(len
, optlen
)
2564 || put_user_u32(val
, optval_addr
))
2565 return -TARGET_EFAULT
;
2569 ret
= -TARGET_ENOPROTOOPT
;
2576 case NETLINK_PKTINFO
:
2577 case NETLINK_BROADCAST_ERROR
:
2578 case NETLINK_NO_ENOBUFS
:
2579 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2580 case NETLINK_LISTEN_ALL_NSID
:
2581 case NETLINK_CAP_ACK
:
2582 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2583 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2584 case NETLINK_EXT_ACK
:
2585 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2586 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2587 case NETLINK_GET_STRICT_CHK
:
2588 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2589 if (get_user_u32(len
, optlen
)) {
2590 return -TARGET_EFAULT
;
2592 if (len
!= sizeof(val
)) {
2593 return -TARGET_EINVAL
;
2596 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2600 if (put_user_u32(lv
, optlen
)
2601 || put_user_u32(val
, optval_addr
)) {
2602 return -TARGET_EFAULT
;
2605 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2606 case NETLINK_LIST_MEMBERSHIPS
:
2610 if (get_user_u32(len
, optlen
)) {
2611 return -TARGET_EFAULT
;
2614 return -TARGET_EINVAL
;
2616 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
2618 return -TARGET_EFAULT
;
2621 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
2623 unlock_user(results
, optval_addr
, 0);
2626 /* swap host endianess to target endianess. */
2627 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
2628 results
[i
] = tswap32(results
[i
]);
2630 if (put_user_u32(lv
, optlen
)) {
2631 return -TARGET_EFAULT
;
2633 unlock_user(results
, optval_addr
, 0);
2636 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2641 #endif /* SOL_NETLINK */
2644 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
2646 ret
= -TARGET_EOPNOTSUPP
;
2652 /* Convert target low/high pair representing file offset into the host
2653 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2654 * as the kernel doesn't handle them either.
2656 static void target_to_host_low_high(abi_ulong tlow
,
2658 unsigned long *hlow
,
2659 unsigned long *hhigh
)
2661 uint64_t off
= tlow
|
2662 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
2663 TARGET_LONG_BITS
/ 2;
2666 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
2669 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
2670 abi_ulong count
, int copy
)
2672 struct target_iovec
*target_vec
;
2674 abi_ulong total_len
, max_len
;
2677 bool bad_address
= false;
2683 if (count
> IOV_MAX
) {
2688 vec
= g_try_new0(struct iovec
, count
);
2694 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2695 count
* sizeof(struct target_iovec
), 1);
2696 if (target_vec
== NULL
) {
2701 /* ??? If host page size > target page size, this will result in a
2702 value larger than what we can actually support. */
2703 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
2706 for (i
= 0; i
< count
; i
++) {
2707 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2708 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2713 } else if (len
== 0) {
2714 /* Zero length pointer is ignored. */
2715 vec
[i
].iov_base
= 0;
2717 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
2718 /* If the first buffer pointer is bad, this is a fault. But
2719 * subsequent bad buffers will result in a partial write; this
2720 * is realized by filling the vector with null pointers and
2722 if (!vec
[i
].iov_base
) {
2733 if (len
> max_len
- total_len
) {
2734 len
= max_len
- total_len
;
2737 vec
[i
].iov_len
= len
;
2741 unlock_user(target_vec
, target_addr
, 0);
2746 if (tswapal(target_vec
[i
].iov_len
) > 0) {
2747 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
2750 unlock_user(target_vec
, target_addr
, 0);
2757 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
2758 abi_ulong count
, int copy
)
2760 struct target_iovec
*target_vec
;
2763 target_vec
= lock_user(VERIFY_READ
, target_addr
,
2764 count
* sizeof(struct target_iovec
), 1);
2766 for (i
= 0; i
< count
; i
++) {
2767 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
2768 abi_long len
= tswapal(target_vec
[i
].iov_len
);
2772 unlock_user(vec
[i
].iov_base
, base
, copy ? vec
[i
].iov_len
: 0);
2774 unlock_user(target_vec
, target_addr
, 0);
2780 static inline int target_to_host_sock_type(int *type
)
2783 int target_type
= *type
;
2785 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
2786 case TARGET_SOCK_DGRAM
:
2787 host_type
= SOCK_DGRAM
;
2789 case TARGET_SOCK_STREAM
:
2790 host_type
= SOCK_STREAM
;
2793 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
2796 if (target_type
& TARGET_SOCK_CLOEXEC
) {
2797 #if defined(SOCK_CLOEXEC)
2798 host_type
|= SOCK_CLOEXEC
;
2800 return -TARGET_EINVAL
;
2803 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2804 #if defined(SOCK_NONBLOCK)
2805 host_type
|= SOCK_NONBLOCK
;
2806 #elif !defined(O_NONBLOCK)
2807 return -TARGET_EINVAL
;
2814 /* Try to emulate socket type flags after socket creation. */
2815 static int sock_flags_fixup(int fd
, int target_type
)
2817 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2818 if (target_type
& TARGET_SOCK_NONBLOCK
) {
2819 int flags
= fcntl(fd
, F_GETFL
);
2820 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
2822 return -TARGET_EINVAL
;
2829 /* do_socket() Must return target values and target errnos. */
2830 static abi_long
do_socket(int domain
, int type
, int protocol
)
2832 int target_type
= type
;
2835 ret
= target_to_host_sock_type(&type
);
2840 if (domain
== PF_NETLINK
&& !(
2841 #ifdef CONFIG_RTNETLINK
2842 protocol
== NETLINK_ROUTE
||
2844 protocol
== NETLINK_KOBJECT_UEVENT
||
2845 protocol
== NETLINK_AUDIT
)) {
2846 return -EPFNOSUPPORT
;
2849 if (domain
== AF_PACKET
||
2850 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
2851 protocol
= tswap16(protocol
);
2854 ret
= get_errno(socket(domain
, type
, protocol
));
2856 ret
= sock_flags_fixup(ret
, target_type
);
2857 if (type
== SOCK_PACKET
) {
2858 /* Manage an obsolete case :
2859 * if socket type is SOCK_PACKET, bind by name
2861 fd_trans_register(ret
, &target_packet_trans
);
2862 } else if (domain
== PF_NETLINK
) {
2864 #ifdef CONFIG_RTNETLINK
2866 fd_trans_register(ret
, &target_netlink_route_trans
);
2869 case NETLINK_KOBJECT_UEVENT
:
2870 /* nothing to do: messages are strings */
2873 fd_trans_register(ret
, &target_netlink_audit_trans
);
2876 g_assert_not_reached();
2883 /* do_bind() Must return target values and target errnos. */
2884 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
2890 if ((int)addrlen
< 0) {
2891 return -TARGET_EINVAL
;
2894 addr
= alloca(addrlen
+1);
2896 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2900 return get_errno(bind(sockfd
, addr
, addrlen
));
2903 /* do_connect() Must return target values and target errnos. */
2904 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
2910 if ((int)addrlen
< 0) {
2911 return -TARGET_EINVAL
;
2914 addr
= alloca(addrlen
+1);
2916 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
2920 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
2923 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
2924 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
2925 int flags
, int send
)
2931 abi_ulong target_vec
;
2933 if (msgp
->msg_name
) {
2934 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
2935 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
2936 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
2937 tswapal(msgp
->msg_name
),
2939 if (ret
== -TARGET_EFAULT
) {
2940 /* For connected sockets msg_name and msg_namelen must
2941 * be ignored, so returning EFAULT immediately is wrong.
2942 * Instead, pass a bad msg_name to the host kernel, and
2943 * let it decide whether to return EFAULT or not.
2945 msg
.msg_name
= (void *)-1;
2950 msg
.msg_name
= NULL
;
2951 msg
.msg_namelen
= 0;
2953 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
2954 msg
.msg_control
= alloca(msg
.msg_controllen
);
2955 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
2957 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
2959 count
= tswapal(msgp
->msg_iovlen
);
2960 target_vec
= tswapal(msgp
->msg_iov
);
2962 if (count
> IOV_MAX
) {
2963 /* sendrcvmsg returns a different errno for this condition than
2964 * readv/writev, so we must catch it here before lock_iovec() does.
2966 ret
= -TARGET_EMSGSIZE
;
2970 vec
= lock_iovec(send ? VERIFY_READ
: VERIFY_WRITE
,
2971 target_vec
, count
, send
);
2973 ret
= -host_to_target_errno(errno
);
2976 msg
.msg_iovlen
= count
;
2980 if (fd_trans_target_to_host_data(fd
)) {
2983 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
2984 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
2985 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
2986 msg
.msg_iov
->iov_len
);
2988 msg
.msg_iov
->iov_base
= host_msg
;
2989 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
2993 ret
= target_to_host_cmsg(&msg
, msgp
);
2995 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
2999 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3000 if (!is_error(ret
)) {
3002 if (fd_trans_host_to_target_data(fd
)) {
3003 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3004 MIN(msg
.msg_iov
->iov_len
, len
));
3006 ret
= host_to_target_cmsg(msgp
, &msg
);
3008 if (!is_error(ret
)) {
3009 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3010 msgp
->msg_flags
= tswap32(msg
.msg_flags
);
3011 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3012 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3013 msg
.msg_name
, msg
.msg_namelen
);
3025 unlock_iovec(vec
, target_vec
, count
, !send
);
3030 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3031 int flags
, int send
)
3034 struct target_msghdr
*msgp
;
3036 if (!lock_user_struct(send ? VERIFY_READ
: VERIFY_WRITE
,
3040 return -TARGET_EFAULT
;
3042 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3043 unlock_user_struct(msgp
, target_msg
, send ?
0 : 1);
3047 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3048 * so it might not have this *mmsg-specific flag either.
3050 #ifndef MSG_WAITFORONE
3051 #define MSG_WAITFORONE 0x10000
3054 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3055 unsigned int vlen
, unsigned int flags
,
3058 struct target_mmsghdr
*mmsgp
;
3062 if (vlen
> UIO_MAXIOV
) {
3066 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3068 return -TARGET_EFAULT
;
3071 for (i
= 0; i
< vlen
; i
++) {
3072 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3073 if (is_error(ret
)) {
3076 mmsgp
[i
].msg_len
= tswap32(ret
);
3077 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3078 if (flags
& MSG_WAITFORONE
) {
3079 flags
|= MSG_DONTWAIT
;
3083 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3085 /* Return number of datagrams sent if we sent any at all;
3086 * otherwise return the error.
3094 /* do_accept4() Must return target values and target errnos. */
3095 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3096 abi_ulong target_addrlen_addr
, int flags
)
3098 socklen_t addrlen
, ret_addrlen
;
3103 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3105 if (target_addr
== 0) {
3106 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3109 /* linux returns EINVAL if addrlen pointer is invalid */
3110 if (get_user_u32(addrlen
, target_addrlen_addr
))
3111 return -TARGET_EINVAL
;
3113 if ((int)addrlen
< 0) {
3114 return -TARGET_EINVAL
;
3117 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3118 return -TARGET_EINVAL
;
3120 addr
= alloca(addrlen
);
3122 ret_addrlen
= addrlen
;
3123 ret
= get_errno(safe_accept4(fd
, addr
, &ret_addrlen
, host_flags
));
3124 if (!is_error(ret
)) {
3125 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3126 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3127 ret
= -TARGET_EFAULT
;
3133 /* do_getpeername() Must return target values and target errnos. */
3134 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3135 abi_ulong target_addrlen_addr
)
3137 socklen_t addrlen
, ret_addrlen
;
3141 if (get_user_u32(addrlen
, target_addrlen_addr
))
3142 return -TARGET_EFAULT
;
3144 if ((int)addrlen
< 0) {
3145 return -TARGET_EINVAL
;
3148 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3149 return -TARGET_EFAULT
;
3151 addr
= alloca(addrlen
);
3153 ret_addrlen
= addrlen
;
3154 ret
= get_errno(getpeername(fd
, addr
, &ret_addrlen
));
3155 if (!is_error(ret
)) {
3156 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3157 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3158 ret
= -TARGET_EFAULT
;
3164 /* do_getsockname() Must return target values and target errnos. */
3165 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3166 abi_ulong target_addrlen_addr
)
3168 socklen_t addrlen
, ret_addrlen
;
3172 if (get_user_u32(addrlen
, target_addrlen_addr
))
3173 return -TARGET_EFAULT
;
3175 if ((int)addrlen
< 0) {
3176 return -TARGET_EINVAL
;
3179 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3180 return -TARGET_EFAULT
;
3182 addr
= alloca(addrlen
);
3184 ret_addrlen
= addrlen
;
3185 ret
= get_errno(getsockname(fd
, addr
, &ret_addrlen
));
3186 if (!is_error(ret
)) {
3187 host_to_target_sockaddr(target_addr
, addr
, MIN(addrlen
, ret_addrlen
));
3188 if (put_user_u32(ret_addrlen
, target_addrlen_addr
)) {
3189 ret
= -TARGET_EFAULT
;
3195 /* do_socketpair() Must return target values and target errnos. */
3196 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3197 abi_ulong target_tab_addr
)
3202 target_to_host_sock_type(&type
);
3204 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3205 if (!is_error(ret
)) {
3206 if (put_user_s32(tab
[0], target_tab_addr
)
3207 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3208 ret
= -TARGET_EFAULT
;
3213 /* do_sendto() Must return target values and target errnos. */
3214 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3215 abi_ulong target_addr
, socklen_t addrlen
)
3219 void *copy_msg
= NULL
;
3222 if ((int)addrlen
< 0) {
3223 return -TARGET_EINVAL
;
3226 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3228 return -TARGET_EFAULT
;
3229 if (fd_trans_target_to_host_data(fd
)) {
3230 copy_msg
= host_msg
;
3231 host_msg
= g_malloc(len
);
3232 memcpy(host_msg
, copy_msg
, len
);
3233 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3239 addr
= alloca(addrlen
+1);
3240 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3244 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3246 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3251 host_msg
= copy_msg
;
3253 unlock_user(host_msg
, msg
, 0);
3257 /* do_recvfrom() Must return target values and target errnos. */
3258 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3259 abi_ulong target_addr
,
3260 abi_ulong target_addrlen
)
3262 socklen_t addrlen
, ret_addrlen
;
3267 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3269 return -TARGET_EFAULT
;
3271 if (get_user_u32(addrlen
, target_addrlen
)) {
3272 ret
= -TARGET_EFAULT
;
3275 if ((int)addrlen
< 0) {
3276 ret
= -TARGET_EINVAL
;
3279 addr
= alloca(addrlen
);
3280 ret_addrlen
= addrlen
;
3281 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3282 addr
, &ret_addrlen
));
3284 addr
= NULL
; /* To keep compiler quiet. */
3285 addrlen
= 0; /* To keep compiler quiet. */
3286 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3288 if (!is_error(ret
)) {
3289 if (fd_trans_host_to_target_data(fd
)) {
3291 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
3292 if (is_error(trans
)) {
3298 host_to_target_sockaddr(target_addr
, addr
,
3299 MIN(addrlen
, ret_addrlen
));
3300 if (put_user_u32(ret_addrlen
, target_addrlen
)) {
3301 ret
= -TARGET_EFAULT
;
3305 unlock_user(host_msg
, msg
, len
);
3308 unlock_user(host_msg
, msg
, 0);
3313 #ifdef TARGET_NR_socketcall
3314 /* do_socketcall() must return target values and target errnos. */
3315 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3317 static const unsigned nargs
[] = { /* number of arguments per operation */
3318 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
3319 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
3320 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
3321 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
3322 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
3323 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
3324 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
3325 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
3326 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
3327 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
3328 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
3329 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
3330 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
3331 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3332 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3333 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
3334 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
3335 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
3336 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
3337 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
3339 abi_long a
[6]; /* max 6 args */
3342 /* check the range of the first argument num */
3343 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3344 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
3345 return -TARGET_EINVAL
;
3347 /* ensure we have space for args */
3348 if (nargs
[num
] > ARRAY_SIZE(a
)) {
3349 return -TARGET_EINVAL
;
3351 /* collect the arguments in a[] according to nargs[] */
3352 for (i
= 0; i
< nargs
[num
]; ++i
) {
3353 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3354 return -TARGET_EFAULT
;
3357 /* now when we have the args, invoke the appropriate underlying function */
3359 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
3360 return do_socket(a
[0], a
[1], a
[2]);
3361 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
3362 return do_bind(a
[0], a
[1], a
[2]);
3363 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
3364 return do_connect(a
[0], a
[1], a
[2]);
3365 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
3366 return get_errno(listen(a
[0], a
[1]));
3367 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
3368 return do_accept4(a
[0], a
[1], a
[2], 0);
3369 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
3370 return do_getsockname(a
[0], a
[1], a
[2]);
3371 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
3372 return do_getpeername(a
[0], a
[1], a
[2]);
3373 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
3374 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3375 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
3376 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3377 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
3378 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3379 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
3380 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3381 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
3382 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3383 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
3384 return get_errno(shutdown(a
[0], a
[1]));
3385 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3386 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3387 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3388 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3389 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
3390 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3391 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
3392 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3393 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
3394 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3395 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
3396 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3397 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
3398 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3400 gemu_log("Unsupported socketcall: %d\n", num
);
3401 return -TARGET_EINVAL
;
3406 #define N_SHM_REGIONS 32
3408 static struct shm_region
{
3412 } shm_regions
[N_SHM_REGIONS
];
3414 #ifndef TARGET_SEMID64_DS
3415 /* asm-generic version of this struct */
3416 struct target_semid64_ds
3418 struct target_ipc_perm sem_perm
;
3419 abi_ulong sem_otime
;
3420 #if TARGET_ABI_BITS == 32
3421 abi_ulong __unused1
;
3423 abi_ulong sem_ctime
;
3424 #if TARGET_ABI_BITS == 32
3425 abi_ulong __unused2
;
3427 abi_ulong sem_nsems
;
3428 abi_ulong __unused3
;
3429 abi_ulong __unused4
;
3433 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3434 abi_ulong target_addr
)
3436 struct target_ipc_perm
*target_ip
;
3437 struct target_semid64_ds
*target_sd
;
3439 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3440 return -TARGET_EFAULT
;
3441 target_ip
= &(target_sd
->sem_perm
);
3442 host_ip
->__key
= tswap32(target_ip
->__key
);
3443 host_ip
->uid
= tswap32(target_ip
->uid
);
3444 host_ip
->gid
= tswap32(target_ip
->gid
);
3445 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3446 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3447 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3448 host_ip
->mode
= tswap32(target_ip
->mode
);
3450 host_ip
->mode
= tswap16(target_ip
->mode
);
3452 #if defined(TARGET_PPC)
3453 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3455 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3457 unlock_user_struct(target_sd
, target_addr
, 0);
3461 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3462 struct ipc_perm
*host_ip
)
3464 struct target_ipc_perm
*target_ip
;
3465 struct target_semid64_ds
*target_sd
;
3467 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3468 return -TARGET_EFAULT
;
3469 target_ip
= &(target_sd
->sem_perm
);
3470 target_ip
->__key
= tswap32(host_ip
->__key
);
3471 target_ip
->uid
= tswap32(host_ip
->uid
);
3472 target_ip
->gid
= tswap32(host_ip
->gid
);
3473 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3474 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3475 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3476 target_ip
->mode
= tswap32(host_ip
->mode
);
3478 target_ip
->mode
= tswap16(host_ip
->mode
);
3480 #if defined(TARGET_PPC)
3481 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3483 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3485 unlock_user_struct(target_sd
, target_addr
, 1);
3489 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3490 abi_ulong target_addr
)
3492 struct target_semid64_ds
*target_sd
;
3494 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3495 return -TARGET_EFAULT
;
3496 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3497 return -TARGET_EFAULT
;
3498 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3499 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3500 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3501 unlock_user_struct(target_sd
, target_addr
, 0);
3505 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
3506 struct semid_ds
*host_sd
)
3508 struct target_semid64_ds
*target_sd
;
3510 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3511 return -TARGET_EFAULT
;
3512 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
3513 return -TARGET_EFAULT
;
3514 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
3515 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
3516 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
3517 unlock_user_struct(target_sd
, target_addr
, 1);
3521 struct target_seminfo
{
3534 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
3535 struct seminfo
*host_seminfo
)
3537 struct target_seminfo
*target_seminfo
;
3538 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
3539 return -TARGET_EFAULT
;
3540 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
3541 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
3542 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
3543 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
3544 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
3545 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
3546 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
3547 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
3548 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
3549 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
3550 unlock_user_struct(target_seminfo
, target_addr
, 1);
3556 struct semid_ds
*buf
;
3557 unsigned short *array
;
3558 struct seminfo
*__buf
;
3561 union target_semun
{
3568 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
3569 abi_ulong target_addr
)
3572 unsigned short *array
;
3574 struct semid_ds semid_ds
;
3577 semun
.buf
= &semid_ds
;
3579 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3581 return get_errno(ret
);
3583 nsems
= semid_ds
.sem_nsems
;
3585 *host_array
= g_try_new(unsigned short, nsems
);
3587 return -TARGET_ENOMEM
;
3589 array
= lock_user(VERIFY_READ
, target_addr
,
3590 nsems
*sizeof(unsigned short), 1);
3592 g_free(*host_array
);
3593 return -TARGET_EFAULT
;
3596 for(i
=0; i
<nsems
; i
++) {
3597 __get_user((*host_array
)[i
], &array
[i
]);
3599 unlock_user(array
, target_addr
, 0);
3604 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
3605 unsigned short **host_array
)
3608 unsigned short *array
;
3610 struct semid_ds semid_ds
;
3613 semun
.buf
= &semid_ds
;
3615 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3617 return get_errno(ret
);
3619 nsems
= semid_ds
.sem_nsems
;
3621 array
= lock_user(VERIFY_WRITE
, target_addr
,
3622 nsems
*sizeof(unsigned short), 0);
3624 return -TARGET_EFAULT
;
3626 for(i
=0; i
<nsems
; i
++) {
3627 __put_user((*host_array
)[i
], &array
[i
]);
3629 g_free(*host_array
);
3630 unlock_user(array
, target_addr
, 1);
3635 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
3636 abi_ulong target_arg
)
3638 union target_semun target_su
= { .buf
= target_arg
};
3640 struct semid_ds dsarg
;
3641 unsigned short *array
= NULL
;
3642 struct seminfo seminfo
;
3643 abi_long ret
= -TARGET_EINVAL
;
3650 /* In 64 bit cross-endian situations, we will erroneously pick up
3651 * the wrong half of the union for the "val" element. To rectify
3652 * this, the entire 8-byte structure is byteswapped, followed by
3653 * a swap of the 4 byte val field. In other cases, the data is
3654 * already in proper host byte order. */
3655 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
3656 target_su
.buf
= tswapal(target_su
.buf
);
3657 arg
.val
= tswap32(target_su
.val
);
3659 arg
.val
= target_su
.val
;
3661 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3665 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
3669 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3670 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
3677 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
3681 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
3682 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
3688 arg
.__buf
= &seminfo
;
3689 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));