4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
62 #include <sys/timerfd.h>
65 #include <sys/eventfd.h>
68 #include <sys/epoll.h>
71 #include "qemu/xattr.h"
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
93 #include <linux/mtio.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
116 #include <linux/btrfs.h>
119 #include <libdrm/drm.h>
120 #include <libdrm/i915_drm.h>
122 #include "linux_loop.h"
126 #include "qemu/guest-random.h"
127 #include "qemu/selfmap.h"
128 #include "user/syscall-trace.h"
129 #include "qapi/error.h"
130 #include "fd-trans.h"
134 #define CLONE_IO 0x80000000 /* Clone io context */
137 /* We can't directly call the host clone syscall, because this will
138 * badly confuse libc (breaking mutexes, for example). So we must
139 * divide clone flags into:
140 * * flag combinations that look like pthread_create()
141 * * flag combinations that look like fork()
142 * * flags we can implement within QEMU itself
143 * * flags we can't support and will return an error for
145 /* For thread creation, all these flags must be present; for
146 * fork, none must be present.
148 #define CLONE_THREAD_FLAGS \
149 (CLONE_VM | CLONE_FS | CLONE_FILES | \
150 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
152 /* These flags are ignored:
153 * CLONE_DETACHED is now ignored by the kernel;
154 * CLONE_IO is just an optimisation hint to the I/O scheduler
156 #define CLONE_IGNORED_FLAGS \
157 (CLONE_DETACHED | CLONE_IO)
159 /* Flags for fork which we can implement within QEMU itself */
160 #define CLONE_OPTIONAL_FORK_FLAGS \
161 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
162 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
164 /* Flags for thread creation which we can implement within QEMU itself */
165 #define CLONE_OPTIONAL_THREAD_FLAGS \
166 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
167 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
169 #define CLONE_INVALID_FORK_FLAGS \
170 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
172 #define CLONE_INVALID_THREAD_FLAGS \
173 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
174 CLONE_IGNORED_FLAGS))
176 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
177 * have almost all been allocated. We cannot support any of
178 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
179 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
180 * The checks against the invalid thread masks above will catch these.
181 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
184 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
185 * once. This exercises the codepaths for restart.
187 //#define DEBUG_ERESTARTSYS
189 //#include <linux/msdos_fs.h>
190 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
191 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
201 #define _syscall0(type,name) \
202 static type name (void) \
204 return syscall(__NR_##name); \
207 #define _syscall1(type,name,type1,arg1) \
208 static type name (type1 arg1) \
210 return syscall(__NR_##name, arg1); \
213 #define _syscall2(type,name,type1,arg1,type2,arg2) \
214 static type name (type1 arg1,type2 arg2) \
216 return syscall(__NR_##name, arg1, arg2); \
219 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
220 static type name (type1 arg1,type2 arg2,type3 arg3) \
222 return syscall(__NR_##name, arg1, arg2, arg3); \
225 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
226 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
228 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
231 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
233 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
235 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
239 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
240 type5,arg5,type6,arg6) \
241 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
244 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
248 #define __NR_sys_uname __NR_uname
249 #define __NR_sys_getcwd1 __NR_getcwd
250 #define __NR_sys_getdents __NR_getdents
251 #define __NR_sys_getdents64 __NR_getdents64
252 #define __NR_sys_getpriority __NR_getpriority
253 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
254 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
255 #define __NR_sys_syslog __NR_syslog
256 #if defined(__NR_futex)
257 # define __NR_sys_futex __NR_futex
259 #if defined(__NR_futex_time64)
260 # define __NR_sys_futex_time64 __NR_futex_time64
262 #define __NR_sys_inotify_init __NR_inotify_init
263 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
264 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
265 #define __NR_sys_statx __NR_statx
267 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
268 #define __NR__llseek __NR_lseek
271 /* Newer kernel ports have llseek() instead of _llseek() */
272 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
273 #define TARGET_NR__llseek TARGET_NR_llseek
276 #define __NR_sys_gettid __NR_gettid
/* int sys_gettid(void): raw gettid syscall (see __NR_sys_gettid define above). */
_syscall0(int, sys_gettid)
279 /* For the 64-bit guest on 32-bit host case we must emulate
280 * getdents using getdents64, because otherwise the host
281 * might hand us back more dirent records than we can fit
282 * into the guest buffer after structure format conversion.
283 * Otherwise we emulate getdents with getdents if the host has it.
285 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
286 #define EMULATE_GETDENTS_WITH_GETDENTS
289 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
290 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
292 #if (defined(TARGET_NR_getdents) && \
293 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
294 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
295 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
297 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
298 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
299 loff_t
*, res
, uint
, wh
);
301 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
302 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
304 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
305 #ifdef __NR_exit_group
306 _syscall1(int,exit_group
,int,error_code
)
308 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
309 _syscall1(int,set_tid_address
,int *,tidptr
)
311 #if defined(__NR_futex)
312 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
313 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
315 #if defined(__NR_futex_time64)
316 _syscall6(int,sys_futex_time64
,int *,uaddr
,int,op
,int,val
,
317 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
319 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
320 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
321 unsigned long *, user_mask_ptr
);
322 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
323 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
324 unsigned long *, user_mask_ptr
);
325 #define __NR_sys_getcpu __NR_getcpu
326 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
327 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
329 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
330 struct __user_cap_data_struct
*, data
);
331 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
332 struct __user_cap_data_struct
*, data
);
333 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
334 _syscall2(int, ioprio_get
, int, which
, int, who
)
336 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
337 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
339 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
340 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
343 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
344 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
345 unsigned long, idx1
, unsigned long, idx2
)
349 * It is assumed that struct statx is architecture independent.
351 #if defined(TARGET_NR_statx) && defined(__NR_statx)
352 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
353 unsigned int, mask
, struct target_statx
*, statxbuf
)
355 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
356 _syscall2(int, membarrier
, int, cmd
, int, flags
)
359 static bitmask_transtbl fcntl_flags_tbl
[] = {
360 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
361 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
362 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
363 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
364 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
365 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
366 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
367 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
368 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
369 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
370 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
371 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
372 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
373 #if defined(O_DIRECT)
374 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
376 #if defined(O_NOATIME)
377 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
379 #if defined(O_CLOEXEC)
380 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
383 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
385 #if defined(O_TMPFILE)
386 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
388 /* Don't terminate the list prematurely on 64-bit host+guest. */
389 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
390 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
/* int sys_getcwd1(char *buf, size_t size): raw getcwd syscall
 * (__NR_sys_getcwd1 is defined as __NR_getcwd above). */
_syscall2(int, sys_getcwd1, char *, buf, size_t, size)
397 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
398 #if defined(__NR_utimensat)
399 #define __NR_sys_utimensat __NR_utimensat
400 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
401 const struct timespec
*,tsp
,int,flags
)
403 static int sys_utimensat(int dirfd
, const char *pathname
,
404 const struct timespec times
[2], int flags
)
410 #endif /* TARGET_NR_utimensat */
412 #ifdef TARGET_NR_renameat2
413 #if defined(__NR_renameat2)
414 #define __NR_sys_renameat2 __NR_renameat2
415 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
416 const char *, new, unsigned int, flags
)
418 static int sys_renameat2(int oldfd
, const char *old
,
419 int newfd
, const char *new, int flags
)
422 return renameat(oldfd
, old
, newfd
, new);
428 #endif /* TARGET_NR_renameat2 */
430 #ifdef CONFIG_INOTIFY
431 #include <sys/inotify.h>
433 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper around the host inotify_init(2); returns the new fd or -1. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
439 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Register pathname on inotify descriptor fd for the events in mask.
 * Returns the watch descriptor, or -1 with errno set. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    int wd = inotify_add_watch(fd, pathname, mask);
    return wd;
}
445 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Remove watch descriptor wd from inotify descriptor fd.
 * Returns 0 on success, -1 with errno set on failure. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
451 #ifdef CONFIG_INOTIFY1
452 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* inotify_init1(2) wrapper: like inotify_init() but accepts IN_NONBLOCK /
 * IN_CLOEXEC flags.  Returns the new fd or -1 with errno set. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
460 /* Userspace can usually survive runtime without inotify */
461 #undef TARGET_NR_inotify_init
462 #undef TARGET_NR_inotify_init1
463 #undef TARGET_NR_inotify_add_watch
464 #undef TARGET_NR_inotify_rm_watch
465 #endif /* CONFIG_INOTIFY */
467 #if defined(TARGET_NR_prlimit64)
468 #ifndef __NR_prlimit64
469 # define __NR_prlimit64 -1
471 #define __NR_sys_prlimit64 __NR_prlimit64
472 /* The glibc rlimit structure may not be that used by the underlying syscall */
473 struct host_rlimit64
{
477 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
478 const struct host_rlimit64
*, new_limit
,
479 struct host_rlimit64
*, old_limit
)
483 #if defined(TARGET_NR_timer_create)
484 /* Maximum of 32 active POSIX timers allowed at any one time. */
485 static timer_t g_posix_timers
[32] = { 0, } ;
487 static inline int next_free_host_timer(void)
490 /* FIXME: Does finding the next free slot require a lock? */
491 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
492 if (g_posix_timers
[k
] == 0) {
493 g_posix_timers
[k
] = (timer_t
) 1;
501 #define ERRNO_TABLE_SIZE 1200
503 /* target_to_host_errno_table[] is initialized from
504 * host_to_target_errno_table[] in syscall_init(). */
505 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
509 * This list is the union of errno values overridden in asm-<arch>/errno.h
510 * minus the errnos that are not actually generic to all archs.
512 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
513 [EAGAIN
] = TARGET_EAGAIN
,
514 [EIDRM
] = TARGET_EIDRM
,
515 [ECHRNG
] = TARGET_ECHRNG
,
516 [EL2NSYNC
] = TARGET_EL2NSYNC
,
517 [EL3HLT
] = TARGET_EL3HLT
,
518 [EL3RST
] = TARGET_EL3RST
,
519 [ELNRNG
] = TARGET_ELNRNG
,
520 [EUNATCH
] = TARGET_EUNATCH
,
521 [ENOCSI
] = TARGET_ENOCSI
,
522 [EL2HLT
] = TARGET_EL2HLT
,
523 [EDEADLK
] = TARGET_EDEADLK
,
524 [ENOLCK
] = TARGET_ENOLCK
,
525 [EBADE
] = TARGET_EBADE
,
526 [EBADR
] = TARGET_EBADR
,
527 [EXFULL
] = TARGET_EXFULL
,
528 [ENOANO
] = TARGET_ENOANO
,
529 [EBADRQC
] = TARGET_EBADRQC
,
530 [EBADSLT
] = TARGET_EBADSLT
,
531 [EBFONT
] = TARGET_EBFONT
,
532 [ENOSTR
] = TARGET_ENOSTR
,
533 [ENODATA
] = TARGET_ENODATA
,
534 [ETIME
] = TARGET_ETIME
,
535 [ENOSR
] = TARGET_ENOSR
,
536 [ENONET
] = TARGET_ENONET
,
537 [ENOPKG
] = TARGET_ENOPKG
,
538 [EREMOTE
] = TARGET_EREMOTE
,
539 [ENOLINK
] = TARGET_ENOLINK
,
540 [EADV
] = TARGET_EADV
,
541 [ESRMNT
] = TARGET_ESRMNT
,
542 [ECOMM
] = TARGET_ECOMM
,
543 [EPROTO
] = TARGET_EPROTO
,
544 [EDOTDOT
] = TARGET_EDOTDOT
,
545 [EMULTIHOP
] = TARGET_EMULTIHOP
,
546 [EBADMSG
] = TARGET_EBADMSG
,
547 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
548 [EOVERFLOW
] = TARGET_EOVERFLOW
,
549 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
550 [EBADFD
] = TARGET_EBADFD
,
551 [EREMCHG
] = TARGET_EREMCHG
,
552 [ELIBACC
] = TARGET_ELIBACC
,
553 [ELIBBAD
] = TARGET_ELIBBAD
,
554 [ELIBSCN
] = TARGET_ELIBSCN
,
555 [ELIBMAX
] = TARGET_ELIBMAX
,
556 [ELIBEXEC
] = TARGET_ELIBEXEC
,
557 [EILSEQ
] = TARGET_EILSEQ
,
558 [ENOSYS
] = TARGET_ENOSYS
,
559 [ELOOP
] = TARGET_ELOOP
,
560 [ERESTART
] = TARGET_ERESTART
,
561 [ESTRPIPE
] = TARGET_ESTRPIPE
,
562 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
563 [EUSERS
] = TARGET_EUSERS
,
564 [ENOTSOCK
] = TARGET_ENOTSOCK
,
565 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
566 [EMSGSIZE
] = TARGET_EMSGSIZE
,
567 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
568 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
569 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
570 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
571 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
572 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
573 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
574 [EADDRINUSE
] = TARGET_EADDRINUSE
,
575 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
576 [ENETDOWN
] = TARGET_ENETDOWN
,
577 [ENETUNREACH
] = TARGET_ENETUNREACH
,
578 [ENETRESET
] = TARGET_ENETRESET
,
579 [ECONNABORTED
] = TARGET_ECONNABORTED
,
580 [ECONNRESET
] = TARGET_ECONNRESET
,
581 [ENOBUFS
] = TARGET_ENOBUFS
,
582 [EISCONN
] = TARGET_EISCONN
,
583 [ENOTCONN
] = TARGET_ENOTCONN
,
584 [EUCLEAN
] = TARGET_EUCLEAN
,
585 [ENOTNAM
] = TARGET_ENOTNAM
,
586 [ENAVAIL
] = TARGET_ENAVAIL
,
587 [EISNAM
] = TARGET_EISNAM
,
588 [EREMOTEIO
] = TARGET_EREMOTEIO
,
589 [EDQUOT
] = TARGET_EDQUOT
,
590 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
591 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
592 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
593 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
594 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
595 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
596 [EALREADY
] = TARGET_EALREADY
,
597 [EINPROGRESS
] = TARGET_EINPROGRESS
,
598 [ESTALE
] = TARGET_ESTALE
,
599 [ECANCELED
] = TARGET_ECANCELED
,
600 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
601 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
603 [ENOKEY
] = TARGET_ENOKEY
,
606 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
609 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
612 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
615 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
617 #ifdef ENOTRECOVERABLE
618 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
621 [ENOMSG
] = TARGET_ENOMSG
,
624 [ERFKILL
] = TARGET_ERFKILL
,
627 [EHWPOISON
] = TARGET_EHWPOISON
,
631 static inline int host_to_target_errno(int err
)
633 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
634 host_to_target_errno_table
[err
]) {
635 return host_to_target_errno_table
[err
];
640 static inline int target_to_host_errno(int err
)
642 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
643 target_to_host_errno_table
[err
]) {
644 return target_to_host_errno_table
[err
];
649 static inline abi_long
get_errno(abi_long ret
)
652 return -host_to_target_errno(errno
);
657 const char *target_strerror(int err
)
659 if (err
== TARGET_ERESTARTSYS
) {
660 return "To be restarted";
662 if (err
== TARGET_QEMU_ESIGRETURN
) {
663 return "Successful exit from sigreturn";
666 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
669 return strerror(target_to_host_errno(err
));
672 #define safe_syscall0(type, name) \
673 static type safe_##name(void) \
675 return safe_syscall(__NR_##name); \
678 #define safe_syscall1(type, name, type1, arg1) \
679 static type safe_##name(type1 arg1) \
681 return safe_syscall(__NR_##name, arg1); \
684 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
685 static type safe_##name(type1 arg1, type2 arg2) \
687 return safe_syscall(__NR_##name, arg1, arg2); \
690 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
691 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
693 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
696 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
698 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
700 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
703 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
704 type4, arg4, type5, arg5) \
705 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
708 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
711 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
712 type4, arg4, type5, arg5, type6, arg6) \
713 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
714 type5 arg5, type6 arg6) \
716 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
719 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
720 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
721 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
722 int, flags
, mode_t
, mode
)
723 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
724 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
725 struct rusage
*, rusage
)
727 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
728 int, options
, struct rusage
*, rusage
)
729 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
730 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
731 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
732 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
733 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
735 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
736 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
737 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
740 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
741 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
743 #if defined(__NR_futex)
744 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
745 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
747 #if defined(__NR_futex_time64)
748 safe_syscall6(int,futex_time64
,int *,uaddr
,int,op
,int,val
, \
749 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
751 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
752 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
753 safe_syscall2(int, tkill
, int, tid
, int, sig
)
754 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
755 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
756 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
757 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
758 unsigned long, pos_l
, unsigned long, pos_h
)
759 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
760 unsigned long, pos_l
, unsigned long, pos_h
)
761 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
763 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
764 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
765 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
766 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
767 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
768 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
769 safe_syscall2(int, flock
, int, fd
, int, operation
)
770 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
771 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
772 const struct timespec
*, uts
, size_t, sigsetsize
)
774 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
776 #if defined(TARGET_NR_nanosleep)
777 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
778 struct timespec
*, rem
)
780 #if defined(TARGET_NR_clock_nanosleep) || \
781 defined(TARGET_NR_clock_nanosleep_time64)
782 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
783 const struct timespec
*, req
, struct timespec
*, rem
)
787 safe_syscall5(int, ipc
, int, call
, long, first
, long, second
, long, third
,
790 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
791 void *, ptr
, long, fifth
)
795 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
799 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
800 long, msgtype
, int, flags
)
802 #ifdef __NR_semtimedop
803 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
804 unsigned, nsops
, const struct timespec
*, timeout
)
806 #if defined(TARGET_NR_mq_timedsend) || \
807 defined(TARGET_NR_mq_timedsend_time64)
808 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
809 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
811 #if defined(TARGET_NR_mq_timedreceive) || \
812 defined(TARGET_NR_mq_timedreceive_time64)
813 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
814 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
816 /* We do ioctl like this rather than via safe_syscall3 to preserve the
817 * "third argument might be integer or pointer or not present" behaviour of
820 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
821 /* Similarly for fcntl. Note that callers must always:
822 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
823 * use the flock64 struct rather than unsuffixed flock
824 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
827 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
829 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
832 static inline int host_to_target_sock_type(int host_type
)
836 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
838 target_type
= TARGET_SOCK_DGRAM
;
841 target_type
= TARGET_SOCK_STREAM
;
844 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
848 #if defined(SOCK_CLOEXEC)
849 if (host_type
& SOCK_CLOEXEC
) {
850 target_type
|= TARGET_SOCK_CLOEXEC
;
854 #if defined(SOCK_NONBLOCK)
855 if (host_type
& SOCK_NONBLOCK
) {
856 target_type
|= TARGET_SOCK_NONBLOCK
;
/* Guest heap ("brk") bookkeeping used by do_brk() below. */
static abi_ulong target_brk;          /* current guest program break */
static abi_ulong target_original_brk; /* initial break set by target_set_brk();
                                       * do_brk() rejects values below this */
static abi_ulong brk_page;            /* host-page-aligned top of the pages
                                       * already mapped for the guest heap */
/*
 * Record the guest's initial program break.  Both the current break and
 * the floor below which do_brk() refuses to move it start at the
 * host-page-aligned new_brk; brk_page tracks the top of the heap pages
 * considered already mapped.
 */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
873 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
874 #define DEBUGF_BRK(message, args...)
876 /* do_brk() must return target values and target errnos. */
877 abi_long
do_brk(abi_ulong new_brk
)
879 abi_long mapped_addr
;
880 abi_ulong new_alloc_size
;
882 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
885 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
888 if (new_brk
< target_original_brk
) {
889 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
894 /* If the new brk is less than the highest page reserved to the
895 * target heap allocation, set it and we're almost done... */
896 if (new_brk
<= brk_page
) {
897 /* Heap contents are initialized to zero, as for anonymous
899 if (new_brk
> target_brk
) {
900 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
902 target_brk
= new_brk
;
903 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
907 /* We need to allocate more memory after the brk... Note that
908 * we don't use MAP_FIXED because that will map over the top of
909 * any existing mapping (like the one with the host libc or qemu
910 * itself); instead we treat "mapped but at wrong address" as
911 * a failure and unmap again.
913 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
914 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
915 PROT_READ
|PROT_WRITE
,
916 MAP_ANON
|MAP_PRIVATE
, 0, 0));
918 if (mapped_addr
== brk_page
) {
919 /* Heap contents are initialized to zero, as for anonymous
920 * mapped pages. Technically the new pages are already
921 * initialized to zero since they *are* anonymous mapped
922 * pages, however we have to take care with the contents that
923 * come from the remaining part of the previous page: it may
924 * contains garbage data due to a previous heap usage (grown
926 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
928 target_brk
= new_brk
;
929 brk_page
= HOST_PAGE_ALIGN(target_brk
);
930 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
933 } else if (mapped_addr
!= -1) {
934 /* Mapped but at wrong address, meaning there wasn't actually
935 * enough space for this brk.
937 target_munmap(mapped_addr
, new_alloc_size
);
939 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
942 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
945 #if defined(TARGET_ALPHA)
946 /* We (partially) emulate OSF/1 on Alpha, which requires we
947 return a proper errno, not an unchanged brk value. */
948 return -TARGET_ENOMEM
;
950 /* For everything else, return the previous break. */
954 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
955 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
956 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
957 abi_ulong target_fds_addr
,
961 abi_ulong b
, *target_fds
;
963 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
964 if (!(target_fds
= lock_user(VERIFY_READ
,
966 sizeof(abi_ulong
) * nw
,
968 return -TARGET_EFAULT
;
972 for (i
= 0; i
< nw
; i
++) {
973 /* grab the abi_ulong */
974 __get_user(b
, &target_fds
[i
]);
975 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
976 /* check the bit inside the abi_ulong */
983 unlock_user(target_fds
, target_fds_addr
, 0);
988 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
989 abi_ulong target_fds_addr
,
992 if (target_fds_addr
) {
993 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
994 return -TARGET_EFAULT
;
1002 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1008 abi_ulong
*target_fds
;
1010 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1011 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1013 sizeof(abi_ulong
) * nw
,
1015 return -TARGET_EFAULT
;
1018 for (i
= 0; i
< nw
; i
++) {
1020 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1021 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1024 __put_user(v
, &target_fds
[i
]);
1027 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1033 #if defined(__alpha__)
1034 #define HOST_HZ 1024
1039 static inline abi_long
host_to_target_clock_t(long ticks
)
1041 #if HOST_HZ == TARGET_HZ
1044 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1048 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1049 const struct rusage
*rusage
)
1051 struct target_rusage
*target_rusage
;
1053 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1054 return -TARGET_EFAULT
;
1055 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1056 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1057 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1058 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1059 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1060 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1061 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1062 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1063 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1064 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1065 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1066 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1067 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1068 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1069 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1070 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1071 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1072 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1073 unlock_user_struct(target_rusage
, target_addr
, 1);
1078 #ifdef TARGET_NR_setrlimit
1079 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1081 abi_ulong target_rlim_swap
;
1084 target_rlim_swap
= tswapal(target_rlim
);
1085 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1086 return RLIM_INFINITY
;
1088 result
= target_rlim_swap
;
1089 if (target_rlim_swap
!= (rlim_t
)result
)
1090 return RLIM_INFINITY
;
1096 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1097 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1099 abi_ulong target_rlim_swap
;
1102 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1103 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1105 target_rlim_swap
= rlim
;
1106 result
= tswapal(target_rlim_swap
);
1112 static inline int target_to_host_resource(int code
)
1115 case TARGET_RLIMIT_AS
:
1117 case TARGET_RLIMIT_CORE
:
1119 case TARGET_RLIMIT_CPU
:
1121 case TARGET_RLIMIT_DATA
:
1123 case TARGET_RLIMIT_FSIZE
:
1124 return RLIMIT_FSIZE
;
1125 case TARGET_RLIMIT_LOCKS
:
1126 return RLIMIT_LOCKS
;
1127 case TARGET_RLIMIT_MEMLOCK
:
1128 return RLIMIT_MEMLOCK
;
1129 case TARGET_RLIMIT_MSGQUEUE
:
1130 return RLIMIT_MSGQUEUE
;
1131 case TARGET_RLIMIT_NICE
:
1133 case TARGET_RLIMIT_NOFILE
:
1134 return RLIMIT_NOFILE
;
1135 case TARGET_RLIMIT_NPROC
:
1136 return RLIMIT_NPROC
;
1137 case TARGET_RLIMIT_RSS
:
1139 case TARGET_RLIMIT_RTPRIO
:
1140 return RLIMIT_RTPRIO
;
1141 case TARGET_RLIMIT_SIGPENDING
:
1142 return RLIMIT_SIGPENDING
;
1143 case TARGET_RLIMIT_STACK
:
1144 return RLIMIT_STACK
;
1150 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1151 abi_ulong target_tv_addr
)
1153 struct target_timeval
*target_tv
;
1155 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1156 return -TARGET_EFAULT
;
1159 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1160 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1162 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1167 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1168 const struct timeval
*tv
)
1170 struct target_timeval
*target_tv
;
1172 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1173 return -TARGET_EFAULT
;
1176 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1177 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1179 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1184 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1185 static inline abi_long
copy_from_user_timeval64(struct timeval
*tv
,
1186 abi_ulong target_tv_addr
)
1188 struct target__kernel_sock_timeval
*target_tv
;
1190 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1191 return -TARGET_EFAULT
;
1194 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1195 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1197 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1203 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1204 const struct timeval
*tv
)
1206 struct target__kernel_sock_timeval
*target_tv
;
1208 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1209 return -TARGET_EFAULT
;
1212 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1213 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1215 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1220 #if defined(TARGET_NR_futex) || \
1221 defined(TARGET_NR_rt_sigtimedwait) || \
1222 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1223 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1224 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1225 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1226 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1227 defined(TARGET_NR_timer_settime) || \
1228 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1229 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
1230 abi_ulong target_addr
)
1232 struct target_timespec
*target_ts
;
1234 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1235 return -TARGET_EFAULT
;
1237 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1238 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1239 unlock_user_struct(target_ts
, target_addr
, 0);
1244 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1245 defined(TARGET_NR_timer_settime64) || \
1246 defined(TARGET_NR_mq_timedsend_time64) || \
1247 defined(TARGET_NR_mq_timedreceive_time64) || \
1248 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1249 defined(TARGET_NR_clock_nanosleep_time64) || \
1250 defined(TARGET_NR_rt_sigtimedwait_time64) || \
1251 defined(TARGET_NR_utimensat) || \
1252 defined(TARGET_NR_utimensat_time64) || \
1253 defined(TARGET_NR_semtimedop_time64) || \
1254 defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1255 static inline abi_long
target_to_host_timespec64(struct timespec
*host_ts
,
1256 abi_ulong target_addr
)
1258 struct target__kernel_timespec
*target_ts
;
1260 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1261 return -TARGET_EFAULT
;
1263 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1264 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1265 /* in 32bit mode, this drops the padding */
1266 host_ts
->tv_nsec
= (long)(abi_long
)host_ts
->tv_nsec
;
1267 unlock_user_struct(target_ts
, target_addr
, 0);
1272 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1273 struct timespec
*host_ts
)
1275 struct target_timespec
*target_ts
;
1277 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1278 return -TARGET_EFAULT
;
1280 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1281 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1282 unlock_user_struct(target_ts
, target_addr
, 1);
1286 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1287 struct timespec
*host_ts
)
1289 struct target__kernel_timespec
*target_ts
;
1291 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1292 return -TARGET_EFAULT
;
1294 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1295 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1296 unlock_user_struct(target_ts
, target_addr
, 1);
1300 #if defined(TARGET_NR_gettimeofday)
1301 static inline abi_long
copy_to_user_timezone(abi_ulong target_tz_addr
,
1302 struct timezone
*tz
)
1304 struct target_timezone
*target_tz
;
1306 if (!lock_user_struct(VERIFY_WRITE
, target_tz
, target_tz_addr
, 1)) {
1307 return -TARGET_EFAULT
;
1310 __put_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1311 __put_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1313 unlock_user_struct(target_tz
, target_tz_addr
, 1);
1319 #if defined(TARGET_NR_settimeofday)
1320 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1321 abi_ulong target_tz_addr
)
1323 struct target_timezone
*target_tz
;
1325 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1326 return -TARGET_EFAULT
;
1329 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1330 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1332 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1338 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1341 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1342 abi_ulong target_mq_attr_addr
)
1344 struct target_mq_attr
*target_mq_attr
;
1346 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1347 target_mq_attr_addr
, 1))
1348 return -TARGET_EFAULT
;
1350 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1351 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1352 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1353 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1355 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1360 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1361 const struct mq_attr
*attr
)
1363 struct target_mq_attr
*target_mq_attr
;
1365 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1366 target_mq_attr_addr
, 0))
1367 return -TARGET_EFAULT
;
1369 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1370 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1371 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1372 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1374 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1380 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1381 /* do_select() must return target values and target errnos. */
1382 static abi_long
do_select(int n
,
1383 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1384 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1386 fd_set rfds
, wfds
, efds
;
1387 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1389 struct timespec ts
, *ts_ptr
;
1392 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1396 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1400 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1405 if (target_tv_addr
) {
1406 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1407 return -TARGET_EFAULT
;
1408 ts
.tv_sec
= tv
.tv_sec
;
1409 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1415 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1418 if (!is_error(ret
)) {
1419 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1420 return -TARGET_EFAULT
;
1421 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1422 return -TARGET_EFAULT
;
1423 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1424 return -TARGET_EFAULT
;
1426 if (target_tv_addr
) {
1427 tv
.tv_sec
= ts
.tv_sec
;
1428 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1429 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1430 return -TARGET_EFAULT
;
1438 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1439 static abi_long
do_old_select(abi_ulong arg1
)
1441 struct target_sel_arg_struct
*sel
;
1442 abi_ulong inp
, outp
, exp
, tvp
;
1445 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1446 return -TARGET_EFAULT
;
1449 nsel
= tswapal(sel
->n
);
1450 inp
= tswapal(sel
->inp
);
1451 outp
= tswapal(sel
->outp
);
1452 exp
= tswapal(sel
->exp
);
1453 tvp
= tswapal(sel
->tvp
);
1455 unlock_user_struct(sel
, arg1
, 0);
1457 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1462 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1463 static abi_long
do_pselect6(abi_long arg1
, abi_long arg2
, abi_long arg3
,
1464 abi_long arg4
, abi_long arg5
, abi_long arg6
,
1467 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
1468 fd_set rfds
, wfds
, efds
;
1469 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1470 struct timespec ts
, *ts_ptr
;
1474 * The 6th arg is actually two args smashed together,
1475 * so we cannot use the C library.
1483 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
1484 target_sigset_t
*target_sigset
;
1492 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1496 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1500 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1506 * This takes a timespec, and not a timeval, so we cannot
1507 * use the do_select() helper ...
1511 if (target_to_host_timespec64(&ts
, ts_addr
)) {
1512 return -TARGET_EFAULT
;
1515 if (target_to_host_timespec(&ts
, ts_addr
)) {
1516 return -TARGET_EFAULT
;
1524 /* Extract the two packed args for the sigset */
1527 sig
.size
= SIGSET_T_SIZE
;
1529 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
1531 return -TARGET_EFAULT
;
1533 arg_sigset
= tswapal(arg7
[0]);
1534 arg_sigsize
= tswapal(arg7
[1]);
1535 unlock_user(arg7
, arg6
, 0);
1539 if (arg_sigsize
!= sizeof(*target_sigset
)) {
1540 /* Like the kernel, we enforce correct size sigsets */
1541 return -TARGET_EINVAL
;
1543 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
1544 sizeof(*target_sigset
), 1);
1545 if (!target_sigset
) {
1546 return -TARGET_EFAULT
;
1548 target_to_host_sigset(&set
, target_sigset
);
1549 unlock_user(target_sigset
, arg_sigset
, 0);
1557 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1560 if (!is_error(ret
)) {
1561 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
)) {
1562 return -TARGET_EFAULT
;
1564 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
)) {
1565 return -TARGET_EFAULT
;
1567 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
)) {
1568 return -TARGET_EFAULT
;
1571 if (ts_addr
&& host_to_target_timespec64(ts_addr
, &ts
)) {
1572 return -TARGET_EFAULT
;
1575 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
)) {
1576 return -TARGET_EFAULT
;
1584 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1585 defined(TARGET_NR_ppoll_time64)
1586 static abi_long
do_ppoll(abi_long arg1
, abi_long arg2
, abi_long arg3
,
1587 abi_long arg4
, abi_long arg5
, bool ppoll
, bool time64
)
1589 struct target_pollfd
*target_pfd
;
1590 unsigned int nfds
= arg2
;
1598 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
1599 return -TARGET_EINVAL
;
1601 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
1602 sizeof(struct target_pollfd
) * nfds
, 1);
1604 return -TARGET_EFAULT
;
1607 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
1608 for (i
= 0; i
< nfds
; i
++) {
1609 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
1610 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
1614 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
1615 target_sigset_t
*target_set
;
1616 sigset_t _set
, *set
= &_set
;
1620 if (target_to_host_timespec64(timeout_ts
, arg3
)) {
1621 unlock_user(target_pfd
, arg1
, 0);
1622 return -TARGET_EFAULT
;
1625 if (target_to_host_timespec(timeout_ts
, arg3
)) {
1626 unlock_user(target_pfd
, arg1
, 0);
1627 return -TARGET_EFAULT
;
1635 if (arg5
!= sizeof(target_sigset_t
)) {
1636 unlock_user(target_pfd
, arg1
, 0);
1637 return -TARGET_EINVAL
;
1640 target_set
= lock_user(VERIFY_READ
, arg4
,
1641 sizeof(target_sigset_t
), 1);
1643 unlock_user(target_pfd
, arg1
, 0);
1644 return -TARGET_EFAULT
;
1646 target_to_host_sigset(set
, target_set
);
1651 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
1652 set
, SIGSET_T_SIZE
));
1654 if (!is_error(ret
) && arg3
) {
1656 if (host_to_target_timespec64(arg3
, timeout_ts
)) {
1657 return -TARGET_EFAULT
;
1660 if (host_to_target_timespec(arg3
, timeout_ts
)) {
1661 return -TARGET_EFAULT
;
1666 unlock_user(target_set
, arg4
, 0);
1669 struct timespec ts
, *pts
;
1672 /* Convert ms to secs, ns */
1673 ts
.tv_sec
= arg3
/ 1000;
1674 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
1677 /* -ve poll() timeout means "infinite" */
1680 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
1683 if (!is_error(ret
)) {
1684 for (i
= 0; i
< nfds
; i
++) {
1685 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
1688 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
1693 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1696 return pipe2(host_pipe
, flags
);
1702 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1703 int flags
, int is_pipe2
)
1707 ret
= flags ?
do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1710 return get_errno(ret
);
1712 /* Several targets have special calling conventions for the original
1713 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1715 #if defined(TARGET_ALPHA)
1716 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1717 return host_pipe
[0];
1718 #elif defined(TARGET_MIPS)
1719 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1720 return host_pipe
[0];
1721 #elif defined(TARGET_SH4)
1722 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1723 return host_pipe
[0];
1724 #elif defined(TARGET_SPARC)
1725 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1726 return host_pipe
[0];
1730 if (put_user_s32(host_pipe
[0], pipedes
)
1731 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1732 return -TARGET_EFAULT
;
1733 return get_errno(ret
);
1736 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1737 abi_ulong target_addr
,
1740 struct target_ip_mreqn
*target_smreqn
;
1742 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1744 return -TARGET_EFAULT
;
1745 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1746 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1747 if (len
== sizeof(struct target_ip_mreqn
))
1748 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1749 unlock_user(target_smreqn
, target_addr
, 0);
1754 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1755 abi_ulong target_addr
,
1758 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1759 sa_family_t sa_family
;
1760 struct target_sockaddr
*target_saddr
;
1762 if (fd_trans_target_to_host_addr(fd
)) {
1763 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1766 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1768 return -TARGET_EFAULT
;
1770 sa_family
= tswap16(target_saddr
->sa_family
);
1772 /* Oops. The caller might send a incomplete sun_path; sun_path
1773 * must be terminated by \0 (see the manual page), but
1774 * unfortunately it is quite common to specify sockaddr_un
1775 * length as "strlen(x->sun_path)" while it should be
1776 * "strlen(...) + 1". We'll fix that here if needed.
1777 * Linux kernel has a similar feature.
1780 if (sa_family
== AF_UNIX
) {
1781 if (len
< unix_maxlen
&& len
> 0) {
1782 char *cp
= (char*)target_saddr
;
1784 if ( cp
[len
-1] && !cp
[len
] )
1787 if (len
> unix_maxlen
)
1791 memcpy(addr
, target_saddr
, len
);
1792 addr
->sa_family
= sa_family
;
1793 if (sa_family
== AF_NETLINK
) {
1794 struct sockaddr_nl
*nladdr
;
1796 nladdr
= (struct sockaddr_nl
*)addr
;
1797 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1798 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1799 } else if (sa_family
== AF_PACKET
) {
1800 struct target_sockaddr_ll
*lladdr
;
1802 lladdr
= (struct target_sockaddr_ll
*)addr
;
1803 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1804 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1806 unlock_user(target_saddr
, target_addr
, 0);
1811 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1812 struct sockaddr
*addr
,
1815 struct target_sockaddr
*target_saddr
;
1822 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1824 return -TARGET_EFAULT
;
1825 memcpy(target_saddr
, addr
, len
);
1826 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1827 sizeof(target_saddr
->sa_family
)) {
1828 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1830 if (addr
->sa_family
== AF_NETLINK
&&
1831 len
>= sizeof(struct target_sockaddr_nl
)) {
1832 struct target_sockaddr_nl
*target_nl
=
1833 (struct target_sockaddr_nl
*)target_saddr
;
1834 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1835 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1836 } else if (addr
->sa_family
== AF_PACKET
) {
1837 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1838 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1839 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1840 } else if (addr
->sa_family
== AF_INET6
&&
1841 len
>= sizeof(struct target_sockaddr_in6
)) {
1842 struct target_sockaddr_in6
*target_in6
=
1843 (struct target_sockaddr_in6
*)target_saddr
;
1844 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1846 unlock_user(target_saddr
, target_addr
, len
);
1851 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1852 struct target_msghdr
*target_msgh
)
1854 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1855 abi_long msg_controllen
;
1856 abi_ulong target_cmsg_addr
;
1857 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1858 socklen_t space
= 0;
1860 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1861 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1863 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1864 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1865 target_cmsg_start
= target_cmsg
;
1867 return -TARGET_EFAULT
;
1869 while (cmsg
&& target_cmsg
) {
1870 void *data
= CMSG_DATA(cmsg
);
1871 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1873 int len
= tswapal(target_cmsg
->cmsg_len
)
1874 - sizeof(struct target_cmsghdr
);
1876 space
+= CMSG_SPACE(len
);
1877 if (space
> msgh
->msg_controllen
) {
1878 space
-= CMSG_SPACE(len
);
1879 /* This is a QEMU bug, since we allocated the payload
1880 * area ourselves (unlike overflow in host-to-target
1881 * conversion, which is just the guest giving us a buffer
1882 * that's too small). It can't happen for the payload types
1883 * we currently support; if it becomes an issue in future
1884 * we would need to improve our allocation strategy to
1885 * something more intelligent than "twice the size of the
1886 * target buffer we're reading from".
1888 qemu_log_mask(LOG_UNIMP
,
1889 ("Unsupported ancillary data %d/%d: "
1890 "unhandled msg size\n"),
1891 tswap32(target_cmsg
->cmsg_level
),
1892 tswap32(target_cmsg
->cmsg_type
));
1896 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1897 cmsg
->cmsg_level
= SOL_SOCKET
;
1899 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1901 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1902 cmsg
->cmsg_len
= CMSG_LEN(len
);
1904 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1905 int *fd
= (int *)data
;
1906 int *target_fd
= (int *)target_data
;
1907 int i
, numfds
= len
/ sizeof(int);
1909 for (i
= 0; i
< numfds
; i
++) {
1910 __get_user(fd
[i
], target_fd
+ i
);
1912 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1913 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1914 struct ucred
*cred
= (struct ucred
*)data
;
1915 struct target_ucred
*target_cred
=
1916 (struct target_ucred
*)target_data
;
1918 __get_user(cred
->pid
, &target_cred
->pid
);
1919 __get_user(cred
->uid
, &target_cred
->uid
);
1920 __get_user(cred
->gid
, &target_cred
->gid
);
1922 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1923 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1924 memcpy(data
, target_data
, len
);
1927 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1928 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1931 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1933 msgh
->msg_controllen
= space
;
1937 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1938 struct msghdr
*msgh
)
1940 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1941 abi_long msg_controllen
;
1942 abi_ulong target_cmsg_addr
;
1943 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1944 socklen_t space
= 0;
1946 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1947 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1949 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1950 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1951 target_cmsg_start
= target_cmsg
;
1953 return -TARGET_EFAULT
;
1955 while (cmsg
&& target_cmsg
) {
1956 void *data
= CMSG_DATA(cmsg
);
1957 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1959 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1960 int tgt_len
, tgt_space
;
1962 /* We never copy a half-header but may copy half-data;
1963 * this is Linux's behaviour in put_cmsg(). Note that
1964 * truncation here is a guest problem (which we report
1965 * to the guest via the CTRUNC bit), unlike truncation
1966 * in target_to_host_cmsg, which is a QEMU bug.
1968 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1969 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1973 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1974 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1976 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1978 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1980 /* Payload types which need a different size of payload on
1981 * the target must adjust tgt_len here.
1984 switch (cmsg
->cmsg_level
) {
1986 switch (cmsg
->cmsg_type
) {
1988 tgt_len
= sizeof(struct target_timeval
);
1998 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1999 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
2000 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
2003 /* We must now copy-and-convert len bytes of payload
2004 * into tgt_len bytes of destination space. Bear in mind
2005 * that in both source and destination we may be dealing
2006 * with a truncated value!
2008 switch (cmsg
->cmsg_level
) {
2010 switch (cmsg
->cmsg_type
) {
2013 int *fd
= (int *)data
;
2014 int *target_fd
= (int *)target_data
;
2015 int i
, numfds
= tgt_len
/ sizeof(int);
2017 for (i
= 0; i
< numfds
; i
++) {
2018 __put_user(fd
[i
], target_fd
+ i
);
2024 struct timeval
*tv
= (struct timeval
*)data
;
2025 struct target_timeval
*target_tv
=
2026 (struct target_timeval
*)target_data
;
2028 if (len
!= sizeof(struct timeval
) ||
2029 tgt_len
!= sizeof(struct target_timeval
)) {
2033 /* copy struct timeval to target */
2034 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
2035 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
2038 case SCM_CREDENTIALS
:
2040 struct ucred
*cred
= (struct ucred
*)data
;
2041 struct target_ucred
*target_cred
=
2042 (struct target_ucred
*)target_data
;
2044 __put_user(cred
->pid
, &target_cred
->pid
);
2045 __put_user(cred
->uid
, &target_cred
->uid
);
2046 __put_user(cred
->gid
, &target_cred
->gid
);
2055 switch (cmsg
->cmsg_type
) {
2058 uint32_t *v
= (uint32_t *)data
;
2059 uint32_t *t_int
= (uint32_t *)target_data
;
2061 if (len
!= sizeof(uint32_t) ||
2062 tgt_len
!= sizeof(uint32_t)) {
2065 __put_user(*v
, t_int
);
2071 struct sock_extended_err ee
;
2072 struct sockaddr_in offender
;
2074 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
2075 struct errhdr_t
*target_errh
=
2076 (struct errhdr_t
*)target_data
;
2078 if (len
!= sizeof(struct errhdr_t
) ||
2079 tgt_len
!= sizeof(struct errhdr_t
)) {
2082 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2083 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2084 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2085 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2086 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2087 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2088 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2089 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2090 (void *) &errh
->offender
, sizeof(errh
->offender
));
2099 switch (cmsg
->cmsg_type
) {
2102 uint32_t *v
= (uint32_t *)data
;
2103 uint32_t *t_int
= (uint32_t *)target_data
;
2105 if (len
!= sizeof(uint32_t) ||
2106 tgt_len
!= sizeof(uint32_t)) {
2109 __put_user(*v
, t_int
);
2115 struct sock_extended_err ee
;
2116 struct sockaddr_in6 offender
;
2118 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
2119 struct errhdr6_t
*target_errh
=
2120 (struct errhdr6_t
*)target_data
;
2122 if (len
!= sizeof(struct errhdr6_t
) ||
2123 tgt_len
!= sizeof(struct errhdr6_t
)) {
2126 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2127 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2128 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2129 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2130 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2131 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2132 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2133 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2134 (void *) &errh
->offender
, sizeof(errh
->offender
));
2144 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
2145 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
2146 memcpy(target_data
, data
, MIN(len
, tgt_len
));
2147 if (tgt_len
> len
) {
2148 memset(target_data
+ len
, 0, tgt_len
- len
);
2152 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
2153 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
2154 if (msg_controllen
< tgt_space
) {
2155 tgt_space
= msg_controllen
;
2157 msg_controllen
-= tgt_space
;
2159 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
2160 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
2163 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
2165 target_msgh
->msg_controllen
= tswapal(space
);
/* do_setsockopt() must return target values and target errnos. */
2170 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2171 abi_ulong optval_addr
, socklen_t optlen
)
2175 struct ip_mreqn
*ip_mreq
;
2176 struct ip_mreq_source
*ip_mreq_source
;
2180 /* TCP options all take an 'int' value. */
2181 if (optlen
< sizeof(uint32_t))
2182 return -TARGET_EINVAL
;
2184 if (get_user_u32(val
, optval_addr
))
2185 return -TARGET_EFAULT
;
2186 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2193 case IP_ROUTER_ALERT
:
2197 case IP_MTU_DISCOVER
:
2204 case IP_MULTICAST_TTL
:
2205 case IP_MULTICAST_LOOP
:
2207 if (optlen
>= sizeof(uint32_t)) {
2208 if (get_user_u32(val
, optval_addr
))
2209 return -TARGET_EFAULT
;
2210 } else if (optlen
>= 1) {
2211 if (get_user_u8(val
, optval_addr
))
2212 return -TARGET_EFAULT
;
2214 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2216 case IP_ADD_MEMBERSHIP
:
2217 case IP_DROP_MEMBERSHIP
:
2218 if (optlen
< sizeof (struct target_ip_mreq
) ||
2219 optlen
> sizeof (struct target_ip_mreqn
))
2220 return -TARGET_EINVAL
;
2222 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2223 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2224 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2227 case IP_BLOCK_SOURCE
:
2228 case IP_UNBLOCK_SOURCE
:
2229 case IP_ADD_SOURCE_MEMBERSHIP
:
2230 case IP_DROP_SOURCE_MEMBERSHIP
:
2231 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2232 return -TARGET_EINVAL
;
2234 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2235 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2236 unlock_user (ip_mreq_source
, optval_addr
, 0);
2245 case IPV6_MTU_DISCOVER
:
2248 case IPV6_RECVPKTINFO
:
2249 case IPV6_UNICAST_HOPS
:
2250 case IPV6_MULTICAST_HOPS
:
2251 case IPV6_MULTICAST_LOOP
:
2253 case IPV6_RECVHOPLIMIT
:
2254 case IPV6_2292HOPLIMIT
:
2257 case IPV6_2292PKTINFO
:
2258 case IPV6_RECVTCLASS
:
2259 case IPV6_RECVRTHDR
:
2260 case IPV6_2292RTHDR
:
2261 case IPV6_RECVHOPOPTS
:
2262 case IPV6_2292HOPOPTS
:
2263 case IPV6_RECVDSTOPTS
:
2264 case IPV6_2292DSTOPTS
:
2266 #ifdef IPV6_RECVPATHMTU
2267 case IPV6_RECVPATHMTU
:
2269 #ifdef IPV6_TRANSPARENT
2270 case IPV6_TRANSPARENT
:
2272 #ifdef IPV6_FREEBIND
2275 #ifdef IPV6_RECVORIGDSTADDR
2276 case IPV6_RECVORIGDSTADDR
:
2279 if (optlen
< sizeof(uint32_t)) {
2280 return -TARGET_EINVAL
;
2282 if (get_user_u32(val
, optval_addr
)) {
2283 return -TARGET_EFAULT
;
2285 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2286 &val
, sizeof(val
)));
2290 struct in6_pktinfo pki
;
2292 if (optlen
< sizeof(pki
)) {
2293 return -TARGET_EINVAL
;
2296 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2297 return -TARGET_EFAULT
;
2300 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2302 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2303 &pki
, sizeof(pki
)));
2306 case IPV6_ADD_MEMBERSHIP
:
2307 case IPV6_DROP_MEMBERSHIP
:
2309 struct ipv6_mreq ipv6mreq
;
2311 if (optlen
< sizeof(ipv6mreq
)) {
2312 return -TARGET_EINVAL
;
2315 if (copy_from_user(&ipv6mreq
, optval_addr
, sizeof(ipv6mreq
))) {
2316 return -TARGET_EFAULT
;
2319 ipv6mreq
.ipv6mr_interface
= tswap32(ipv6mreq
.ipv6mr_interface
);
2321 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2322 &ipv6mreq
, sizeof(ipv6mreq
)));
2333 struct icmp6_filter icmp6f
;
2335 if (optlen
> sizeof(icmp6f
)) {
2336 optlen
= sizeof(icmp6f
);
2339 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2340 return -TARGET_EFAULT
;
2343 for (val
= 0; val
< 8; val
++) {
2344 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2347 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2359 /* those take an u32 value */
2360 if (optlen
< sizeof(uint32_t)) {
2361 return -TARGET_EINVAL
;
2364 if (get_user_u32(val
, optval_addr
)) {
2365 return -TARGET_EFAULT
;
2367 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2368 &val
, sizeof(val
)));
2375 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2380 char *alg_key
= g_malloc(optlen
);
2383 return -TARGET_ENOMEM
;
2385 if (copy_from_user(alg_key
, optval_addr
, optlen
)) {
2387 return -TARGET_EFAULT
;
2389 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2394 case ALG_SET_AEAD_AUTHSIZE
:
2396 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2405 case TARGET_SOL_SOCKET
:
2407 case TARGET_SO_RCVTIMEO
:
2411 optname
= SO_RCVTIMEO
;
2414 if (optlen
!= sizeof(struct target_timeval
)) {
2415 return -TARGET_EINVAL
;
2418 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2419 return -TARGET_EFAULT
;
2422 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2426 case TARGET_SO_SNDTIMEO
:
2427 optname
= SO_SNDTIMEO
;
2429 case TARGET_SO_ATTACH_FILTER
:
2431 struct target_sock_fprog
*tfprog
;
2432 struct target_sock_filter
*tfilter
;
2433 struct sock_fprog fprog
;
2434 struct sock_filter
*filter
;
2437 if (optlen
!= sizeof(*tfprog
)) {
2438 return -TARGET_EINVAL
;
2440 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2441 return -TARGET_EFAULT
;
2443 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2444 tswapal(tfprog
->filter
), 0)) {
2445 unlock_user_struct(tfprog
, optval_addr
, 1);
2446 return -TARGET_EFAULT
;
2449 fprog
.len
= tswap16(tfprog
->len
);
2450 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2451 if (filter
== NULL
) {
2452 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2453 unlock_user_struct(tfprog
, optval_addr
, 1);
2454 return -TARGET_ENOMEM
;
2456 for (i
= 0; i
< fprog
.len
; i
++) {
2457 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2458 filter
[i
].jt
= tfilter
[i
].jt
;
2459 filter
[i
].jf
= tfilter
[i
].jf
;
2460 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2462 fprog
.filter
= filter
;
2464 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2465 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2468 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2469 unlock_user_struct(tfprog
, optval_addr
, 1);
2472 case TARGET_SO_BINDTODEVICE
:
2474 char *dev_ifname
, *addr_ifname
;
2476 if (optlen
> IFNAMSIZ
- 1) {
2477 optlen
= IFNAMSIZ
- 1;
2479 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2481 return -TARGET_EFAULT
;
2483 optname
= SO_BINDTODEVICE
;
2484 addr_ifname
= alloca(IFNAMSIZ
);
2485 memcpy(addr_ifname
, dev_ifname
, optlen
);
2486 addr_ifname
[optlen
] = 0;
2487 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2488 addr_ifname
, optlen
));
2489 unlock_user (dev_ifname
, optval_addr
, 0);
2492 case TARGET_SO_LINGER
:
2495 struct target_linger
*tlg
;
2497 if (optlen
!= sizeof(struct target_linger
)) {
2498 return -TARGET_EINVAL
;
2500 if (!lock_user_struct(VERIFY_READ
, tlg
, optval_addr
, 1)) {
2501 return -TARGET_EFAULT
;
2503 __get_user(lg
.l_onoff
, &tlg
->l_onoff
);
2504 __get_user(lg
.l_linger
, &tlg
->l_linger
);
2505 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, SO_LINGER
,
2507 unlock_user_struct(tlg
, optval_addr
, 0);
2510 /* Options with 'int' argument. */
2511 case TARGET_SO_DEBUG
:
2514 case TARGET_SO_REUSEADDR
:
2515 optname
= SO_REUSEADDR
;
2518 case TARGET_SO_REUSEPORT
:
2519 optname
= SO_REUSEPORT
;
2522 case TARGET_SO_TYPE
:
2525 case TARGET_SO_ERROR
:
2528 case TARGET_SO_DONTROUTE
:
2529 optname
= SO_DONTROUTE
;
2531 case TARGET_SO_BROADCAST
:
2532 optname
= SO_BROADCAST
;
2534 case TARGET_SO_SNDBUF
:
2535 optname
= SO_SNDBUF
;
2537 case TARGET_SO_SNDBUFFORCE
:
2538 optname
= SO_SNDBUFFORCE
;
2540 case TARGET_SO_RCVBUF
:
2541 optname
= SO_RCVBUF
;
2543 case TARGET_SO_RCVBUFFORCE
:
2544 optname
= SO_RCVBUFFORCE
;
2546 case TARGET_SO_KEEPALIVE
:
2547 optname
= SO_KEEPALIVE
;
2549 case TARGET_SO_OOBINLINE
:
2550 optname
= SO_OOBINLINE
;
2552 case TARGET_SO_NO_CHECK
:
2553 optname
= SO_NO_CHECK
;
2555 case TARGET_SO_PRIORITY
:
2556 optname
= SO_PRIORITY
;
2559 case TARGET_SO_BSDCOMPAT
:
2560 optname
= SO_BSDCOMPAT
;
2563 case TARGET_SO_PASSCRED
:
2564 optname
= SO_PASSCRED
;
2566 case TARGET_SO_PASSSEC
:
2567 optname
= SO_PASSSEC
;
2569 case TARGET_SO_TIMESTAMP
:
2570 optname
= SO_TIMESTAMP
;
2572 case TARGET_SO_RCVLOWAT
:
2573 optname
= SO_RCVLOWAT
;
2578 if (optlen
< sizeof(uint32_t))
2579 return -TARGET_EINVAL
;
2581 if (get_user_u32(val
, optval_addr
))
2582 return -TARGET_EFAULT
;
2583 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2588 case NETLINK_PKTINFO
:
2589 case NETLINK_ADD_MEMBERSHIP
:
2590 case NETLINK_DROP_MEMBERSHIP
:
2591 case NETLINK_BROADCAST_ERROR
:
2592 case NETLINK_NO_ENOBUFS
:
2593 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2594 case NETLINK_LISTEN_ALL_NSID
:
2595 case NETLINK_CAP_ACK
:
2596 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2597 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2598 case NETLINK_EXT_ACK
:
2599 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2600 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2601 case NETLINK_GET_STRICT_CHK
:
2602 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2608 if (optlen
< sizeof(uint32_t)) {
2609 return -TARGET_EINVAL
;
2611 if (get_user_u32(val
, optval_addr
)) {
2612 return -TARGET_EFAULT
;
2614 ret
= get_errno(setsockopt(sockfd
, SOL_NETLINK
, optname
, &val
,
2617 #endif /* SOL_NETLINK */
2620 qemu_log_mask(LOG_UNIMP
, "Unsupported setsockopt level=%d optname=%d\n",
2622 ret
= -TARGET_ENOPROTOOPT
;
2627 /* do_getsockopt() Must return target values and target errnos. */
2628 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2629 abi_ulong optval_addr
, abi_ulong optlen
)
2636 case TARGET_SOL_SOCKET
:
2639 /* These don't just return a single integer */
2640 case TARGET_SO_PEERNAME
:
2642 case TARGET_SO_RCVTIMEO
: {
2646 optname
= SO_RCVTIMEO
;
2649 if (get_user_u32(len
, optlen
)) {
2650 return -TARGET_EFAULT
;
2653 return -TARGET_EINVAL
;
2657 ret
= get_errno(getsockopt(sockfd
, level
, optname
,
2662 if (len
> sizeof(struct target_timeval
)) {
2663 len
= sizeof(struct target_timeval
);
2665 if (copy_to_user_timeval(optval_addr
, &tv
)) {
2666 return -TARGET_EFAULT
;
2668 if (put_user_u32(len
, optlen
)) {
2669 return -TARGET_EFAULT
;
2673 case TARGET_SO_SNDTIMEO
:
2674 optname
= SO_SNDTIMEO
;
2676 case TARGET_SO_PEERCRED
: {
2679 struct target_ucred
*tcr
;
2681 if (get_user_u32(len
, optlen
)) {
2682 return -TARGET_EFAULT
;
2685 return -TARGET_EINVAL
;
2689 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2697 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2698 return -TARGET_EFAULT
;
2700 __put_user(cr
.pid
, &tcr
->pid
);
2701 __put_user(cr
.uid
, &tcr
->uid
);
2702 __put_user(cr
.gid
, &tcr
->gid
);
2703 unlock_user_struct(tcr
, optval_addr
, 1);
2704 if (put_user_u32(len
, optlen
)) {
2705 return -TARGET_EFAULT
;
2709 case TARGET_SO_PEERSEC
: {
2712 if (get_user_u32(len
, optlen
)) {
2713 return -TARGET_EFAULT
;
2716 return -TARGET_EINVAL
;
2718 name
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 0);
2720 return -TARGET_EFAULT
;
2723 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERSEC
,
2725 if (put_user_u32(lv
, optlen
)) {
2726 ret
= -TARGET_EFAULT
;
2728 unlock_user(name
, optval_addr
, lv
);
2731 case TARGET_SO_LINGER
:
2735 struct target_linger
*tlg
;
2737 if (get_user_u32(len
, optlen
)) {
2738 return -TARGET_EFAULT
;
2741 return -TARGET_EINVAL
;
2745 ret
= get_errno(getsockopt(sockfd
, level
, SO_LINGER
,
2753 if (!lock_user_struct(VERIFY_WRITE
, tlg
, optval_addr
, 0)) {
2754 return -TARGET_EFAULT
;
2756 __put_user(lg
.l_onoff
, &tlg
->l_onoff
);
2757 __put_user(lg
.l_linger
, &tlg
->l_linger
);
2758 unlock_user_struct(tlg
, optval_addr
, 1);
2759 if (put_user_u32(len
, optlen
)) {
2760 return -TARGET_EFAULT
;
2764 /* Options with 'int' argument. */
2765 case TARGET_SO_DEBUG
:
2768 case TARGET_SO_REUSEADDR
:
2769 optname
= SO_REUSEADDR
;
2772 case TARGET_SO_REUSEPORT
:
2773 optname
= SO_REUSEPORT
;
2776 case TARGET_SO_TYPE
:
2779 case TARGET_SO_ERROR
:
2782 case TARGET_SO_DONTROUTE
:
2783 optname
= SO_DONTROUTE
;
2785 case TARGET_SO_BROADCAST
:
2786 optname
= SO_BROADCAST
;
2788 case TARGET_SO_SNDBUF
:
2789 optname
= SO_SNDBUF
;
2791 case TARGET_SO_RCVBUF
:
2792 optname
= SO_RCVBUF
;
2794 case TARGET_SO_KEEPALIVE
:
2795 optname
= SO_KEEPALIVE
;
2797 case TARGET_SO_OOBINLINE
:
2798 optname
= SO_OOBINLINE
;
2800 case TARGET_SO_NO_CHECK
:
2801 optname
= SO_NO_CHECK
;
2803 case TARGET_SO_PRIORITY
:
2804 optname
= SO_PRIORITY
;
2807 case TARGET_SO_BSDCOMPAT
:
2808 optname
= SO_BSDCOMPAT
;
2811 case TARGET_SO_PASSCRED
:
2812 optname
= SO_PASSCRED
;
2814 case TARGET_SO_TIMESTAMP
:
2815 optname
= SO_TIMESTAMP
;
2817 case TARGET_SO_RCVLOWAT
:
2818 optname
= SO_RCVLOWAT
;
2820 case TARGET_SO_ACCEPTCONN
:
2821 optname
= SO_ACCEPTCONN
;
2828 /* TCP options all take an 'int' value. */
2830 if (get_user_u32(len
, optlen
))
2831 return -TARGET_EFAULT
;
2833 return -TARGET_EINVAL
;
2835 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2838 if (optname
== SO_TYPE
) {
2839 val
= host_to_target_sock_type(val
);
2844 if (put_user_u32(val
, optval_addr
))
2845 return -TARGET_EFAULT
;
2847 if (put_user_u8(val
, optval_addr
))
2848 return -TARGET_EFAULT
;
2850 if (put_user_u32(len
, optlen
))
2851 return -TARGET_EFAULT
;
2858 case IP_ROUTER_ALERT
:
2862 case IP_MTU_DISCOVER
:
2868 case IP_MULTICAST_TTL
:
2869 case IP_MULTICAST_LOOP
:
2870 if (get_user_u32(len
, optlen
))
2871 return -TARGET_EFAULT
;
2873 return -TARGET_EINVAL
;
2875 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2878 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2880 if (put_user_u32(len
, optlen
)
2881 || put_user_u8(val
, optval_addr
))
2882 return -TARGET_EFAULT
;
2884 if (len
> sizeof(int))
2886 if (put_user_u32(len
, optlen
)
2887 || put_user_u32(val
, optval_addr
))
2888 return -TARGET_EFAULT
;
2892 ret
= -TARGET_ENOPROTOOPT
;
2898 case IPV6_MTU_DISCOVER
:
2901 case IPV6_RECVPKTINFO
:
2902 case IPV6_UNICAST_HOPS
:
2903 case IPV6_MULTICAST_HOPS
:
2904 case IPV6_MULTICAST_LOOP
:
2906 case IPV6_RECVHOPLIMIT
:
2907 case IPV6_2292HOPLIMIT
:
2910 case IPV6_2292PKTINFO
:
2911 case IPV6_RECVTCLASS
:
2912 case IPV6_RECVRTHDR
:
2913 case IPV6_2292RTHDR
:
2914 case IPV6_RECVHOPOPTS
:
2915 case IPV6_2292HOPOPTS
:
2916 case IPV6_RECVDSTOPTS
:
2917 case IPV6_2292DSTOPTS
:
2919 #ifdef IPV6_RECVPATHMTU
2920 case IPV6_RECVPATHMTU
:
2922 #ifdef IPV6_TRANSPARENT
2923 case IPV6_TRANSPARENT
:
2925 #ifdef IPV6_FREEBIND
2928 #ifdef IPV6_RECVORIGDSTADDR
2929 case IPV6_RECVORIGDSTADDR
:
2931 if (get_user_u32(len
, optlen
))
2932 return -TARGET_EFAULT
;
2934 return -TARGET_EINVAL
;
2936 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2939 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
2941 if (put_user_u32(len
, optlen
)
2942 || put_user_u8(val
, optval_addr
))
2943 return -TARGET_EFAULT
;
2945 if (len
> sizeof(int))
2947 if (put_user_u32(len
, optlen
)
2948 || put_user_u32(val
, optval_addr
))
2949 return -TARGET_EFAULT
;
2953 ret
= -TARGET_ENOPROTOOPT
;
2960 case NETLINK_PKTINFO
:
2961 case NETLINK_BROADCAST_ERROR
:
2962 case NETLINK_NO_ENOBUFS
:
2963 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2964 case NETLINK_LISTEN_ALL_NSID
:
2965 case NETLINK_CAP_ACK
:
2966 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2967 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2968 case NETLINK_EXT_ACK
:
2969 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2970 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2971 case NETLINK_GET_STRICT_CHK
:
2972 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2973 if (get_user_u32(len
, optlen
)) {
2974 return -TARGET_EFAULT
;
2976 if (len
!= sizeof(val
)) {
2977 return -TARGET_EINVAL
;
2980 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
2984 if (put_user_u32(lv
, optlen
)
2985 || put_user_u32(val
, optval_addr
)) {
2986 return -TARGET_EFAULT
;
2989 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2990 case NETLINK_LIST_MEMBERSHIPS
:
2994 if (get_user_u32(len
, optlen
)) {
2995 return -TARGET_EFAULT
;
2998 return -TARGET_EINVAL
;
3000 results
= lock_user(VERIFY_WRITE
, optval_addr
, len
, 1);
3002 return -TARGET_EFAULT
;
3005 ret
= get_errno(getsockopt(sockfd
, level
, optname
, results
, &lv
));
3007 unlock_user(results
, optval_addr
, 0);
3010 /* swap host endianess to target endianess. */
3011 for (i
= 0; i
< (len
/ sizeof(uint32_t)); i
++) {
3012 results
[i
] = tswap32(results
[i
]);
3014 if (put_user_u32(lv
, optlen
)) {
3015 return -TARGET_EFAULT
;
3017 unlock_user(results
, optval_addr
, 0);
3020 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
3025 #endif /* SOL_NETLINK */
3028 qemu_log_mask(LOG_UNIMP
,
3029 "getsockopt level=%d optname=%d not yet supported\n",
3031 ret
= -TARGET_EOPNOTSUPP
;
3037 /* Convert target low/high pair representing file offset into the host
3038 * low/high pair. This function doesn't handle offsets bigger than 64 bits
3039 * as the kernel doesn't handle them either.
3041 static void target_to_host_low_high(abi_ulong tlow
,
3043 unsigned long *hlow
,
3044 unsigned long *hhigh
)
3046 uint64_t off
= tlow
|
3047 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
3048 TARGET_LONG_BITS
/ 2;
3051 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
3054 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3055 abi_ulong count
, int copy
)
3057 struct target_iovec
*target_vec
;
3059 abi_ulong total_len
, max_len
;
3062 bool bad_address
= false;
3068 if (count
> IOV_MAX
) {
3073 vec
= g_try_new0(struct iovec
, count
);
3079 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3080 count
* sizeof(struct target_iovec
), 1);
3081 if (target_vec
== NULL
) {
3086 /* ??? If host page size > target page size, this will result in a
3087 value larger than what we can actually support. */
3088 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3091 for (i
= 0; i
< count
; i
++) {
3092 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3093 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3098 } else if (len
== 0) {
3099 /* Zero length pointer is ignored. */
3100 vec
[i
].iov_base
= 0;
3102 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3103 /* If the first buffer pointer is bad, this is a fault. But
3104 * subsequent bad buffers will result in a partial write; this
3105 * is realized by filling the vector with null pointers and
3107 if (!vec
[i
].iov_base
) {
3118 if (len
> max_len
- total_len
) {
3119 len
= max_len
- total_len
;
3122 vec
[i
].iov_len
= len
;
3126 unlock_user(target_vec
, target_addr
, 0);
3131 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3132 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3135 unlock_user(target_vec
, target_addr
, 0);
3142 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3143 abi_ulong count
, int copy
)
3145 struct target_iovec
*target_vec
;
3148 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3149 count
* sizeof(struct target_iovec
), 1);
3151 for (i
= 0; i
< count
; i
++) {
3152 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3153 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3157 unlock_user(vec
[i
].iov_base
, base
, copy ? vec
[i
].iov_len
: 0);
3159 unlock_user(target_vec
, target_addr
, 0);
3165 static inline int target_to_host_sock_type(int *type
)
3168 int target_type
= *type
;
3170 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3171 case TARGET_SOCK_DGRAM
:
3172 host_type
= SOCK_DGRAM
;
3174 case TARGET_SOCK_STREAM
:
3175 host_type
= SOCK_STREAM
;
3178 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3181 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3182 #if defined(SOCK_CLOEXEC)
3183 host_type
|= SOCK_CLOEXEC
;
3185 return -TARGET_EINVAL
;
3188 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3189 #if defined(SOCK_NONBLOCK)
3190 host_type
|= SOCK_NONBLOCK
;
3191 #elif !defined(O_NONBLOCK)
3192 return -TARGET_EINVAL
;
3199 /* Try to emulate socket type flags after socket creation. */
3200 static int sock_flags_fixup(int fd
, int target_type
)
3202 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3203 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3204 int flags
= fcntl(fd
, F_GETFL
);
3205 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3207 return -TARGET_EINVAL
;
3214 /* do_socket() Must return target values and target errnos. */
3215 static abi_long
do_socket(int domain
, int type
, int protocol
)
3217 int target_type
= type
;
3220 ret
= target_to_host_sock_type(&type
);
3225 if (domain
== PF_NETLINK
&& !(
3226 #ifdef CONFIG_RTNETLINK
3227 protocol
== NETLINK_ROUTE
||
3229 protocol
== NETLINK_KOBJECT_UEVENT
||
3230 protocol
== NETLINK_AUDIT
)) {
3231 return -TARGET_EPROTONOSUPPORT
;
3234 if (domain
== AF_PACKET
||
3235 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3236 protocol
= tswap16(protocol
);
3239 ret
= get_errno(socket(domain
, type
, protocol
));
3241 ret
= sock_flags_fixup(ret
, target_type
);
3242 if (type
== SOCK_PACKET
) {
3243 /* Manage an obsolete case :
3244 * if socket type is SOCK_PACKET, bind by name
3246 fd_trans_register(ret
, &target_packet_trans
);
3247 } else if (domain
== PF_NETLINK
) {
3249 #ifdef CONFIG_RTNETLINK
3251 fd_trans_register(ret
, &target_netlink_route_trans
);
3254 case NETLINK_KOBJECT_UEVENT
:
3255 /* nothing to do: messages are strings */
3258 fd_trans_register(ret
, &target_netlink_audit_trans
);
3261 g_assert_not_reached();
3268 /* do_bind() Must return target values and target errnos. */
3269 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3275 if ((int)addrlen
< 0) {
3276 return -TARGET_EINVAL
;
3279 addr
= alloca(addrlen
+1);
3281 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3285 return get_errno(bind(sockfd
, addr
, addrlen
));
3288 /* do_connect() Must return target values and target errnos. */
3289 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3295 if ((int)addrlen
< 0) {
3296 return -TARGET_EINVAL
;
3299 addr
= alloca(addrlen
+1);
3301 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3305 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3308 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3309 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3310 int flags
, int send
)