4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
31 #include <sys/mount.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
38 #include <linux/capability.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include <linux/icmpv6.h>
59 #include <linux/errqueue.h>
60 #include <linux/random.h>
62 #include <sys/timerfd.h>
65 #include <sys/eventfd.h>
68 #include <sys/epoll.h>
71 #include "qemu/xattr.h"
73 #ifdef CONFIG_SENDFILE
74 #include <sys/sendfile.h>
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
93 #include <linux/mtio.h>
96 #if defined(CONFIG_FIEMAP)
97 #include <linux/fiemap.h>
100 #if defined(CONFIG_USBFS)
101 #include <linux/usbdevice_fs.h>
102 #include <linux/usb/ch9.h>
104 #include <linux/vt.h>
105 #include <linux/dm-ioctl.h>
106 #include <linux/reboot.h>
107 #include <linux/route.h>
108 #include <linux/filter.h>
109 #include <linux/blkpg.h>
110 #include <netpacket/packet.h>
111 #include <linux/netlink.h>
112 #include <linux/if_alg.h>
113 #include <linux/rtc.h>
114 #include <sound/asound.h>
116 #include <libdrm/drm.h>
118 #include "linux_loop.h"
122 #include "qemu/guest-random.h"
123 #include "qemu/selfmap.h"
124 #include "user/syscall-trace.h"
125 #include "qapi/error.h"
126 #include "fd-trans.h"
130 #define CLONE_IO 0x80000000 /* Clone io context */
133 /* We can't directly call the host clone syscall, because this will
134 * badly confuse libc (breaking mutexes, for example). So we must
135 * divide clone flags into:
136 * * flag combinations that look like pthread_create()
137 * * flag combinations that look like fork()
138 * * flags we can implement within QEMU itself
139 * * flags we can't support and will return an error for
141 /* For thread creation, all these flags must be present; for
142 * fork, none must be present.
144 #define CLONE_THREAD_FLAGS \
145 (CLONE_VM | CLONE_FS | CLONE_FILES | \
146 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
148 /* These flags are ignored:
149 * CLONE_DETACHED is now ignored by the kernel;
150 * CLONE_IO is just an optimisation hint to the I/O scheduler
152 #define CLONE_IGNORED_FLAGS \
153 (CLONE_DETACHED | CLONE_IO)
155 /* Flags for fork which we can implement within QEMU itself */
156 #define CLONE_OPTIONAL_FORK_FLAGS \
157 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
158 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
160 /* Flags for thread creation which we can implement within QEMU itself */
161 #define CLONE_OPTIONAL_THREAD_FLAGS \
162 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
163 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
165 #define CLONE_INVALID_FORK_FLAGS \
166 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
168 #define CLONE_INVALID_THREAD_FLAGS \
169 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
170 CLONE_IGNORED_FLAGS))
172 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
173 * have almost all been allocated. We cannot support any of
174 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
175 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
176 * The checks against the invalid thread masks above will catch these.
177 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
180 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
181 * once. This exercises the codepaths for restart.
183 //#define DEBUG_ERESTARTSYS
185 //#include <linux/msdos_fs.h>
186 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
187 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
197 #define _syscall0(type,name) \
198 static type name (void) \
200 return syscall(__NR_##name); \
203 #define _syscall1(type,name,type1,arg1) \
204 static type name (type1 arg1) \
206 return syscall(__NR_##name, arg1); \
209 #define _syscall2(type,name,type1,arg1,type2,arg2) \
210 static type name (type1 arg1,type2 arg2) \
212 return syscall(__NR_##name, arg1, arg2); \
215 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
216 static type name (type1 arg1,type2 arg2,type3 arg3) \
218 return syscall(__NR_##name, arg1, arg2, arg3); \
221 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
222 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
224 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
227 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
229 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
231 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
235 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
236 type5,arg5,type6,arg6) \
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
240 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
244 #define __NR_sys_uname __NR_uname
245 #define __NR_sys_getcwd1 __NR_getcwd
246 #define __NR_sys_getdents __NR_getdents
247 #define __NR_sys_getdents64 __NR_getdents64
248 #define __NR_sys_getpriority __NR_getpriority
249 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
250 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
251 #define __NR_sys_syslog __NR_syslog
252 #if defined(__NR_futex)
253 # define __NR_sys_futex __NR_futex
255 #if defined(__NR_futex_time64)
256 # define __NR_sys_futex_time64 __NR_futex_time64
258 #define __NR_sys_inotify_init __NR_inotify_init
259 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
260 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
261 #define __NR_sys_statx __NR_statx
263 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
264 #define __NR__llseek __NR_lseek
267 /* Newer kernel ports have llseek() instead of _llseek() */
268 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
269 #define TARGET_NR__llseek TARGET_NR_llseek
272 #define __NR_sys_gettid __NR_gettid
273 _syscall0(int, sys_gettid
)
275 /* For the 64-bit guest on 32-bit host case we must emulate
276 * getdents using getdents64, because otherwise the host
277 * might hand us back more dirent records than we can fit
278 * into the guest buffer after structure format conversion.
279 * Otherwise we emulate getdents with getdents if the host has it.
281 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
282 #define EMULATE_GETDENTS_WITH_GETDENTS
285 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
286 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
288 #if (defined(TARGET_NR_getdents) && \
289 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
290 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
291 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
293 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
294 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
295 loff_t
*, res
, uint
, wh
);
297 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
298 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
300 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
301 #ifdef __NR_exit_group
302 _syscall1(int,exit_group
,int,error_code
)
304 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
305 _syscall1(int,set_tid_address
,int *,tidptr
)
307 #if defined(__NR_futex)
308 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
309 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
311 #if defined(__NR_futex_time64)
312 _syscall6(int,sys_futex_time64
,int *,uaddr
,int,op
,int,val
,
313 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
315 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
316 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
317 unsigned long *, user_mask_ptr
);
318 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
319 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
320 unsigned long *, user_mask_ptr
);
321 #define __NR_sys_getcpu __NR_getcpu
322 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
323 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
325 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
326 struct __user_cap_data_struct
*, data
);
327 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
328 struct __user_cap_data_struct
*, data
);
329 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
330 _syscall2(int, ioprio_get
, int, which
, int, who
)
332 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
333 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
335 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
336 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
339 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
340 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
341 unsigned long, idx1
, unsigned long, idx2
)
345 * It is assumed that struct statx is architecture independent.
347 #if defined(TARGET_NR_statx) && defined(__NR_statx)
348 _syscall5(int, sys_statx
, int, dirfd
, const char *, pathname
, int, flags
,
349 unsigned int, mask
, struct target_statx
*, statxbuf
)
351 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
352 _syscall2(int, membarrier
, int, cmd
, int, flags
)
355 static bitmask_transtbl fcntl_flags_tbl
[] = {
356 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
357 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
358 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
359 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
360 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
361 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
362 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
363 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
364 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
365 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
366 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
367 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
368 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
369 #if defined(O_DIRECT)
370 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
372 #if defined(O_NOATIME)
373 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
375 #if defined(O_CLOEXEC)
376 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
379 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
381 #if defined(O_TMPFILE)
382 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
384 /* Don't terminate the list prematurely on 64-bit host+guest. */
385 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
386 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
391 static int sys_getcwd1(char *buf
, size_t size
)
393 if (getcwd(buf
, size
) == NULL
) {
394 /* getcwd() sets errno */
397 return strlen(buf
)+1;
400 #ifdef TARGET_NR_utimensat
401 #if defined(__NR_utimensat)
402 #define __NR_sys_utimensat __NR_utimensat
403 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
404 const struct timespec
*,tsp
,int,flags
)
406 static int sys_utimensat(int dirfd
, const char *pathname
,
407 const struct timespec times
[2], int flags
)
413 #endif /* TARGET_NR_utimensat */
415 #ifdef TARGET_NR_renameat2
416 #if defined(__NR_renameat2)
417 #define __NR_sys_renameat2 __NR_renameat2
418 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
419 const char *, new, unsigned int, flags
)
421 static int sys_renameat2(int oldfd
, const char *old
,
422 int newfd
, const char *new, int flags
)
425 return renameat(oldfd
, old
, newfd
, new);
431 #endif /* TARGET_NR_renameat2 */
433 #ifdef CONFIG_INOTIFY
434 #include <sys/inotify.h>
436 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
437 static int sys_inotify_init(void)
439 return (inotify_init());
442 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
443 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
445 return (inotify_add_watch(fd
, pathname
, mask
));
448 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
449 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
451 return (inotify_rm_watch(fd
, wd
));
454 #ifdef CONFIG_INOTIFY1
455 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
456 static int sys_inotify_init1(int flags
)
458 return (inotify_init1(flags
));
463 /* Userspace can usually survive runtime without inotify */
464 #undef TARGET_NR_inotify_init
465 #undef TARGET_NR_inotify_init1
466 #undef TARGET_NR_inotify_add_watch
467 #undef TARGET_NR_inotify_rm_watch
468 #endif /* CONFIG_INOTIFY */
470 #if defined(TARGET_NR_prlimit64)
471 #ifndef __NR_prlimit64
472 # define __NR_prlimit64 -1
474 #define __NR_sys_prlimit64 __NR_prlimit64
475 /* The glibc rlimit structure may not be that used by the underlying syscall */
476 struct host_rlimit64
{
480 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
481 const struct host_rlimit64
*, new_limit
,
482 struct host_rlimit64
*, old_limit
)
486 #if defined(TARGET_NR_timer_create)
487 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
488 static timer_t g_posix_timers
[32] = { 0, } ;
490 static inline int next_free_host_timer(void)
493 /* FIXME: Does finding the next free slot require a lock? */
494 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
495 if (g_posix_timers
[k
] == 0) {
496 g_posix_timers
[k
] = (timer_t
) 1;
504 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
506 static inline int regpairs_aligned(void *cpu_env
, int num
)
508 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
510 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
511 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
512 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
513 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
514 * of registers which translates to the same as ARM/MIPS, because we start with
516 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
517 #elif defined(TARGET_SH4)
518 /* SH4 doesn't align register pairs, except for p{read,write}64 */
519 static inline int regpairs_aligned(void *cpu_env
, int num
)
522 case TARGET_NR_pread64
:
523 case TARGET_NR_pwrite64
:
530 #elif defined(TARGET_XTENSA)
531 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
533 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 0; }
536 #define ERRNO_TABLE_SIZE 1200
538 /* target_to_host_errno_table[] is initialized from
539 * host_to_target_errno_table[] in syscall_init(). */
540 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
544 * This list is the union of errno values overridden in asm-<arch>/errno.h
545 * minus the errnos that are not actually generic to all archs.
547 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
548 [EAGAIN
] = TARGET_EAGAIN
,
549 [EIDRM
] = TARGET_EIDRM
,
550 [ECHRNG
] = TARGET_ECHRNG
,
551 [EL2NSYNC
] = TARGET_EL2NSYNC
,
552 [EL3HLT
] = TARGET_EL3HLT
,
553 [EL3RST
] = TARGET_EL3RST
,
554 [ELNRNG
] = TARGET_ELNRNG
,
555 [EUNATCH
] = TARGET_EUNATCH
,
556 [ENOCSI
] = TARGET_ENOCSI
,
557 [EL2HLT
] = TARGET_EL2HLT
,
558 [EDEADLK
] = TARGET_EDEADLK
,
559 [ENOLCK
] = TARGET_ENOLCK
,
560 [EBADE
] = TARGET_EBADE
,
561 [EBADR
] = TARGET_EBADR
,
562 [EXFULL
] = TARGET_EXFULL
,
563 [ENOANO
] = TARGET_ENOANO
,
564 [EBADRQC
] = TARGET_EBADRQC
,
565 [EBADSLT
] = TARGET_EBADSLT
,
566 [EBFONT
] = TARGET_EBFONT
,
567 [ENOSTR
] = TARGET_ENOSTR
,
568 [ENODATA
] = TARGET_ENODATA
,
569 [ETIME
] = TARGET_ETIME
,
570 [ENOSR
] = TARGET_ENOSR
,
571 [ENONET
] = TARGET_ENONET
,
572 [ENOPKG
] = TARGET_ENOPKG
,
573 [EREMOTE
] = TARGET_EREMOTE
,
574 [ENOLINK
] = TARGET_ENOLINK
,
575 [EADV
] = TARGET_EADV
,
576 [ESRMNT
] = TARGET_ESRMNT
,
577 [ECOMM
] = TARGET_ECOMM
,
578 [EPROTO
] = TARGET_EPROTO
,
579 [EDOTDOT
] = TARGET_EDOTDOT
,
580 [EMULTIHOP
] = TARGET_EMULTIHOP
,
581 [EBADMSG
] = TARGET_EBADMSG
,
582 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
583 [EOVERFLOW
] = TARGET_EOVERFLOW
,
584 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
585 [EBADFD
] = TARGET_EBADFD
,
586 [EREMCHG
] = TARGET_EREMCHG
,
587 [ELIBACC
] = TARGET_ELIBACC
,
588 [ELIBBAD
] = TARGET_ELIBBAD
,
589 [ELIBSCN
] = TARGET_ELIBSCN
,
590 [ELIBMAX
] = TARGET_ELIBMAX
,
591 [ELIBEXEC
] = TARGET_ELIBEXEC
,
592 [EILSEQ
] = TARGET_EILSEQ
,
593 [ENOSYS
] = TARGET_ENOSYS
,
594 [ELOOP
] = TARGET_ELOOP
,
595 [ERESTART
] = TARGET_ERESTART
,
596 [ESTRPIPE
] = TARGET_ESTRPIPE
,
597 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
598 [EUSERS
] = TARGET_EUSERS
,
599 [ENOTSOCK
] = TARGET_ENOTSOCK
,
600 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
601 [EMSGSIZE
] = TARGET_EMSGSIZE
,
602 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
603 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
604 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
605 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
606 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
607 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
608 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
609 [EADDRINUSE
] = TARGET_EADDRINUSE
,
610 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
611 [ENETDOWN
] = TARGET_ENETDOWN
,
612 [ENETUNREACH
] = TARGET_ENETUNREACH
,
613 [ENETRESET
] = TARGET_ENETRESET
,
614 [ECONNABORTED
] = TARGET_ECONNABORTED
,
615 [ECONNRESET
] = TARGET_ECONNRESET
,
616 [ENOBUFS
] = TARGET_ENOBUFS
,
617 [EISCONN
] = TARGET_EISCONN
,
618 [ENOTCONN
] = TARGET_ENOTCONN
,
619 [EUCLEAN
] = TARGET_EUCLEAN
,
620 [ENOTNAM
] = TARGET_ENOTNAM
,
621 [ENAVAIL
] = TARGET_ENAVAIL
,
622 [EISNAM
] = TARGET_EISNAM
,
623 [EREMOTEIO
] = TARGET_EREMOTEIO
,
624 [EDQUOT
] = TARGET_EDQUOT
,
625 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
626 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
627 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
628 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
629 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
630 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
631 [EALREADY
] = TARGET_EALREADY
,
632 [EINPROGRESS
] = TARGET_EINPROGRESS
,
633 [ESTALE
] = TARGET_ESTALE
,
634 [ECANCELED
] = TARGET_ECANCELED
,
635 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
636 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
638 [ENOKEY
] = TARGET_ENOKEY
,
641 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
644 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
647 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
650 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
652 #ifdef ENOTRECOVERABLE
653 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
656 [ENOMSG
] = TARGET_ENOMSG
,
659 [ERFKILL
] = TARGET_ERFKILL
,
662 [EHWPOISON
] = TARGET_EHWPOISON
,
666 static inline int host_to_target_errno(int err
)
668 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
669 host_to_target_errno_table
[err
]) {
670 return host_to_target_errno_table
[err
];
675 static inline int target_to_host_errno(int err
)
677 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
678 target_to_host_errno_table
[err
]) {
679 return target_to_host_errno_table
[err
];
684 static inline abi_long
get_errno(abi_long ret
)
687 return -host_to_target_errno(errno
);
692 const char *target_strerror(int err
)
694 if (err
== TARGET_ERESTARTSYS
) {
695 return "To be restarted";
697 if (err
== TARGET_QEMU_ESIGRETURN
) {
698 return "Successful exit from sigreturn";
701 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
704 return strerror(target_to_host_errno(err
));
707 #define safe_syscall0(type, name) \
708 static type safe_##name(void) \
710 return safe_syscall(__NR_##name); \
713 #define safe_syscall1(type, name, type1, arg1) \
714 static type safe_##name(type1 arg1) \
716 return safe_syscall(__NR_##name, arg1); \
719 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
720 static type safe_##name(type1 arg1, type2 arg2) \
722 return safe_syscall(__NR_##name, arg1, arg2); \
725 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
726 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
728 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
731 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
733 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
735 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
738 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
739 type4, arg4, type5, arg5) \
740 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
743 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
746 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
747 type4, arg4, type5, arg5, type6, arg6) \
748 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
749 type5 arg5, type6 arg6) \
751 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
754 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
755 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
756 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
757 int, flags
, mode_t
, mode
)
758 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
759 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
760 struct rusage
*, rusage
)
762 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
763 int, options
, struct rusage
*, rusage
)
764 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
765 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
766 defined(TARGET_NR_pselect6)
767 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
768 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
770 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_poll)
771 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
772 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
775 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
776 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
778 #if defined(__NR_futex)
779 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
780 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
782 #if defined(__NR_futex_time64)
783 safe_syscall6(int,futex_time64
,int *,uaddr
,int,op
,int,val
, \
784 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
786 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
787 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
788 safe_syscall2(int, tkill
, int, tid
, int, sig
)
789 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
790 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
791 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
792 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
793 unsigned long, pos_l
, unsigned long, pos_h
)
794 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
795 unsigned long, pos_l
, unsigned long, pos_h
)
796 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
798 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
799 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
800 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
801 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
802 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
803 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
804 safe_syscall2(int, flock
, int, fd
, int, operation
)
805 #ifdef TARGET_NR_rt_sigtimedwait
806 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
807 const struct timespec
*, uts
, size_t, sigsetsize
)
809 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
811 #if defined(TARGET_NR_nanosleep)
812 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
813 struct timespec
*, rem
)
815 #ifdef TARGET_NR_clock_nanosleep
816 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
817 const struct timespec
*, req
, struct timespec
*, rem
)
820 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
821 void *, ptr
, long, fifth
)
824 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
828 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
829 long, msgtype
, int, flags
)
831 #ifdef __NR_semtimedop
832 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
833 unsigned, nsops
, const struct timespec
*, timeout
)
835 #ifdef TARGET_NR_mq_timedsend
836 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
837 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
839 #ifdef TARGET_NR_mq_timedreceive
840 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
841 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
843 /* We do ioctl like this rather than via safe_syscall3 to preserve the
844 * "third argument might be integer or pointer or not present" behaviour of
847 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
848 /* Similarly for fcntl. Note that callers must always:
849 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
850 * use the flock64 struct rather than unsuffixed flock
851 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
854 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
856 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
859 static inline int host_to_target_sock_type(int host_type
)
863 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
865 target_type
= TARGET_SOCK_DGRAM
;
868 target_type
= TARGET_SOCK_STREAM
;
871 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
875 #if defined(SOCK_CLOEXEC)
876 if (host_type
& SOCK_CLOEXEC
) {
877 target_type
|= TARGET_SOCK_CLOEXEC
;
881 #if defined(SOCK_NONBLOCK)
882 if (host_type
& SOCK_NONBLOCK
) {
883 target_type
|= TARGET_SOCK_NONBLOCK
;
890 static abi_ulong target_brk
;
891 static abi_ulong target_original_brk
;
892 static abi_ulong brk_page
;
894 void target_set_brk(abi_ulong new_brk
)
896 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
897 brk_page
= HOST_PAGE_ALIGN(target_brk
);
900 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
901 #define DEBUGF_BRK(message, args...)
903 /* do_brk() must return target values and target errnos. */
904 abi_long
do_brk(abi_ulong new_brk
)
906 abi_long mapped_addr
;
907 abi_ulong new_alloc_size
;
909 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
912 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
915 if (new_brk
< target_original_brk
) {
916 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
921 /* If the new brk is less than the highest page reserved to the
922 * target heap allocation, set it and we're almost done... */
923 if (new_brk
<= brk_page
) {
924 /* Heap contents are initialized to zero, as for anonymous
926 if (new_brk
> target_brk
) {
927 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
929 target_brk
= new_brk
;
930 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
934 /* We need to allocate more memory after the brk... Note that
935 * we don't use MAP_FIXED because that will map over the top of
936 * any existing mapping (like the one with the host libc or qemu
937 * itself); instead we treat "mapped but at wrong address" as
938 * a failure and unmap again.
940 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
941 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
942 PROT_READ
|PROT_WRITE
,
943 MAP_ANON
|MAP_PRIVATE
, 0, 0));
945 if (mapped_addr
== brk_page
) {
946 /* Heap contents are initialized to zero, as for anonymous
947 * mapped pages. Technically the new pages are already
948 * initialized to zero since they *are* anonymous mapped
949 * pages, however we have to take care with the contents that
950 * come from the remaining part of the previous page: it may
951 * contains garbage data due to a previous heap usage (grown
953 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
955 target_brk
= new_brk
;
956 brk_page
= HOST_PAGE_ALIGN(target_brk
);
957 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
960 } else if (mapped_addr
!= -1) {
961 /* Mapped but at wrong address, meaning there wasn't actually
962 * enough space for this brk.
964 target_munmap(mapped_addr
, new_alloc_size
);
966 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
969 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
972 #if defined(TARGET_ALPHA)
973 /* We (partially) emulate OSF/1 on Alpha, which requires we
974 return a proper errno, not an unchanged brk value. */
975 return -TARGET_ENOMEM
;
977 /* For everything else, return the previous break. */
981 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
982 defined(TARGET_NR_pselect6)
983 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
984 abi_ulong target_fds_addr
,
988 abi_ulong b
, *target_fds
;
990 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
991 if (!(target_fds
= lock_user(VERIFY_READ
,
993 sizeof(abi_ulong
) * nw
,
995 return -TARGET_EFAULT
;
999 for (i
= 0; i
< nw
; i
++) {
1000 /* grab the abi_ulong */
1001 __get_user(b
, &target_fds
[i
]);
1002 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1003 /* check the bit inside the abi_ulong */
1010 unlock_user(target_fds
, target_fds_addr
, 0);
1015 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1016 abi_ulong target_fds_addr
,
1019 if (target_fds_addr
) {
1020 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1021 return -TARGET_EFAULT
;
1029 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1035 abi_ulong
*target_fds
;
1037 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1038 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1040 sizeof(abi_ulong
) * nw
,
1042 return -TARGET_EFAULT
;
1045 for (i
= 0; i
< nw
; i
++) {
1047 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1048 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1051 __put_user(v
, &target_fds
[i
]);
1054 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1060 #if defined(__alpha__)
1061 #define HOST_HZ 1024
1066 static inline abi_long
host_to_target_clock_t(long ticks
)
1068 #if HOST_HZ == TARGET_HZ
1071 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1075 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1076 const struct rusage
*rusage
)
1078 struct target_rusage
*target_rusage
;
1080 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1081 return -TARGET_EFAULT
;
1082 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1083 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1084 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1085 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1086 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1087 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1088 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1089 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1090 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1091 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1092 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1093 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1094 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1095 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1096 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1097 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1098 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1099 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1100 unlock_user_struct(target_rusage
, target_addr
, 1);
1105 #ifdef TARGET_NR_setrlimit
1106 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1108 abi_ulong target_rlim_swap
;
1111 target_rlim_swap
= tswapal(target_rlim
);
1112 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1113 return RLIM_INFINITY
;
1115 result
= target_rlim_swap
;
1116 if (target_rlim_swap
!= (rlim_t
)result
)
1117 return RLIM_INFINITY
;
1123 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1124 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1126 abi_ulong target_rlim_swap
;
1129 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1130 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1132 target_rlim_swap
= rlim
;
1133 result
= tswapal(target_rlim_swap
);
1139 static inline int target_to_host_resource(int code
)
1142 case TARGET_RLIMIT_AS
:
1144 case TARGET_RLIMIT_CORE
:
1146 case TARGET_RLIMIT_CPU
:
1148 case TARGET_RLIMIT_DATA
:
1150 case TARGET_RLIMIT_FSIZE
:
1151 return RLIMIT_FSIZE
;
1152 case TARGET_RLIMIT_LOCKS
:
1153 return RLIMIT_LOCKS
;
1154 case TARGET_RLIMIT_MEMLOCK
:
1155 return RLIMIT_MEMLOCK
;
1156 case TARGET_RLIMIT_MSGQUEUE
:
1157 return RLIMIT_MSGQUEUE
;
1158 case TARGET_RLIMIT_NICE
:
1160 case TARGET_RLIMIT_NOFILE
:
1161 return RLIMIT_NOFILE
;
1162 case TARGET_RLIMIT_NPROC
:
1163 return RLIMIT_NPROC
;
1164 case TARGET_RLIMIT_RSS
:
1166 case TARGET_RLIMIT_RTPRIO
:
1167 return RLIMIT_RTPRIO
;
1168 case TARGET_RLIMIT_SIGPENDING
:
1169 return RLIMIT_SIGPENDING
;
1170 case TARGET_RLIMIT_STACK
:
1171 return RLIMIT_STACK
;
1177 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1178 abi_ulong target_tv_addr
)
1180 struct target_timeval
*target_tv
;
1182 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1)) {
1183 return -TARGET_EFAULT
;
1186 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1187 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1189 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1194 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1195 const struct timeval
*tv
)
1197 struct target_timeval
*target_tv
;
1199 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1200 return -TARGET_EFAULT
;
1203 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1204 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1206 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1211 static inline abi_long
copy_to_user_timeval64(abi_ulong target_tv_addr
,
1212 const struct timeval
*tv
)
1214 struct target__kernel_sock_timeval
*target_tv
;
1216 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0)) {
1217 return -TARGET_EFAULT
;
1220 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1221 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1223 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1228 #if defined(TARGET_NR_futex) || \
1229 defined(TARGET_NR_rt_sigtimedwait) || \
1230 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1231 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1232 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1233 defined(TARGET_NR_mq_timedreceive)
1234 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
1235 abi_ulong target_addr
)
1237 struct target_timespec
*target_ts
;
1239 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1240 return -TARGET_EFAULT
;
1242 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1243 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1244 unlock_user_struct(target_ts
, target_addr
, 0);
1249 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64)
1250 static inline abi_long
target_to_host_timespec64(struct timespec
*host_ts
,
1251 abi_ulong target_addr
)
1253 struct target__kernel_timespec
*target_ts
;
1255 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1)) {
1256 return -TARGET_EFAULT
;
1258 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1259 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1260 unlock_user_struct(target_ts
, target_addr
, 0);
1265 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
1266 struct timespec
*host_ts
)
1268 struct target_timespec
*target_ts
;
1270 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1271 return -TARGET_EFAULT
;
1273 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1274 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1275 unlock_user_struct(target_ts
, target_addr
, 1);
1279 static inline abi_long
host_to_target_timespec64(abi_ulong target_addr
,
1280 struct timespec
*host_ts
)
1282 struct target__kernel_timespec
*target_ts
;
1284 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0)) {
1285 return -TARGET_EFAULT
;
1287 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
1288 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
1289 unlock_user_struct(target_ts
, target_addr
, 1);
1293 #if defined(TARGET_NR_gettimeofday)
1294 static inline abi_long
copy_to_user_timezone(abi_ulong target_tz_addr
,
1295 struct timezone
*tz
)
1297 struct target_timezone
*target_tz
;
1299 if (!lock_user_struct(VERIFY_WRITE
, target_tz
, target_tz_addr
, 1)) {
1300 return -TARGET_EFAULT
;
1303 __put_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1304 __put_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1306 unlock_user_struct(target_tz
, target_tz_addr
, 1);
1312 #if defined(TARGET_NR_settimeofday)
1313 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1314 abi_ulong target_tz_addr
)
1316 struct target_timezone
*target_tz
;
1318 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1319 return -TARGET_EFAULT
;
1322 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1323 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1325 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1331 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1334 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1335 abi_ulong target_mq_attr_addr
)
1337 struct target_mq_attr
*target_mq_attr
;
1339 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1340 target_mq_attr_addr
, 1))
1341 return -TARGET_EFAULT
;
1343 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1344 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1345 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1346 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1348 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1353 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1354 const struct mq_attr
*attr
)
1356 struct target_mq_attr
*target_mq_attr
;
1358 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1359 target_mq_attr_addr
, 0))
1360 return -TARGET_EFAULT
;
1362 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1363 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1364 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1365 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1367 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1373 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1374 /* do_select() must return target values and target errnos. */
1375 static abi_long
do_select(int n
,
1376 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1377 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1379 fd_set rfds
, wfds
, efds
;
1380 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1382 struct timespec ts
, *ts_ptr
;
1385 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1389 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1393 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1398 if (target_tv_addr
) {
1399 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1400 return -TARGET_EFAULT
;
1401 ts
.tv_sec
= tv
.tv_sec
;
1402 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1408 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1411 if (!is_error(ret
)) {
1412 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1413 return -TARGET_EFAULT
;
1414 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1415 return -TARGET_EFAULT
;
1416 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1417 return -TARGET_EFAULT
;
1419 if (target_tv_addr
) {
1420 tv
.tv_sec
= ts
.tv_sec
;
1421 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1422 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1423 return -TARGET_EFAULT
;
1431 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1432 static abi_long
do_old_select(abi_ulong arg1
)
1434 struct target_sel_arg_struct
*sel
;
1435 abi_ulong inp
, outp
, exp
, tvp
;
1438 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1439 return -TARGET_EFAULT
;
1442 nsel
= tswapal(sel
->n
);
1443 inp
= tswapal(sel
->inp
);
1444 outp
= tswapal(sel
->outp
);
1445 exp
= tswapal(sel
->exp
);
1446 tvp
= tswapal(sel
->tvp
);
1448 unlock_user_struct(sel
, arg1
, 0);
1450 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1455 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1458 return pipe2(host_pipe
, flags
);
1464 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1465 int flags
, int is_pipe2
)
1469 ret
= flags ?
do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1472 return get_errno(ret
);
1474 /* Several targets have special calling conventions for the original
1475 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1477 #if defined(TARGET_ALPHA)
1478 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1479 return host_pipe
[0];
1480 #elif defined(TARGET_MIPS)
1481 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1482 return host_pipe
[0];
1483 #elif defined(TARGET_SH4)
1484 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1485 return host_pipe
[0];
1486 #elif defined(TARGET_SPARC)
1487 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1488 return host_pipe
[0];
1492 if (put_user_s32(host_pipe
[0], pipedes
)
1493 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1494 return -TARGET_EFAULT
;
1495 return get_errno(ret
);
1498 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1499 abi_ulong target_addr
,
1502 struct target_ip_mreqn
*target_smreqn
;
1504 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1506 return -TARGET_EFAULT
;
1507 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1508 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1509 if (len
== sizeof(struct target_ip_mreqn
))
1510 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1511 unlock_user(target_smreqn
, target_addr
, 0);
1516 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1517 abi_ulong target_addr
,
1520 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1521 sa_family_t sa_family
;
1522 struct target_sockaddr
*target_saddr
;
1524 if (fd_trans_target_to_host_addr(fd
)) {
1525 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1528 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1530 return -TARGET_EFAULT
;
1532 sa_family
= tswap16(target_saddr
->sa_family
);
1534 /* Oops. The caller might send a incomplete sun_path; sun_path
1535 * must be terminated by \0 (see the manual page), but
1536 * unfortunately it is quite common to specify sockaddr_un
1537 * length as "strlen(x->sun_path)" while it should be
1538 * "strlen(...) + 1". We'll fix that here if needed.
1539 * Linux kernel has a similar feature.
1542 if (sa_family
== AF_UNIX
) {
1543 if (len
< unix_maxlen
&& len
> 0) {
1544 char *cp
= (char*)target_saddr
;
1546 if ( cp
[len
-1] && !cp
[len
] )
1549 if (len
> unix_maxlen
)
1553 memcpy(addr
, target_saddr
, len
);
1554 addr
->sa_family
= sa_family
;
1555 if (sa_family
== AF_NETLINK
) {
1556 struct sockaddr_nl
*nladdr
;
1558 nladdr
= (struct sockaddr_nl
*)addr
;
1559 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1560 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1561 } else if (sa_family
== AF_PACKET
) {
1562 struct target_sockaddr_ll
*lladdr
;
1564 lladdr
= (struct target_sockaddr_ll
*)addr
;
1565 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1566 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1568 unlock_user(target_saddr
, target_addr
, 0);
1573 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1574 struct sockaddr
*addr
,
1577 struct target_sockaddr
*target_saddr
;
1584 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1586 return -TARGET_EFAULT
;
1587 memcpy(target_saddr
, addr
, len
);
1588 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1589 sizeof(target_saddr
->sa_family
)) {
1590 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1592 if (addr
->sa_family
== AF_NETLINK
&&
1593 len
>= sizeof(struct target_sockaddr_nl
)) {
1594 struct target_sockaddr_nl
*target_nl
=
1595 (struct target_sockaddr_nl
*)target_saddr
;
1596 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1597 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1598 } else if (addr
->sa_family
== AF_PACKET
) {
1599 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1600 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1601 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1602 } else if (addr
->sa_family
== AF_INET6
&&
1603 len
>= sizeof(struct target_sockaddr_in6
)) {
1604 struct target_sockaddr_in6
*target_in6
=
1605 (struct target_sockaddr_in6
*)target_saddr
;
1606 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1608 unlock_user(target_saddr
, target_addr
, len
);
1613 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1614 struct target_msghdr
*target_msgh
)
1616 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1617 abi_long msg_controllen
;
1618 abi_ulong target_cmsg_addr
;
1619 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1620 socklen_t space
= 0;
1622 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1623 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1625 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1626 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1627 target_cmsg_start
= target_cmsg
;
1629 return -TARGET_EFAULT
;
1631 while (cmsg
&& target_cmsg
) {
1632 void *data
= CMSG_DATA(cmsg
);
1633 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1635 int len
= tswapal(target_cmsg
->cmsg_len
)
1636 - sizeof(struct target_cmsghdr
);
1638 space
+= CMSG_SPACE(len
);
1639 if (space
> msgh
->msg_controllen
) {
1640 space
-= CMSG_SPACE(len
);
1641 /* This is a QEMU bug, since we allocated the payload
1642 * area ourselves (unlike overflow in host-to-target
1643 * conversion, which is just the guest giving us a buffer
1644 * that's too small). It can't happen for the payload types
1645 * we currently support; if it becomes an issue in future
1646 * we would need to improve our allocation strategy to
1647 * something more intelligent than "twice the size of the
1648 * target buffer we're reading from".
1650 qemu_log_mask(LOG_UNIMP
,
1651 ("Unsupported ancillary data %d/%d: "
1652 "unhandled msg size\n"),
1653 tswap32(target_cmsg
->cmsg_level
),
1654 tswap32(target_cmsg
->cmsg_type
));
1658 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1659 cmsg
->cmsg_level
= SOL_SOCKET
;
1661 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1663 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1664 cmsg
->cmsg_len
= CMSG_LEN(len
);
1666 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1667 int *fd
= (int *)data
;
1668 int *target_fd
= (int *)target_data
;
1669 int i
, numfds
= len
/ sizeof(int);
1671 for (i
= 0; i
< numfds
; i
++) {
1672 __get_user(fd
[i
], target_fd
+ i
);
1674 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1675 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1676 struct ucred
*cred
= (struct ucred
*)data
;
1677 struct target_ucred
*target_cred
=
1678 (struct target_ucred
*)target_data
;
1680 __get_user(cred
->pid
, &target_cred
->pid
);
1681 __get_user(cred
->uid
, &target_cred
->uid
);
1682 __get_user(cred
->gid
, &target_cred
->gid
);
1684 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1685 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1686 memcpy(data
, target_data
, len
);
1689 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1690 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1693 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1695 msgh
->msg_controllen
= space
;
1699 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1700 struct msghdr
*msgh
)
1702 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1703 abi_long msg_controllen
;
1704 abi_ulong target_cmsg_addr
;
1705 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1706 socklen_t space
= 0;
1708 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1709 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1711 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1712 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1713 target_cmsg_start
= target_cmsg
;
1715 return -TARGET_EFAULT
;
1717 while (cmsg
&& target_cmsg
) {
1718 void *data
= CMSG_DATA(cmsg
);
1719 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1721 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1722 int tgt_len
, tgt_space
;
1724 /* We never copy a half-header but may copy half-data;
1725 * this is Linux's behaviour in put_cmsg(). Note that
1726 * truncation here is a guest problem (which we report
1727 * to the guest via the CTRUNC bit), unlike truncation
1728 * in target_to_host_cmsg, which is a QEMU bug.
1730 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1731 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1735 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1736 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1738 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1740 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1742 /* Payload types which need a different size of payload on
1743 * the target must adjust tgt_len here.
1746 switch (cmsg
->cmsg_level
) {
1748 switch (cmsg
->cmsg_type
) {
1750 tgt_len
= sizeof(struct target_timeval
);
1760 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1761 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1762 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1765 /* We must now copy-and-convert len bytes of payload
1766 * into tgt_len bytes of destination space. Bear in mind
1767 * that in both source and destination we may be dealing
1768 * with a truncated value!
1770 switch (cmsg
->cmsg_level
) {
1772 switch (cmsg
->cmsg_type
) {
1775 int *fd
= (int *)data
;
1776 int *target_fd
= (int *)target_data
;
1777 int i
, numfds
= tgt_len
/ sizeof(int);
1779 for (i
= 0; i
< numfds
; i
++) {
1780 __put_user(fd
[i
], target_fd
+ i
);
1786 struct timeval
*tv
= (struct timeval
*)data
;
1787 struct target_timeval
*target_tv
=
1788 (struct target_timeval
*)target_data
;
1790 if (len
!= sizeof(struct timeval
) ||
1791 tgt_len
!= sizeof(struct target_timeval
)) {
1795 /* copy struct timeval to target */
1796 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1797 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1800 case SCM_CREDENTIALS
:
1802 struct ucred
*cred
= (struct ucred
*)data
;
1803 struct target_ucred
*target_cred
=
1804 (struct target_ucred
*)target_data
;
1806 __put_user(cred
->pid
, &target_cred
->pid
);
1807 __put_user(cred
->uid
, &target_cred
->uid
);
1808 __put_user(cred
->gid
, &target_cred
->gid
);
1817 switch (cmsg
->cmsg_type
) {
1820 uint32_t *v
= (uint32_t *)data
;
1821 uint32_t *t_int
= (uint32_t *)target_data
;
1823 if (len
!= sizeof(uint32_t) ||
1824 tgt_len
!= sizeof(uint32_t)) {
1827 __put_user(*v
, t_int
);
1833 struct sock_extended_err ee
;
1834 struct sockaddr_in offender
;
1836 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1837 struct errhdr_t
*target_errh
=
1838 (struct errhdr_t
*)target_data
;
1840 if (len
!= sizeof(struct errhdr_t
) ||
1841 tgt_len
!= sizeof(struct errhdr_t
)) {
1844 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1845 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1846 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1847 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1848 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1849 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1850 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1851 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1852 (void *) &errh
->offender
, sizeof(errh
->offender
));
1861 switch (cmsg
->cmsg_type
) {
1864 uint32_t *v
= (uint32_t *)data
;
1865 uint32_t *t_int
= (uint32_t *)target_data
;
1867 if (len
!= sizeof(uint32_t) ||
1868 tgt_len
!= sizeof(uint32_t)) {
1871 __put_user(*v
, t_int
);
1877 struct sock_extended_err ee
;
1878 struct sockaddr_in6 offender
;
1880 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1881 struct errhdr6_t
*target_errh
=
1882 (struct errhdr6_t
*)target_data
;
1884 if (len
!= sizeof(struct errhdr6_t
) ||
1885 tgt_len
!= sizeof(struct errhdr6_t
)) {
1888 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1889 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1890 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1891 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1892 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1893 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1894 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1895 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1896 (void *) &errh
->offender
, sizeof(errh
->offender
));
1906 qemu_log_mask(LOG_UNIMP
, "Unsupported ancillary data: %d/%d\n",
1907 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1908 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1909 if (tgt_len
> len
) {
1910 memset(target_data
+ len
, 0, tgt_len
- len
);
1914 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1915 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1916 if (msg_controllen
< tgt_space
) {
1917 tgt_space
= msg_controllen
;
1919 msg_controllen
-= tgt_space
;
1921 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1922 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1925 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1927 target_msgh
->msg_controllen
= tswapal(space
);
1931 /* do_setsockopt() Must return target values and target errnos. */
1932 static abi_long do_setsockopt(int sockfd, int level, int optname,
1933 abi_ulong optval_addr, socklen_t optlen)
1937 struct ip_mreqn *ip_mreq;
1938 struct ip_mreq_source *ip_mreq_source;
1942 /* TCP options all take an 'int' value. */
1943 if (optlen < sizeof(uint32_t))
1944 return -TARGET_EINVAL;
1946 if (get_user_u32(val, optval_addr))
1947 return -TARGET_EFAULT;
1948 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1955 case IP_ROUTER_ALERT:
1959 case IP_MTU_DISCOVER:
1966 case IP_MULTICAST_TTL:
1967 case IP_MULTICAST_LOOP:
1969 if (optlen >= sizeof(uint32_t)) {
1970 if (get_user_u32(val, optval_addr))
1971 return -TARGET_EFAULT;
1972 } else if (optlen >= 1) {
1973 if (get_user_u8(val, optval_addr))
1974 return -TARGET_EFAULT;
1976 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1978 case IP_ADD_MEMBERSHIP:
1979 case IP_DROP_MEMBERSHIP:
1980 if (optlen < sizeof (struct target_ip_mreq) ||
1981 optlen > sizeof (struct target_ip_mreqn))
1982 return -TARGET_EINVAL;
1984 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1985 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1986 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1989 case IP_BLOCK_SOURCE:
1990 case IP_UNBLOCK_SOURCE:
1991 case IP_ADD_SOURCE_MEMBERSHIP:
1992 case IP_DROP_SOURCE_MEMBERSHIP:
1993 if (optlen != sizeof (struct target_ip_mreq_source))
1994 return -TARGET_EINVAL;
1996 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1997 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1998 unlock_user (ip_mreq_source, optval_addr, 0);
2007 case IPV6_MTU_DISCOVER:
2010 case IPV6_RECVPKTINFO:
2011 case IPV6_UNICAST_HOPS:
2012 case IPV6_MULTICAST_HOPS:
2013 case IPV6_MULTICAST_LOOP:
2015 case IPV6_RECVHOPLIMIT:
2016 case IPV6_2292HOPLIMIT:
2019 case IPV6_2292PKTINFO:
2020 case IPV6_RECVTCLASS:
2021 case IPV6_RECVRTHDR:
2022 case IPV6_2292RTHDR:
2023 case IPV6_RECVHOPOPTS:
2024 case IPV6_2292HOPOPTS:
2025 case IPV6_RECVDSTOPTS:
2026 case IPV6_2292DSTOPTS:
2028 #ifdef IPV6_RECVPATHMTU
2029 case IPV6_RECVPATHMTU:
2031 #ifdef IPV6_TRANSPARENT
2032 case IPV6_TRANSPARENT:
2034 #ifdef IPV6_FREEBIND
2037 #ifdef IPV6_RECVORIGDSTADDR
2038 case IPV6_RECVORIGDSTADDR:
2041 if (optlen < sizeof(uint32_t)) {
2042 return -TARGET_EINVAL;
2044 if (get_user_u32(val, optval_addr)) {
2045 return -TARGET_EFAULT;
2047 ret = get_errno(setsockopt(sockfd, level, optname,
2048 &val, sizeof(val)));
2052 struct in6_pktinfo pki;
2054 if (optlen < sizeof(pki)) {
2055 return -TARGET_EINVAL;
2058 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2059 return -TARGET_EFAULT;
2062 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2064 ret = get_errno(setsockopt(sockfd, level, optname,
2065 &pki, sizeof(pki)));
2068 case IPV6_ADD_MEMBERSHIP:
2069 case IPV6_DROP_MEMBERSHIP:
2071 struct ipv6_mreq ipv6mreq;
2073 if (optlen < sizeof(ipv6mreq)) {
2074 return -TARGET_EINVAL;
2077 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2078 return -TARGET_EFAULT;
2081 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2083 ret = get_errno(setsockopt(sockfd, level, optname,
2084 &ipv6mreq, sizeof(ipv6mreq)));
2095 struct icmp6_filter icmp6f;
2097 if (optlen > sizeof(icmp6f)) {
2098 optlen = sizeof(icmp6f);
2101 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2102 return -TARGET_EFAULT;
2105 for (val = 0; val < 8; val++) {
2106 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2109 ret = get_errno(setsockopt(sockfd, level, optname,
2121 /* These options take a u32 value. */
2122 if (optlen < sizeof(uint32_t)) {
2123 return -TARGET_EINVAL;
2126 if (get_user_u32(val, optval_addr)) {
2127 return -TARGET_EFAULT;
2129 ret = get_errno(setsockopt(sockfd, level, optname,
2130 &val, sizeof(val)));
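/*
 * AF_ALG (Linux kernel crypto API) options: ALG_SET_KEY copies the key
 * through a heap bounce buffer before handing it to the host, while
 * ALG_SET_AEAD_AUTHSIZE is passed straight through to setsockopt().
 */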
2137 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2142 char *alg_key = g_malloc(optlen);
2145 return -TARGET_ENOMEM;
2147 if (copy_from_user(alg_key, optval_addr, optlen)) {
2149 return -TARGET_EFAULT;
2151 ret = get_errno(setsockopt(sockfd, level, optname,
2156 case ALG_SET_AEAD_AUTHSIZE:
2158 ret = get_errno(setsockopt(sockfd, level, optname,
2167 case TARGET_SOL_SOCKET:
2169 case TARGET_SO_RCVTIMEO:
2173 optname = SO_RCVTIMEO;
2176 if (optlen != sizeof(struct target_timeval)) {
2177 return -TARGET_EINVAL;
2180 if (copy_from_user_timeval(&tv, optval_addr)) {
2181 return -TARGET_EFAULT;
2184 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2188 case TARGET_SO_SNDTIMEO:
2189 optname = SO_SNDTIMEO;
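/*
 * SO_ATTACH_FILTER: the guest passes a target_sock_fprog describing a
 * classic BPF program. Each target_sock_filter instruction is converted
 * into a host sock_filter (the 16-bit opcode and 32-bit constant are
 * byte-swapped, the jump offsets copied verbatim) before the whole
 * program is attached with the host SO_ATTACH_FILTER.
 */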
2191 case TARGET_SO_ATTACH_FILTER:
2193 struct target_sock_fprog *tfprog;
2194 struct target_sock_filter *tfilter;
2195 struct sock_fprog fprog;
2196 struct sock_filter *filter;
2199 if (optlen != sizeof(*tfprog)) {
2200 return -TARGET_EINVAL;
2202 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2203 return -TARGET_EFAULT;
2205 if (!lock_user_struct(VERIFY_READ, tfilter,
2206 tswapal(tfprog->filter), 0)) {
2207 unlock_user_struct(tfprog, optval_addr, 1);
2208 return -TARGET_EFAULT;
2211 fprog.len = tswap16(tfprog->len);
2212 filter = g_try_new(struct sock_filter, fprog.len);
2213 if (filter == NULL) {
2214 unlock_user_struct(tfilter, tfprog->filter, 1);
2215 unlock_user_struct(tfprog, optval_addr, 1);
2216 return -TARGET_ENOMEM;
2218 for (i = 0; i < fprog.len; i++) {
2219 filter[i].code = tswap16(tfilter[i].code);
2220 filter[i].jt = tfilter[i].jt;
2221 filter[i].jf = tfilter[i].jf;
2222 filter[i].k = tswap32(tfilter[i].k);
2224 fprog.filter = filter;
2226 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2227 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2230 unlock_user_struct(tfilter, tfprog->filter, 1);
2231 unlock_user_struct(tfprog, optval_addr, 1);
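/*
 * SO_BINDTODEVICE: the interface name is copied from guest memory into a
 * NUL-terminated bounce buffer capped at IFNAMSIZ - 1 bytes before the
 * host setsockopt() is called.
 */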
2234 case TARGET_SO_BINDTODEVICE:
2236 char *dev_ifname, *addr_ifname;
2238 if (optlen > IFNAMSIZ - 1) {
2239 optlen = IFNAMSIZ - 1;
2241 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2243 return -TARGET_EFAULT;
2245 optname = SO_BINDTODEVICE;
2246 addr_ifname = alloca(IFNAMSIZ);
2247 memcpy(addr_ifname, dev_ifname, optlen);
2248 addr_ifname[optlen] = 0;
2249 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2250 addr_ifname, optlen));
2251 unlock_user (dev_ifname, optval_addr, 0);
2254 case TARGET_SO_LINGER:
2257 struct target_linger *tlg;
2259 if (optlen != sizeof(struct target_linger)) {
2260 return -TARGET_EINVAL;
2262 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2263 return -TARGET_EFAULT;
2265 __get_user(lg.l_onoff, &tlg->l_onoff);
2266 __get_user(lg.l_linger, &tlg->l_linger);
2267 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2269 unlock_user_struct(tlg, optval_addr, 0);
2272 /* Options with 'int' argument. */
2273 case TARGET_SO_DEBUG:
2276 case TARGET_SO_REUSEADDR:
2277 optname = SO_REUSEADDR;
2280 case TARGET_SO_REUSEPORT:
2281 optname = SO_REUSEPORT;
2284 case TARGET_SO_TYPE:
2287 case TARGET_SO_ERROR:
2290 case TARGET_SO_DONTROUTE:
2291 optname = SO_DONTROUTE;
2293 case TARGET_SO_BROADCAST:
2294 optname = SO_BROADCAST;
2296 case TARGET_SO_SNDBUF:
2297 optname = SO_SNDBUF;
2299 case TARGET_SO_SNDBUFFORCE:
2300 optname = SO_SNDBUFFORCE;
2302 case TARGET_SO_RCVBUF:
2303 optname = SO_RCVBUF;
2305 case TARGET_SO_RCVBUFFORCE:
2306 optname = SO_RCVBUFFORCE;
2308 case TARGET_SO_KEEPALIVE:
2309 optname = SO_KEEPALIVE;
2311 case TARGET_SO_OOBINLINE:
2312 optname = SO_OOBINLINE;
2314 case TARGET_SO_NO_CHECK:
2315 optname = SO_NO_CHECK;
2317 case TARGET_SO_PRIORITY:
2318 optname = SO_PRIORITY;
2321 case TARGET_SO_BSDCOMPAT:
2322 optname = SO_BSDCOMPAT;
2325 case TARGET_SO_PASSCRED:
2326 optname = SO_PASSCRED;
2328 case TARGET_SO_PASSSEC:
2329 optname = SO_PASSSEC;
2331 case TARGET_SO_TIMESTAMP:
2332 optname = SO_TIMESTAMP;
2334 case TARGET_SO_RCVLOWAT:
2335 optname = SO_RCVLOWAT;
2340 if (optlen < sizeof(uint32_t))
2341 return -TARGET_EINVAL;
2343 if (get_user_u32(val, optval_addr))
2344 return -TARGET_EFAULT;
2345 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
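/*
 * The SOL_NETLINK options handled below are all plain 32-bit values that
 * are read from guest memory and forwarded to the host unchanged; which
 * options are compiled in depends on the host kernel headers, hence the
 * LINUX_VERSION_CODE guards.
 */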
2350 case NETLINK_PKTINFO:
2351 case NETLINK_ADD_MEMBERSHIP:
2352 case NETLINK_DROP_MEMBERSHIP:
2353 case NETLINK_BROADCAST_ERROR:
2354 case NETLINK_NO_ENOBUFS:
2355 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2356 case NETLINK_LISTEN_ALL_NSID:
2357 case NETLINK_CAP_ACK:
2358 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2359 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2360 case NETLINK_EXT_ACK:
2361 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2362 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2363 case NETLINK_GET_STRICT_CHK:
2364 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2370 if (optlen < sizeof(uint32_t)) {
2371 return -TARGET_EINVAL;
2373 if (get_user_u32(val, optval_addr)) {
2374 return -TARGET_EFAULT;
2376 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2379 #endif /* SOL_NETLINK */
2382 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2384 ret = -TARGET_ENOPROTOOPT;
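/*
 * do_getsockopt() mirrors do_setsockopt(): the guest's optlen is read
 * with get_user_u32(), the host getsockopt() is called into local
 * storage, and the value plus the (possibly clamped) length are written
 * back to guest memory with put_user_*(). Integer options requested with
 * a length smaller than sizeof(int) are narrowed to a single byte when
 * the value fits.
 */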
2389 /* do_getsockopt() Must return target values and target errnos. */
2390 static abi_long do_getsockopt(int sockfd, int level, int optname,
2391 abi_ulong optval_addr, abi_ulong optlen)
2398 case TARGET_SOL_SOCKET:
2401 /* These don't just return a single integer */
2402 case TARGET_SO_PEERNAME:
2404 case TARGET_SO_RCVTIMEO: {
2408 optname = SO_RCVTIMEO;
2411 if (get_user_u32(len, optlen)) {
2412 return -TARGET_EFAULT;
2415 return -TARGET_EINVAL;
2419 ret = get_errno(getsockopt(sockfd, level, optname,
2424 if (len > sizeof(struct target_timeval)) {
2425 len = sizeof(struct target_timeval);
2427 if (copy_to_user_timeval(optval_addr, &tv)) {
2428 return -TARGET_EFAULT;
2430 if (put_user_u32(len, optlen)) {
2431 return -TARGET_EFAULT;
2435 case TARGET_SO_SNDTIMEO:
2436 optname = SO_SNDTIMEO;
2438 case TARGET_SO_PEERCRED: {
2441 struct target_ucred *tcr;
2443 if (get_user_u32(len, optlen)) {
2444 return -TARGET_EFAULT;
2447 return -TARGET_EINVAL;
2451 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2459 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2460 return -TARGET_EFAULT;
2462 __put_user(cr.pid, &tcr->pid);
2463 __put_user(cr.uid, &tcr->uid);
2464 __put_user(cr.gid, &tcr->gid);
2465 unlock_user_struct(tcr, optval_addr, 1);
2466 if (put_user_u32(len, optlen)) {
2467 return -TARGET_EFAULT;
2471 case TARGET_SO_PEERSEC: {
2474 if (get_user_u32(len, optlen)) {
2475 return -TARGET_EFAULT;
2478 return -TARGET_EINVAL;
2480 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2482 return -TARGET_EFAULT;
2485 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2487 if (put_user_u32(lv, optlen)) {
2488 ret = -TARGET_EFAULT;
2490 unlock_user(name, optval_addr, lv);
2493 case TARGET_SO_LINGER:
2497 struct target_linger *tlg;
2499 if (get_user_u32(len, optlen)) {
2500 return -TARGET_EFAULT;
2503 return -TARGET_EINVAL;
2507 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2515 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2516 return -TARGET_EFAULT;
2518 __put_user(lg.l_onoff, &tlg->l_onoff);
2519 __put_user(lg.l_linger, &tlg->l_linger);
2520 unlock_user_struct(tlg, optval_addr, 1);
2521 if (put_user_u32(len, optlen)) {
2522 return -TARGET_EFAULT;
2526 /* Options with 'int' argument. */
2527 case TARGET_SO_DEBUG:
2530 case TARGET_SO_REUSEADDR:
2531 optname = SO_REUSEADDR;
2534 case TARGET_SO_REUSEPORT:
2535 optname = SO_REUSEPORT;
2538 case TARGET_SO_TYPE:
2541 case TARGET_SO_ERROR:
2544 case TARGET_SO_DONTROUTE:
2545 optname = SO_DONTROUTE;
2547 case TARGET_SO_BROADCAST:
2548 optname = SO_BROADCAST;
2550 case TARGET_SO_SNDBUF:
2551 optname = SO_SNDBUF;
2553 case TARGET_SO_RCVBUF:
2554 optname = SO_RCVBUF;
2556 case TARGET_SO_KEEPALIVE:
2557 optname = SO_KEEPALIVE;
2559 case TARGET_SO_OOBINLINE:
2560 optname = SO_OOBINLINE;
2562 case TARGET_SO_NO_CHECK:
2563 optname = SO_NO_CHECK;
2565 case TARGET_SO_PRIORITY:
2566 optname = SO_PRIORITY;
2569 case TARGET_SO_BSDCOMPAT:
2570 optname = SO_BSDCOMPAT;
2573 case TARGET_SO_PASSCRED:
2574 optname = SO_PASSCRED;
2576 case TARGET_SO_TIMESTAMP:
2577 optname = SO_TIMESTAMP;
2579 case TARGET_SO_RCVLOWAT:
2580 optname = SO_RCVLOWAT;
2582 case TARGET_SO_ACCEPTCONN:
2583 optname = SO_ACCEPTCONN;
2590 /* TCP options all take an 'int' value. */
2592 if (get_user_u32(len, optlen))
2593 return -TARGET_EFAULT;
2595 return -TARGET_EINVAL;
2597 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2600 if (optname == SO_TYPE) {
2601 val = host_to_target_sock_type(val);
2606 if (put_user_u32(val, optval_addr))
2607 return -TARGET_EFAULT;
2609 if (put_user_u8(val, optval_addr))
2610 return -TARGET_EFAULT;
2612 if (put_user_u32(len, optlen))
2613 return -TARGET_EFAULT;
2620 case IP_ROUTER_ALERT:
2624 case IP_MTU_DISCOVER:
2630 case IP_MULTICAST_TTL:
2631 case IP_MULTICAST_LOOP:
2632 if (get_user_u32(len, optlen))
2633 return -TARGET_EFAULT;
2635 return -TARGET_EINVAL;
2637 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2640 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2642 if (put_user_u32(len, optlen)
2643 || put_user_u8(val, optval_addr))
2644 return -TARGET_EFAULT;
2646 if (len > sizeof(int))
2648 if (put_user_u32(len, optlen)
2649 || put_user_u32(val, optval_addr))
2650 return -TARGET_EFAULT;
2654 ret = -TARGET_ENOPROTOOPT;
2660 case IPV6_MTU_DISCOVER:
2663 case IPV6_RECVPKTINFO:
2664 case IPV6_UNICAST_HOPS:
2665 case IPV6_MULTICAST_HOPS:
2666 case IPV6_MULTICAST_LOOP:
2668 case IPV6_RECVHOPLIMIT:
2669 case IPV6_2292HOPLIMIT:
2672 case IPV6_2292PKTINFO:
2673 case IPV6_RECVTCLASS:
2674 case IPV6_RECVRTHDR:
2675 case IPV6_2292RTHDR:
2676 case IPV6_RECVHOPOPTS:
2677 case IPV6_2292HOPOPTS:
2678 case IPV6_RECVDSTOPTS:
2679 case IPV6_2292DSTOPTS:
2681 #ifdef IPV6_RECVPATHMTU
2682 case IPV6_RECVPATHMTU:
2684 #ifdef IPV6_TRANSPARENT
2685 case IPV6_TRANSPARENT:
2687 #ifdef IPV6_FREEBIND
2690 #ifdef IPV6_RECVORIGDSTADDR
2691 case IPV6_RECVORIGDSTADDR:
2693 if (get_user_u32(len, optlen))
2694 return -TARGET_EFAULT;
2696 return -TARGET_EINVAL;
2698 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2701 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2703 if (put_user_u32(len, optlen)
2704 || put_user_u8(val, optval_addr))
2705 return -TARGET_EFAULT;
2707 if (len > sizeof(int))
2709 if (put_user_u32(len, optlen)
2710 || put_user_u32(val, optval_addr))
2711 return -TARGET_EFAULT;
2715 ret = -TARGET_ENOPROTOOPT;
2722 case NETLINK_PKTINFO:
2723 case NETLINK_BROADCAST_ERROR:
2724 case NETLINK_NO_ENOBUFS:
2725 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2726 case NETLINK_LISTEN_ALL_NSID:
2727 case NETLINK_CAP_ACK:
2728 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2729 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2730 case NETLINK_EXT_ACK:
2731 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2732 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2733 case NETLINK_GET_STRICT_CHK:
2734 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2735 if (get_user_u32(len, optlen)) {
2736 return -TARGET_EFAULT;
2738 if (len != sizeof(val)) {
2739 return -TARGET_EINVAL;
2742 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2746 if (put_user_u32(lv, optlen)
2747 || put_user_u32(val, optval_addr)) {
2748 return -TARGET_EFAULT;
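/*
 * NETLINK_LIST_MEMBERSHIPS returns an array of 32-bit group bitmap words
 * rather than a single integer, so the guest buffer is locked for
 * writing and each element is byte-swapped to target endianness before
 * the length is reported back.
 */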
2751 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2752 case NETLINK_LIST_MEMBERSHIPS:
2756 if (get_user_u32(len, optlen)) {
2757 return -TARGET_EFAULT;
2760 return -TARGET_EINVAL;
2762 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2764 return -TARGET_EFAULT;
2767 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2769 unlock_user(results, optval_addr, 0);
2772 /* Swap host endianness to target endianness. */
2773 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2774 results[i] = tswap32(results[i]);
2776 if (put_user_u32(lv, optlen)) {
2777 return -TARGET_EFAULT;
2779 unlock_user(results, optval_addr, 0);
2782 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2787 #endif /* SOL_NETLINK */
2790 qemu_log_mask(LOG_UNIMP,
2791 "getsockopt level=%d optname=%d not yet supported\n",
2793 ret = -TARGET_EOPNOTSUPP;
2799 /* Convert target low/high pair representing file offset into the host
2800 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2801 * as the kernel doesn't handle them either.
2803 static void target_to_host_low_high(abi_ulong tlow,
2805 unsigned long *hlow,
2806 unsigned long *hhigh)
2808 uint64_t off = tlow |
2809 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2810 TARGET_LONG_BITS / 2;
2813 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
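/*
 * Illustrative example (not part of the original source): with
 * TARGET_LONG_BITS == 32, tlow = 0x00001000 and thigh = 0x2 combine into
 * off = 0x0000000200001000. The double shift by HOST_LONG_BITS / 2 then
 * yields *hhigh == 0 on a 64-bit host and *hhigh == 0x2 on a 32-bit host.
 */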
2816 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2817 abi_ulong count, int copy)
2819 struct target_iovec *target_vec;
2821 abi_ulong total_len, max_len;
2824 bool bad_address = false;
2830 if (count > IOV_MAX) {
2835 vec = g_try_new0(struct iovec, count);
2841 target_vec = lock_user(VERIFY_READ, target_addr,
2842 count * sizeof(struct target_iovec), 1);
2843 if (target_vec == NULL) {
2848 /* ??? If host page size > target page size, this will result in a
2849 value larger than what we can actually support. */
2850 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2853 for (i = 0; i < count; i++) {
2854 abi_ulong base = tswapal(target_vec[i].iov_base);
2855 abi_long len = tswapal(target_vec[i].iov_len);
2860 } else if (len == 0) {
2861 /* Zero length pointer is ignored. */
2862 vec[i].iov_base = 0;
2864 vec[i].iov_base = lock_user(type, base, len, copy);
2865 /* If the first buffer pointer is bad, this is a fault. But
2866 * subsequent bad buffers will result in a partial write; this
2867 * is realized by filling the vector with null pointers and
2869 if (!vec[i].iov_base) {
2880 if (len > max_len - total_len) {
2881 len = max_len - total_len;
2884 vec[i].iov_len = len;
2888 unlock_user(target_vec, target_addr, 0);
2893 if (tswapal(target_vec[i].iov_len) > 0) {
2894 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
2897 unlock_user(target_vec, target_addr, 0);
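/*
 * lock_iovec() above bounds count by IOV_MAX, locks each guest buffer
 * into host memory and clamps the cumulative length to max_len; a bad
 * buffer after the first one leaves the remaining entries as null
 * pointers so that the caller performs a partial transfer. unlock_iovec()
 * below releases the same buffers, copying data back to the guest only
 * when copy is set.
 */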
2904 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
2905 abi_ulong count, int copy)
2907 struct target_iovec *target_vec;
2910 target_vec = lock_user(VERIFY_READ, target_addr,
2911 count * sizeof(struct target_iovec), 1);
2913 for (i = 0; i < count; i++) {
2914 abi_ulong base = tswapal(target_vec[i].iov_base);
2915 abi_long len = tswapal(target_vec[i].iov_len);
2919 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
2921 unlock_user(target_vec, target_addr, 0);
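/*
 * target_to_host_sock_type() maps the TARGET_SOCK_* type bits and the
 * TARGET_SOCK_CLOEXEC/TARGET_SOCK_NONBLOCK flags onto their host values,
 * failing with -TARGET_EINVAL when the host offers no equivalent. On
 * hosts without SOCK_NONBLOCK, sock_flags_fixup() approximates the flag
 * after socket creation with fcntl(F_SETFL, O_NONBLOCK).
 */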
2927 static inline int target_to_host_sock_type(int *type)
2930 int target_type = *type;
2932 switch (target_type & TARGET_SOCK_TYPE_MASK) {
2933 case TARGET_SOCK_DGRAM:
2934 host_type = SOCK_DGRAM;
2936 case TARGET_SOCK_STREAM:
2937 host_type = SOCK_STREAM;
2940 host_type = target_type & TARGET_SOCK_TYPE_MASK;
2943 if (target_type & TARGET_SOCK_CLOEXEC) {
2944 #if defined(SOCK_CLOEXEC)
2945 host_type |= SOCK_CLOEXEC;
2947 return -TARGET_EINVAL;
2950 if (target_type & TARGET_SOCK_NONBLOCK) {
2951 #if defined(SOCK_NONBLOCK)
2952 host_type |= SOCK_NONBLOCK;
2953 #elif !defined(O_NONBLOCK)
2954 return -TARGET_EINVAL;
2961 /* Try to emulate socket type flags after socket creation. */
2962 static int sock_flags_fixup(int fd, int target_type)
2964 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
2965 if (target_type & TARGET_SOCK_NONBLOCK) {
2966 int flags = fcntl(fd, F_GETFL);
2967 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
2969 return -TARGET_EINVAL;
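/*
 * do_socket() converts the guest socket type with
 * target_to_host_sock_type(), restricts PF_NETLINK to the protocols QEMU
 * can translate (NETLINK_ROUTE, NETLINK_KOBJECT_UEVENT, NETLINK_AUDIT),
 * byte-swaps the protocol for packet sockets, and registers an fd
 * translator for packet and netlink descriptors.
 */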
2976 /* do_socket() Must return target values and target errnos. */
2977 static abi_long do_socket(int domain, int type, int protocol)
2979 int target_type = type;
2982 ret = target_to_host_sock_type(&type);
2987 if (domain == PF_NETLINK && !(
2988 #ifdef CONFIG_RTNETLINK
2989 protocol == NETLINK_ROUTE ||
2991 protocol == NETLINK_KOBJECT_UEVENT ||
2992 protocol == NETLINK_AUDIT)) {
2993 return -TARGET_EPFNOSUPPORT;
2996 if (domain == AF_PACKET ||
2997 (domain == AF_INET && type == SOCK_PACKET)) {
2998 protocol = tswap16(protocol);
3001 ret = get_errno(socket(domain, type, protocol));
3003 ret = sock_flags_fixup(ret, target_type);
3004 if (type == SOCK_PACKET) {
3005 /* Handle an obsolete case:
3006 * if socket type is SOCK_PACKET, bind by name
3008 fd_trans_register(ret, &target_packet_trans);
3009 } else if (domain == PF_NETLINK) {
3011 #ifdef CONFIG_RTNETLINK
3013 fd_trans_register(ret, &target_netlink_route_trans);
3016 case NETLINK_KOBJECT_UEVENT:
3017 /* nothing to do: messages are strings */
3020 fd_trans_register(ret, &target_netlink_audit_trans);
3023 g_assert_not_reached();
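/*
 * do_bind() and do_connect() below share the same shape: validate the
 * guest-supplied address length, convert the target sockaddr into a host
 * sockaddr on the stack with target_to_host_sockaddr(), and forward the
 * call, returning a target errno on failure.
 */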
3030 /* do_bind() Must return target values and target errnos. */
3031 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3037 if ((int)addrlen < 0) {
3038 return -TARGET_EINVAL;
3041 addr = alloca(addrlen+1);
3043 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3047 return get_errno(bind(sockfd, addr, addrlen));
3050 /* do_connect() Must return target values and target errnos. */
3051 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3057 if ((int)addrlen < 0) {
3058 return -TARGET_EINVAL;
3061 addr = alloca(addrlen+1);
3063 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3067 return get_errno(safe_connect(sockfd, addr, addrlen));
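/*
 * do_sendrecvmsg_locked() builds a host struct msghdr from the target
 * msghdr: the peer address (if any) is converted with
 * target_to_host_sockaddr(), control data is staged in a zeroed stack
 * buffer, and the iovec is locked with lock_iovec(). It then calls
 * safe_sendmsg() or safe_recvmsg() and, on receive, converts the address,
 * flags and control messages back into the target layout.
 */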
3070 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3071 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3072 int flags, int send)
3078 abi_ulong target_vec;
3080 if (msgp->msg_name) {
3081 msg.msg_namelen = tswap32(msgp->msg_namelen);
3082 msg.msg_name = alloca(msg.msg_namelen+1);
3083 ret = target_to_host_sockaddr(fd, msg.msg_name,
3084 tswapal(msgp->msg_name),
3086 if (ret == -TARGET_EFAULT) {
3087 /* For connected sockets msg_name and msg_namelen must
3088 * be ignored, so returning EFAULT immediately is wrong.
3089 * Instead, pass a bad msg_name to the host kernel, and
3090 * let it decide whether to return EFAULT or not.
3092 msg.msg_name = (void *)-1;
3097 msg.msg_name = NULL;
3098 msg.msg_namelen = 0;
3100 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3101 msg.msg_control = alloca(msg.msg_controllen);
3102 memset(msg.msg_control, 0, msg.msg_controllen);
3104 msg.msg_flags = tswap32(msgp->msg_flags);
3106 count = tswapal(msgp->msg_iovlen);
3107 target_vec = tswapal(msgp->msg_iov);
3109 if (count > IOV_MAX) {
3110 /* sendmsg/recvmsg return a different errno for this condition than
3111 * readv/writev, so we must catch it here before lock_iovec() does.
3113 ret = -TARGET_EMSGSIZE;
3117 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3118 target_vec, count, send);
3120 ret = -host_to_target_errno(errno);
3123 msg.msg_iovlen = count;
3127 if (fd_trans_target_to_host_data(fd)) {
3130 host_msg = g_malloc(msg.msg_iov->iov_len);
3131 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3132 ret = fd_trans_target_to_host_data(fd)(host_msg,
3133 msg.msg_iov->iov_len);
3135 msg.msg_iov->iov_base = host_msg;
3136 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3140 ret = target_to_host_cmsg(&msg, msgp);
3142 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3146 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3147 if (!is_error(ret)) {
3149 if (fd_trans_host_to_target_data(fd)) {
3150 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3151 MIN(msg.msg_iov->iov_len, len));
3153 ret = host_to_target_cmsg(msgp, &msg);
3155 if (!is_error(ret)) {
3156 msgp->msg_namelen = tswap32(msg.msg_namelen);
3157 msgp->msg_flags = tswap32(msg.msg_flags);
3158 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3159 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3160 msg.msg_name, msg.msg_namelen);
3172 unlock_iovec(vec, target_vec, count, !send);
3177 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3178 int flags, int send)
3181 struct target_msghdr *msgp;
3183 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3187 return -TARGET_EFAULT;
3189 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3190 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3194 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3195 * so it might not have this *mmsg-specific flag either.
3197 #ifndef MSG_WAITFORONE
3198 #define MSG_WAITFORONE 0x10000
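/*
 * do_sendrecvmmsg() locks the whole mmsghdr array, then calls
 * do_sendrecvmsg_locked() once per entry, recording each result in
 * msg_len. MSG_WAITFORONE is honoured by OR-ing MSG_DONTWAIT into the
 * flags after the first datagram, and the call reports the number of
 * datagrams transferred if any succeeded, otherwise the error.
 */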
3201 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3202 unsigned int vlen, unsigned int flags,
3205 struct target_mmsghdr *mmsgp;
3209 if (vlen > UIO_MAXIOV) {
3213 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3215 return -TARGET_EFAULT;
3218 for (i = 0; i < vlen; i++) {
3219 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3220 if (is_error(ret)) {
3223 mmsgp[i].msg_len = tswap32(ret);
3224 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3225 if (flags & MSG_WAITFORONE) {
3226 flags |= MSG_DONTWAIT;
3230 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3232 /* Return number of datagrams sent if we sent any at all;
3233 * otherwise return the error.
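/*
 * do_accept4() and do_getpeername() below follow the common
 * address-returning pattern: read and validate the guest's addrlen, call
 * the host syscall with a local sockaddr buffer, convert the result back
 * with host_to_target_sockaddr(), and store the returned length with
 * put_user_u32().
 */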
3241 /* do_accept4() Must return target values and target errnos. */
3242 static abi_long do_accept4(int fd, abi_ulong target_addr,
3243 abi_ulong target_addrlen_addr, int flags)
3245 socklen_t addrlen, ret_addrlen;
3250 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3252 if (target_addr == 0) {
3253 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3256 /* linux returns EINVAL if addrlen pointer is invalid */
3257 if (get_user_u32(addrlen, target_addrlen_addr))
3258 return -TARGET_EINVAL;
3260 if ((int)addrlen < 0) {
3261 return -TARGET_EINVAL;
3264 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3265 return -TARGET_EINVAL;
3267 addr = alloca(addrlen);
3269 ret_addrlen = addrlen;
3270 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3271 if (!is_error(ret)) {
3272 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3273 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3274 ret = -TARGET_EFAULT;
3280 /* do_getpeername() Must return target values and target errnos. */
3281 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3282 abi_ulong target_addrlen_addr)
3284 socklen_t addrlen, ret_addrlen;
3288 if (get_user_u32(addrlen, target_addrlen_addr))
3289 return -TARGET_EFAULT;
3291 if ((int)addrlen < 0) {
3292 return -TARGET_EINVAL;
3295 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3296 return -TARGET_EFAULT;
3298 addr = alloca(addrlen);
3300 ret_addrlen = addrlen;
3301 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3302 if (!is_error(ret)) {
3303 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3304 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3305 ret = -TARGET_EFAULT;
3311 /* do_getsockname() Must return target values and target errnos. */