linux-user/syscall.c (qemu.git)
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "qapi/error.h"
140 #include "fd-trans.h"
141 #include "tcg/tcg.h"
142
143 #ifndef CLONE_IO
144 #define CLONE_IO 0x80000000 /* Clone io context */
145 #endif
146
147 /* We can't directly call the host clone syscall, because this will
148 * badly confuse libc (breaking mutexes, for example). So we must
149 * divide clone flags into:
150 * * flag combinations that look like pthread_create()
151 * * flag combinations that look like fork()
152 * * flags we can implement within QEMU itself
153 * * flags we can't support and will return an error for
154 */
155 /* For thread creation, all these flags must be present; for
156 * fork, none must be present.
157 */
158 #define CLONE_THREAD_FLAGS \
159 (CLONE_VM | CLONE_FS | CLONE_FILES | \
160 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
161
162 /* These flags are ignored:
163 * CLONE_DETACHED is now ignored by the kernel;
164 * CLONE_IO is just an optimisation hint to the I/O scheduler
165 */
166 #define CLONE_IGNORED_FLAGS \
167 (CLONE_DETACHED | CLONE_IO)
168
169 /* Flags for fork which we can implement within QEMU itself */
170 #define CLONE_OPTIONAL_FORK_FLAGS \
171 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
172 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
173
174 /* Flags for thread creation which we can implement within QEMU itself */
175 #define CLONE_OPTIONAL_THREAD_FLAGS \
176 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
177 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
178
179 #define CLONE_INVALID_FORK_FLAGS \
180 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
181
182 #define CLONE_INVALID_THREAD_FLAGS \
183 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
184 CLONE_IGNORED_FLAGS))
185
186 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
187 * have almost all been allocated. We cannot support any of
188 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
189 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
190 * The checks against the invalid thread masks above will catch these.
191 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
192 */
193
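/*
 * Minimal illustrative sketch (the helper name is not part of the
 * emulation itself): with the masks above, a guest clone flags value is
 * pthread_create()-like when every CLONE_THREAD_FLAGS bit is present and
 * no unsupported bit is set; a fork()-like request, by contrast, must
 * contain none of the thread flags.
 */
static inline bool clone_flags_are_thread_like(unsigned int flags)
{
    /* Illustration only: all mandatory thread flags set, nothing invalid. */
    return (flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS &&
           !(flags & CLONE_INVALID_THREAD_FLAGS);
}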
194 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
195 * once. This exercises the codepaths for restart.
196 */
197 //#define DEBUG_ERESTARTSYS
198
199 //#include <linux/msdos_fs.h>
200 #define VFAT_IOCTL_READDIR_BOTH \
201 _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
202 #define VFAT_IOCTL_READDIR_SHORT \
203 _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
204
205 #undef _syscall0
206 #undef _syscall1
207 #undef _syscall2
208 #undef _syscall3
209 #undef _syscall4
210 #undef _syscall5
211 #undef _syscall6
212
213 #define _syscall0(type,name) \
214 static type name (void) \
215 { \
216 return syscall(__NR_##name); \
217 }
218
219 #define _syscall1(type,name,type1,arg1) \
220 static type name (type1 arg1) \
221 { \
222 return syscall(__NR_##name, arg1); \
223 }
224
225 #define _syscall2(type,name,type1,arg1,type2,arg2) \
226 static type name (type1 arg1,type2 arg2) \
227 { \
228 return syscall(__NR_##name, arg1, arg2); \
229 }
230
231 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
232 static type name (type1 arg1,type2 arg2,type3 arg3) \
233 { \
234 return syscall(__NR_##name, arg1, arg2, arg3); \
235 }
236
237 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
238 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
239 { \
240 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
241 }
242
243 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
244 type5,arg5) \
245 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
246 { \
247 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
248 }
249
250
251 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
252 type5,arg5,type6,arg6) \
253 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
254 type6 arg6) \
255 { \
256 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
257 }
258
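/*
 * For reference, an instantiation such as the _syscall2() used further down,
 *
 *     _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
 *
 * expands to a small static wrapper around the raw host syscall:
 *
 *     static int sys_getcwd1(char *buf, size_t size)
 *     {
 *         return syscall(__NR_sys_getcwd1, buf, size);
 *     }
 *
 * with __NR_sys_getcwd1 aliased to the host __NR_getcwd just below.
 */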
259
260 #define __NR_sys_uname __NR_uname
261 #define __NR_sys_getcwd1 __NR_getcwd
262 #define __NR_sys_getdents __NR_getdents
263 #define __NR_sys_getdents64 __NR_getdents64
264 #define __NR_sys_getpriority __NR_getpriority
265 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
266 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
267 #define __NR_sys_syslog __NR_syslog
268 #if defined(__NR_futex)
269 # define __NR_sys_futex __NR_futex
270 #endif
271 #if defined(__NR_futex_time64)
272 # define __NR_sys_futex_time64 __NR_futex_time64
273 #endif
274 #define __NR_sys_inotify_init __NR_inotify_init
275 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
276 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
277 #define __NR_sys_statx __NR_statx
278
279 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
280 #define __NR__llseek __NR_lseek
281 #endif
282
283 /* Newer kernel ports have llseek() instead of _llseek() */
284 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
285 #define TARGET_NR__llseek TARGET_NR_llseek
286 #endif
287
288 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
289 #ifndef TARGET_O_NONBLOCK_MASK
290 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
291 #endif
292
293 #define __NR_sys_gettid __NR_gettid
294 _syscall0(int, sys_gettid)
295
296 /* For the 64-bit guest on 32-bit host case we must emulate
297 * getdents using getdents64, because otherwise the host
298 * might hand us back more dirent records than we can fit
299 * into the guest buffer after structure format conversion.
300 * In all other cases we use the host getdents, if it is available.
301 */
302 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
303 #define EMULATE_GETDENTS_WITH_GETDENTS
304 #endif
305
306 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
307 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
308 #endif
309 #if (defined(TARGET_NR_getdents) && \
310 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
311 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
312 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
313 #endif
314 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
315 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
316 loff_t *, res, uint, wh);
317 #endif
318 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
319 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
320 siginfo_t *, uinfo)
321 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
322 #ifdef __NR_exit_group
323 _syscall1(int,exit_group,int,error_code)
324 #endif
325 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
326 _syscall1(int,set_tid_address,int *,tidptr)
327 #endif
328 #if defined(__NR_futex)
329 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
330 const struct timespec *,timeout,int *,uaddr2,int,val3)
331 #endif
332 #if defined(__NR_futex_time64)
333 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
334 const struct timespec *,timeout,int *,uaddr2,int,val3)
335 #endif
336 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
337 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
338 unsigned long *, user_mask_ptr);
339 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
340 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
341 unsigned long *, user_mask_ptr);
342 #define __NR_sys_getcpu __NR_getcpu
343 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
344 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
345 void *, arg);
346 _syscall2(int, capget, struct __user_cap_header_struct *, header,
347 struct __user_cap_data_struct *, data);
348 _syscall2(int, capset, struct __user_cap_header_struct *, header,
349 struct __user_cap_data_struct *, data);
350 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
351 _syscall2(int, ioprio_get, int, which, int, who)
352 #endif
353 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
354 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
355 #endif
356 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
357 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
358 #endif
359
360 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
361 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
362 unsigned long, idx1, unsigned long, idx2)
363 #endif
364
365 /*
366 * It is assumed that struct statx is architecture independent.
367 */
368 #if defined(TARGET_NR_statx) && defined(__NR_statx)
369 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
370 unsigned int, mask, struct target_statx *, statxbuf)
371 #endif
372 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
373 _syscall2(int, membarrier, int, cmd, int, flags)
374 #endif
375
376 static const bitmask_transtbl fcntl_flags_tbl[] = {
377 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
378 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
379 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
380 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
381 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
382 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
383 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
384 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
385 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
386 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
387 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
388 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
389 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
390 #if defined(O_DIRECT)
391 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
392 #endif
393 #if defined(O_NOATIME)
394 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
395 #endif
396 #if defined(O_CLOEXEC)
397 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
398 #endif
399 #if defined(O_PATH)
400 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
401 #endif
402 #if defined(O_TMPFILE)
403 { TARGET_O_TMPFILE, TARGET_O_TMPFILE, O_TMPFILE, O_TMPFILE },
404 #endif
405 /* Don't terminate the list prematurely on 64-bit host+guest. */
406 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
407 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
408 #endif
409 { 0, 0, 0, 0 }
410 };
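/*
 * This table is consumed by the generic bitmask translation helpers used by
 * the open/fcntl paths later in this file, e.g. (illustration only):
 *
 *     host_flags   = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *     target_flags = host_to_target_bitmask(host_flags, fcntl_flags_tbl);
 *
 * Each row gives a target mask/bits pair and the corresponding host pair.
 */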
411
412 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
413
414 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
415 #if defined(__NR_utimensat)
416 #define __NR_sys_utimensat __NR_utimensat
417 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
418 const struct timespec *,tsp,int,flags)
419 #else
420 static int sys_utimensat(int dirfd, const char *pathname,
421 const struct timespec times[2], int flags)
422 {
423 errno = ENOSYS;
424 return -1;
425 }
426 #endif
427 #endif /* TARGET_NR_utimensat */
428
429 #ifdef TARGET_NR_renameat2
430 #if defined(__NR_renameat2)
431 #define __NR_sys_renameat2 __NR_renameat2
432 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
433 const char *, new, unsigned int, flags)
434 #else
435 static int sys_renameat2(int oldfd, const char *old,
436 int newfd, const char *new, int flags)
437 {
438 if (flags == 0) {
439 return renameat(oldfd, old, newfd, new);
440 }
441 errno = ENOSYS;
442 return -1;
443 }
444 #endif
445 #endif /* TARGET_NR_renameat2 */
446
447 #ifdef CONFIG_INOTIFY
448 #include <sys/inotify.h>
449
450 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
451 static int sys_inotify_init(void)
452 {
453 return (inotify_init());
454 }
455 #endif
456 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
457 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
458 {
459 return (inotify_add_watch(fd, pathname, mask));
460 }
461 #endif
462 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
463 static int sys_inotify_rm_watch(int fd, int32_t wd)
464 {
465 return (inotify_rm_watch(fd, wd));
466 }
467 #endif
468 #ifdef CONFIG_INOTIFY1
469 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
470 static int sys_inotify_init1(int flags)
471 {
472 return (inotify_init1(flags));
473 }
474 #endif
475 #endif
476 #else
477 /* Userspace can usually survive runtime without inotify */
478 #undef TARGET_NR_inotify_init
479 #undef TARGET_NR_inotify_init1
480 #undef TARGET_NR_inotify_add_watch
481 #undef TARGET_NR_inotify_rm_watch
482 #endif /* CONFIG_INOTIFY */
483
484 #if defined(TARGET_NR_prlimit64)
485 #ifndef __NR_prlimit64
486 # define __NR_prlimit64 -1
487 #endif
488 #define __NR_sys_prlimit64 __NR_prlimit64
489 /* The glibc rlimit structure may not be that used by the underlying syscall */
490 struct host_rlimit64 {
491 uint64_t rlim_cur;
492 uint64_t rlim_max;
493 };
494 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
495 const struct host_rlimit64 *, new_limit,
496 struct host_rlimit64 *, old_limit)
497 #endif
498
499
500 #if defined(TARGET_NR_timer_create)
501 /* Maximum of 32 active POSIX timers allowed at any one time. */
502 static timer_t g_posix_timers[32] = { 0, } ;
503
504 static inline int next_free_host_timer(void)
505 {
506 int k ;
507 /* FIXME: Does finding the next free slot require a lock? */
508 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
509 if (g_posix_timers[k] == 0) {
510 g_posix_timers[k] = (timer_t) 1;
511 return k;
512 }
513 }
514 return -1;
515 }
516 #endif
517
518 static inline int host_to_target_errno(int host_errno)
519 {
520 switch (host_errno) {
521 #define E(X) case X: return TARGET_##X;
522 #include "errnos.c.inc"
523 #undef E
524 default:
525 return host_errno;
526 }
527 }
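/*
 * errnos.c.inc is a list of E(...) invocations, one per errno that needs
 * translating; with the E() definition above, an entry of the form E(ENOSYS)
 * would expand here to:
 *
 *     case ENOSYS: return TARGET_ENOSYS;
 *
 * Anything not listed falls through to the default and is passed unchanged.
 */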
528
529 static inline int target_to_host_errno(int target_errno)
530 {
531 switch (target_errno) {
532 #define E(X) case TARGET_##X: return X;
533 #include "errnos.c.inc"
534 #undef E
535 default:
536 return target_errno;
537 }
538 }
539
540 static inline abi_long get_errno(abi_long ret)
541 {
542 if (ret == -1)
543 return -host_to_target_errno(errno);
544 else
545 return ret;
546 }
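/*
 * Typical usage pattern in the syscall handlers below (illustration):
 *
 *     ret = get_errno(safe_read(arg1, p, arg3));
 *
 * A host failure (-1 with errno set) becomes a negative target errno such
 * as -TARGET_EINTR; any other return value is passed through untouched.
 */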
547
548 const char *target_strerror(int err)
549 {
550 if (err == TARGET_ERESTARTSYS) {
551 return "To be restarted";
552 }
553 if (err == TARGET_QEMU_ESIGRETURN) {
554 return "Successful exit from sigreturn";
555 }
556
557 return strerror(target_to_host_errno(err));
558 }
559
560 #define safe_syscall0(type, name) \
561 static type safe_##name(void) \
562 { \
563 return safe_syscall(__NR_##name); \
564 }
565
566 #define safe_syscall1(type, name, type1, arg1) \
567 static type safe_##name(type1 arg1) \
568 { \
569 return safe_syscall(__NR_##name, arg1); \
570 }
571
572 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
573 static type safe_##name(type1 arg1, type2 arg2) \
574 { \
575 return safe_syscall(__NR_##name, arg1, arg2); \
576 }
577
578 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
579 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
580 { \
581 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
582 }
583
584 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
585 type4, arg4) \
586 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
587 { \
588 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
589 }
590
591 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
592 type4, arg4, type5, arg5) \
593 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
594 type5 arg5) \
595 { \
596 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
597 }
598
599 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
600 type4, arg4, type5, arg5, type6, arg6) \
601 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
602 type5 arg5, type6 arg6) \
603 { \
604 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
605 }
606
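/*
 * For example, the first instantiation below,
 *
 *     safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
 *
 * expands to:
 *
 *     static ssize_t safe_read(int fd, void *buff, size_t count)
 *     {
 *         return safe_syscall(__NR_read, fd, buff, count);
 *     }
 *
 * i.e. a host read() issued through safe_syscall() so that guest signal
 * delivery can interrupt or restart the blocking call correctly.
 */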
607 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
608 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
609 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
610 int, flags, mode_t, mode)
611 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
612 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
613 struct rusage *, rusage)
614 #endif
615 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
616 int, options, struct rusage *, rusage)
617 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
618 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
619 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
620 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
621 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
622 #endif
623 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
624 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
625 struct timespec *, tsp, const sigset_t *, sigmask,
626 size_t, sigsetsize)
627 #endif
628 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
629 int, maxevents, int, timeout, const sigset_t *, sigmask,
630 size_t, sigsetsize)
631 #if defined(__NR_futex)
632 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
633 const struct timespec *,timeout,int *,uaddr2,int,val3)
634 #endif
635 #if defined(__NR_futex_time64)
636 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
637 const struct timespec *,timeout,int *,uaddr2,int,val3)
638 #endif
639 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
640 safe_syscall2(int, kill, pid_t, pid, int, sig)
641 safe_syscall2(int, tkill, int, tid, int, sig)
642 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
643 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
644 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
645 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
646 unsigned long, pos_l, unsigned long, pos_h)
647 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
648 unsigned long, pos_l, unsigned long, pos_h)
649 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
650 socklen_t, addrlen)
651 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
652 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
653 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
654 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
655 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
656 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
657 safe_syscall2(int, flock, int, fd, int, operation)
658 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
659 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
660 const struct timespec *, uts, size_t, sigsetsize)
661 #endif
662 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
663 int, flags)
664 #if defined(TARGET_NR_nanosleep)
665 safe_syscall2(int, nanosleep, const struct timespec *, req,
666 struct timespec *, rem)
667 #endif
668 #if defined(TARGET_NR_clock_nanosleep) || \
669 defined(TARGET_NR_clock_nanosleep_time64)
670 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
671 const struct timespec *, req, struct timespec *, rem)
672 #endif
673 #ifdef __NR_ipc
674 #ifdef __s390x__
675 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
676 void *, ptr)
677 #else
678 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
679 void *, ptr, long, fifth)
680 #endif
681 #endif
682 #ifdef __NR_msgsnd
683 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
684 int, flags)
685 #endif
686 #ifdef __NR_msgrcv
687 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
688 long, msgtype, int, flags)
689 #endif
690 #ifdef __NR_semtimedop
691 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
692 unsigned, nsops, const struct timespec *, timeout)
693 #endif
694 #if defined(TARGET_NR_mq_timedsend) || \
695 defined(TARGET_NR_mq_timedsend_time64)
696 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
697 size_t, len, unsigned, prio, const struct timespec *, timeout)
698 #endif
699 #if defined(TARGET_NR_mq_timedreceive) || \
700 defined(TARGET_NR_mq_timedreceive_time64)
701 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
702 size_t, len, unsigned *, prio, const struct timespec *, timeout)
703 #endif
704 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
705 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
706 int, outfd, loff_t *, poutoff, size_t, length,
707 unsigned int, flags)
708 #endif
709
710 /* We do ioctl like this rather than via safe_syscall3 to preserve the
711 * "third argument might be integer or pointer or not present" behaviour of
712 * the libc function.
713 */
714 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
715 /* Similarly for fcntl. Note that callers must always:
716 * - pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
717 * - use the flock64 struct rather than the unsuffixed flock
718 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
719 */
720 #ifdef __NR_fcntl64
721 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
722 #else
723 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
724 #endif
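/*
 * Example of the required calling convention (sketch only):
 *
 *     struct flock64 fl64;
 *     ...
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * Passing the unsuffixed F_GETLK / struct flock would not give 64-bit
 * file offsets on 32-bit hosts.
 */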
725
726 static inline int host_to_target_sock_type(int host_type)
727 {
728 int target_type;
729
730 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
731 case SOCK_DGRAM:
732 target_type = TARGET_SOCK_DGRAM;
733 break;
734 case SOCK_STREAM:
735 target_type = TARGET_SOCK_STREAM;
736 break;
737 default:
738 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
739 break;
740 }
741
742 #if defined(SOCK_CLOEXEC)
743 if (host_type & SOCK_CLOEXEC) {
744 target_type |= TARGET_SOCK_CLOEXEC;
745 }
746 #endif
747
748 #if defined(SOCK_NONBLOCK)
749 if (host_type & SOCK_NONBLOCK) {
750 target_type |= TARGET_SOCK_NONBLOCK;
751 }
752 #endif
753
754 return target_type;
755 }
756
757 static abi_ulong target_brk;
758 static abi_ulong target_original_brk;
759 static abi_ulong brk_page;
760
761 void target_set_brk(abi_ulong new_brk)
762 {
763 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
764 brk_page = HOST_PAGE_ALIGN(target_brk);
765 }
766
767 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
768 #define DEBUGF_BRK(message, args...)
769
770 /* do_brk() must return target values and target errnos. */
771 abi_long do_brk(abi_ulong new_brk)
772 {
773 abi_long mapped_addr;
774 abi_ulong new_alloc_size;
775
776 /* brk pointers are always untagged */
777
778 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
779
780 if (!new_brk) {
781 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
782 return target_brk;
783 }
784 if (new_brk < target_original_brk) {
785 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
786 target_brk);
787 return target_brk;
788 }
789
790 /* If the new brk is within the pages already reserved for the
791 * target heap allocation, set it and we're almost done... */
792 if (new_brk <= brk_page) {
793 /* Heap contents are initialized to zero, as for anonymous
794 * mapped pages. */
795 if (new_brk > target_brk) {
796 memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
797 }
798 target_brk = new_brk;
799 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
800 return target_brk;
801 }
802
803 /* We need to allocate more memory after the brk... Note that
804 * we don't use MAP_FIXED because that will map over the top of
805 * any existing mapping (like the one with the host libc or qemu
806 * itself); instead we treat "mapped but at wrong address" as
807 * a failure and unmap again.
808 */
809 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
810 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
811 PROT_READ|PROT_WRITE,
812 MAP_ANON|MAP_PRIVATE, 0, 0));
813
814 if (mapped_addr == brk_page) {
815 /* Heap contents are initialized to zero, as for anonymous
816 * mapped pages. Technically the new pages are already
817 * initialized to zero since they *are* anonymous mapped
818 * pages, however we have to take care with the contents that
819 * come from the remaining part of the previous page: it may
820 * contain garbage data from a previous heap usage (grown
821 * then shrunk). */
822 memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
823
824 target_brk = new_brk;
825 brk_page = HOST_PAGE_ALIGN(target_brk);
826 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
827 target_brk);
828 return target_brk;
829 } else if (mapped_addr != -1) {
830 /* Mapped but at wrong address, meaning there wasn't actually
831 * enough space for this brk.
832 */
833 target_munmap(mapped_addr, new_alloc_size);
834 mapped_addr = -1;
835 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
836 }
837 else {
838 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
839 }
840
841 #if defined(TARGET_ALPHA)
842 /* We (partially) emulate OSF/1 on Alpha, which requires we
843 return a proper errno, not an unchanged brk value. */
844 return -TARGET_ENOMEM;
845 #endif
846 /* For everything else, return the previous break. */
847 return target_brk;
848 }
849
850 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
851 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
852 static inline abi_long copy_from_user_fdset(fd_set *fds,
853 abi_ulong target_fds_addr,
854 int n)
855 {
856 int i, nw, j, k;
857 abi_ulong b, *target_fds;
858
859 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
860 if (!(target_fds = lock_user(VERIFY_READ,
861 target_fds_addr,
862 sizeof(abi_ulong) * nw,
863 1)))
864 return -TARGET_EFAULT;
865
866 FD_ZERO(fds);
867 k = 0;
868 for (i = 0; i < nw; i++) {
869 /* grab the abi_ulong */
870 __get_user(b, &target_fds[i]);
871 for (j = 0; j < TARGET_ABI_BITS; j++) {
872 /* check the bit inside the abi_ulong */
873 if ((b >> j) & 1)
874 FD_SET(k, fds);
875 k++;
876 }
877 }
878
879 unlock_user(target_fds, target_fds_addr, 0);
880
881 return 0;
882 }
883
884 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
885 abi_ulong target_fds_addr,
886 int n)
887 {
888 if (target_fds_addr) {
889 if (copy_from_user_fdset(fds, target_fds_addr, n))
890 return -TARGET_EFAULT;
891 *fds_ptr = fds;
892 } else {
893 *fds_ptr = NULL;
894 }
895 return 0;
896 }
897
898 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
899 const fd_set *fds,
900 int n)
901 {
902 int i, nw, j, k;
903 abi_long v;
904 abi_ulong *target_fds;
905
906 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
907 if (!(target_fds = lock_user(VERIFY_WRITE,
908 target_fds_addr,
909 sizeof(abi_ulong) * nw,
910 0)))
911 return -TARGET_EFAULT;
912
913 k = 0;
914 for (i = 0; i < nw; i++) {
915 v = 0;
916 for (j = 0; j < TARGET_ABI_BITS; j++) {
917 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
918 k++;
919 }
920 __put_user(v, &target_fds[i]);
921 }
922
923 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
924
925 return 0;
926 }
927 #endif
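/*
 * Worked example for the helpers above: with TARGET_ABI_BITS == 32, guest
 * fd 33 lives in bit 1 of target_fds[1] (i = 33 / 32, j = 33 % 32), and each
 * abi_ulong word is byte-swapped via __get_user()/__put_user() as needed.
 */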
928
929 #if defined(__alpha__)
930 #define HOST_HZ 1024
931 #else
932 #define HOST_HZ 100
933 #endif
934
935 static inline abi_long host_to_target_clock_t(long ticks)
936 {
937 #if HOST_HZ == TARGET_HZ
938 return ticks;
939 #else
940 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
941 #endif
942 }
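/*
 * For example, an Alpha host (HOST_HZ == 1024) reporting 2048 ticks to a
 * 100 Hz target yields 2048 * 100 / 1024 = 200 target clock ticks.
 */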
943
944 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
945 const struct rusage *rusage)
946 {
947 struct target_rusage *target_rusage;
948
949 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
950 return -TARGET_EFAULT;
951 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
952 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
953 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
954 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
955 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
956 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
957 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
958 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
959 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
960 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
961 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
962 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
963 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
964 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
965 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
966 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
967 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
968 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
969 unlock_user_struct(target_rusage, target_addr, 1);
970
971 return 0;
972 }
973
974 #ifdef TARGET_NR_setrlimit
975 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
976 {
977 abi_ulong target_rlim_swap;
978 rlim_t result;
979
980 target_rlim_swap = tswapal(target_rlim);
981 if (target_rlim_swap == TARGET_RLIM_INFINITY)
982 return RLIM_INFINITY;
983
984 result = target_rlim_swap;
985 if (target_rlim_swap != (rlim_t)result)
986 return RLIM_INFINITY;
987
988 return result;
989 }
990 #endif
991
992 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
993 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
994 {
995 abi_ulong target_rlim_swap;
996 abi_ulong result;
997
998 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
999 target_rlim_swap = TARGET_RLIM_INFINITY;
1000 else
1001 target_rlim_swap = rlim;
1002 result = tswapal(target_rlim_swap);
1003
1004 return result;
1005 }
1006 #endif
1007
1008 static inline int target_to_host_resource(int code)
1009 {
1010 switch (code) {
1011 case TARGET_RLIMIT_AS:
1012 return RLIMIT_AS;
1013 case TARGET_RLIMIT_CORE:
1014 return RLIMIT_CORE;
1015 case TARGET_RLIMIT_CPU:
1016 return RLIMIT_CPU;
1017 case TARGET_RLIMIT_DATA:
1018 return RLIMIT_DATA;
1019 case TARGET_RLIMIT_FSIZE:
1020 return RLIMIT_FSIZE;
1021 case TARGET_RLIMIT_LOCKS:
1022 return RLIMIT_LOCKS;
1023 case TARGET_RLIMIT_MEMLOCK:
1024 return RLIMIT_MEMLOCK;
1025 case TARGET_RLIMIT_MSGQUEUE:
1026 return RLIMIT_MSGQUEUE;
1027 case TARGET_RLIMIT_NICE:
1028 return RLIMIT_NICE;
1029 case TARGET_RLIMIT_NOFILE:
1030 return RLIMIT_NOFILE;
1031 case TARGET_RLIMIT_NPROC:
1032 return RLIMIT_NPROC;
1033 case TARGET_RLIMIT_RSS:
1034 return RLIMIT_RSS;
1035 case TARGET_RLIMIT_RTPRIO:
1036 return RLIMIT_RTPRIO;
1037 case TARGET_RLIMIT_SIGPENDING:
1038 return RLIMIT_SIGPENDING;
1039 case TARGET_RLIMIT_STACK:
1040 return RLIMIT_STACK;
1041 default:
1042 return code;
1043 }
1044 }
1045
1046 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1047 abi_ulong target_tv_addr)
1048 {
1049 struct target_timeval *target_tv;
1050
1051 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1052 return -TARGET_EFAULT;
1053 }
1054
1055 __get_user(tv->tv_sec, &target_tv->tv_sec);
1056 __get_user(tv->tv_usec, &target_tv->tv_usec);
1057
1058 unlock_user_struct(target_tv, target_tv_addr, 0);
1059
1060 return 0;
1061 }
1062
1063 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1064 const struct timeval *tv)
1065 {
1066 struct target_timeval *target_tv;
1067
1068 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1069 return -TARGET_EFAULT;
1070 }
1071
1072 __put_user(tv->tv_sec, &target_tv->tv_sec);
1073 __put_user(tv->tv_usec, &target_tv->tv_usec);
1074
1075 unlock_user_struct(target_tv, target_tv_addr, 1);
1076
1077 return 0;
1078 }
1079
1080 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1081 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1082 abi_ulong target_tv_addr)
1083 {
1084 struct target__kernel_sock_timeval *target_tv;
1085
1086 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1087 return -TARGET_EFAULT;
1088 }
1089
1090 __get_user(tv->tv_sec, &target_tv->tv_sec);
1091 __get_user(tv->tv_usec, &target_tv->tv_usec);
1092
1093 unlock_user_struct(target_tv, target_tv_addr, 0);
1094
1095 return 0;
1096 }
1097 #endif
1098
1099 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1100 const struct timeval *tv)
1101 {
1102 struct target__kernel_sock_timeval *target_tv;
1103
1104 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1105 return -TARGET_EFAULT;
1106 }
1107
1108 __put_user(tv->tv_sec, &target_tv->tv_sec);
1109 __put_user(tv->tv_usec, &target_tv->tv_usec);
1110
1111 unlock_user_struct(target_tv, target_tv_addr, 1);
1112
1113 return 0;
1114 }
1115
1116 #if defined(TARGET_NR_futex) || \
1117 defined(TARGET_NR_rt_sigtimedwait) || \
1118 defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1119 defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1120 defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1121 defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1122 defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1123 defined(TARGET_NR_timer_settime) || \
1124 (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1125 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1126 abi_ulong target_addr)
1127 {
1128 struct target_timespec *target_ts;
1129
1130 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1131 return -TARGET_EFAULT;
1132 }
1133 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1134 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1135 unlock_user_struct(target_ts, target_addr, 0);
1136 return 0;
1137 }
1138 #endif
1139
1140 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1141 defined(TARGET_NR_timer_settime64) || \
1142 defined(TARGET_NR_mq_timedsend_time64) || \
1143 defined(TARGET_NR_mq_timedreceive_time64) || \
1144 (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1145 defined(TARGET_NR_clock_nanosleep_time64) || \
1146 defined(TARGET_NR_rt_sigtimedwait_time64) || \
1147 defined(TARGET_NR_utimensat) || \
1148 defined(TARGET_NR_utimensat_time64) || \
1149 defined(TARGET_NR_semtimedop_time64) || \
1150 defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1151 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1152 abi_ulong target_addr)
1153 {
1154 struct target__kernel_timespec *target_ts;
1155
1156 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1157 return -TARGET_EFAULT;
1158 }
1159 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1160 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1161 /* in 32bit mode, this drops the padding */
1162 host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1163 unlock_user_struct(target_ts, target_addr, 0);
1164 return 0;
1165 }
1166 #endif
1167
1168 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1169 struct timespec *host_ts)
1170 {
1171 struct target_timespec *target_ts;
1172
1173 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1174 return -TARGET_EFAULT;
1175 }
1176 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1177 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1178 unlock_user_struct(target_ts, target_addr, 1);
1179 return 0;
1180 }
1181
1182 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1183 struct timespec *host_ts)
1184 {
1185 struct target__kernel_timespec *target_ts;
1186
1187 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1188 return -TARGET_EFAULT;
1189 }
1190 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1191 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1192 unlock_user_struct(target_ts, target_addr, 1);
1193 return 0;
1194 }
1195
1196 #if defined(TARGET_NR_gettimeofday)
1197 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1198 struct timezone *tz)
1199 {
1200 struct target_timezone *target_tz;
1201
1202 if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1203 return -TARGET_EFAULT;
1204 }
1205
1206 __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1207 __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1208
1209 unlock_user_struct(target_tz, target_tz_addr, 1);
1210
1211 return 0;
1212 }
1213 #endif
1214
1215 #if defined(TARGET_NR_settimeofday)
1216 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1217 abi_ulong target_tz_addr)
1218 {
1219 struct target_timezone *target_tz;
1220
1221 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1222 return -TARGET_EFAULT;
1223 }
1224
1225 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1226 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1227
1228 unlock_user_struct(target_tz, target_tz_addr, 0);
1229
1230 return 0;
1231 }
1232 #endif
1233
1234 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1235 #include <mqueue.h>
1236
1237 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1238 abi_ulong target_mq_attr_addr)
1239 {
1240 struct target_mq_attr *target_mq_attr;
1241
1242 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1243 target_mq_attr_addr, 1))
1244 return -TARGET_EFAULT;
1245
1246 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1247 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1248 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1249 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1250
1251 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1252
1253 return 0;
1254 }
1255
1256 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1257 const struct mq_attr *attr)
1258 {
1259 struct target_mq_attr *target_mq_attr;
1260
1261 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1262 target_mq_attr_addr, 0))
1263 return -TARGET_EFAULT;
1264
1265 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1266 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1267 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1268 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1269
1270 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1271
1272 return 0;
1273 }
1274 #endif
1275
1276 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1277 /* do_select() must return target values and target errnos. */
1278 static abi_long do_select(int n,
1279 abi_ulong rfd_addr, abi_ulong wfd_addr,
1280 abi_ulong efd_addr, abi_ulong target_tv_addr)
1281 {
1282 fd_set rfds, wfds, efds;
1283 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1284 struct timeval tv;
1285 struct timespec ts, *ts_ptr;
1286 abi_long ret;
1287
1288 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1289 if (ret) {
1290 return ret;
1291 }
1292 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1293 if (ret) {
1294 return ret;
1295 }
1296 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1297 if (ret) {
1298 return ret;
1299 }
1300
1301 if (target_tv_addr) {
1302 if (copy_from_user_timeval(&tv, target_tv_addr))
1303 return -TARGET_EFAULT;
1304 ts.tv_sec = tv.tv_sec;
1305 ts.tv_nsec = tv.tv_usec * 1000;
1306 ts_ptr = &ts;
1307 } else {
1308 ts_ptr = NULL;
1309 }
1310
1311 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1312 ts_ptr, NULL));
1313
1314 if (!is_error(ret)) {
1315 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1316 return -TARGET_EFAULT;
1317 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1318 return -TARGET_EFAULT;
1319 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1320 return -TARGET_EFAULT;
1321
1322 if (target_tv_addr) {
1323 tv.tv_sec = ts.tv_sec;
1324 tv.tv_usec = ts.tv_nsec / 1000;
1325 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1326 return -TARGET_EFAULT;
1327 }
1328 }
1329 }
1330
1331 return ret;
1332 }
1333
1334 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1335 static abi_long do_old_select(abi_ulong arg1)
1336 {
1337 struct target_sel_arg_struct *sel;
1338 abi_ulong inp, outp, exp, tvp;
1339 long nsel;
1340
1341 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1342 return -TARGET_EFAULT;
1343 }
1344
1345 nsel = tswapal(sel->n);
1346 inp = tswapal(sel->inp);
1347 outp = tswapal(sel->outp);
1348 exp = tswapal(sel->exp);
1349 tvp = tswapal(sel->tvp);
1350
1351 unlock_user_struct(sel, arg1, 0);
1352
1353 return do_select(nsel, inp, outp, exp, tvp);
1354 }
1355 #endif
1356 #endif
1357
1358 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1359 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1360 abi_long arg4, abi_long arg5, abi_long arg6,
1361 bool time64)
1362 {
1363 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1364 fd_set rfds, wfds, efds;
1365 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1366 struct timespec ts, *ts_ptr;
1367 abi_long ret;
1368
1369 /*
1370 * The 6th arg is actually two args smashed together,
1371 * so we cannot use the C library.
1372 */
1373 sigset_t set;
1374 struct {
1375 sigset_t *set;
1376 size_t size;
1377 } sig, *sig_ptr;
1378
1379 abi_ulong arg_sigset, arg_sigsize, *arg7;
1380 target_sigset_t *target_sigset;
1381
1382 n = arg1;
1383 rfd_addr = arg2;
1384 wfd_addr = arg3;
1385 efd_addr = arg4;
1386 ts_addr = arg5;
1387
1388 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1389 if (ret) {
1390 return ret;
1391 }
1392 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1393 if (ret) {
1394 return ret;
1395 }
1396 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1397 if (ret) {
1398 return ret;
1399 }
1400
1401 /*
1402 * This takes a timespec, and not a timeval, so we cannot
1403 * use the do_select() helper ...
1404 */
1405 if (ts_addr) {
1406 if (time64) {
1407 if (target_to_host_timespec64(&ts, ts_addr)) {
1408 return -TARGET_EFAULT;
1409 }
1410 } else {
1411 if (target_to_host_timespec(&ts, ts_addr)) {
1412 return -TARGET_EFAULT;
1413 }
1414 }
1415 ts_ptr = &ts;
1416 } else {
1417 ts_ptr = NULL;
1418 }
1419
1420 /* Extract the two packed args for the sigset */
1421 if (arg6) {
1422 sig_ptr = &sig;
1423 sig.size = SIGSET_T_SIZE;
1424
1425 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1426 if (!arg7) {
1427 return -TARGET_EFAULT;
1428 }
1429 arg_sigset = tswapal(arg7[0]);
1430 arg_sigsize = tswapal(arg7[1]);
1431 unlock_user(arg7, arg6, 0);
1432
1433 if (arg_sigset) {
1434 sig.set = &set;
1435 if (arg_sigsize != sizeof(*target_sigset)) {
1436 /* Like the kernel, we enforce correct size sigsets */
1437 return -TARGET_EINVAL;
1438 }
1439 target_sigset = lock_user(VERIFY_READ, arg_sigset,
1440 sizeof(*target_sigset), 1);
1441 if (!target_sigset) {
1442 return -TARGET_EFAULT;
1443 }
1444 target_to_host_sigset(&set, target_sigset);
1445 unlock_user(target_sigset, arg_sigset, 0);
1446 } else {
1447 sig.set = NULL;
1448 }
1449 } else {
1450 sig_ptr = NULL;
1451 }
1452
1453 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1454 ts_ptr, sig_ptr));
1455
1456 if (!is_error(ret)) {
1457 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1458 return -TARGET_EFAULT;
1459 }
1460 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1461 return -TARGET_EFAULT;
1462 }
1463 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1464 return -TARGET_EFAULT;
1465 }
1466 if (time64) {
1467 if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1468 return -TARGET_EFAULT;
1469 }
1470 } else {
1471 if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1472 return -TARGET_EFAULT;
1473 }
1474 }
1475 }
1476 return ret;
1477 }
1478 #endif
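/*
 * For reference, the packed sixth argument handled above has this guest
 * memory layout (two consecutive abi_ulongs):
 *
 *     arg6[0] = guest address of the sigset (0 means "no sigmask")
 *     arg6[1] = size of that sigset, checked against sizeof(target_sigset_t)
 *               when a set is supplied
 */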
1479
1480 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1481 defined(TARGET_NR_ppoll_time64)
1482 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1483 abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1484 {
1485 struct target_pollfd *target_pfd;
1486 unsigned int nfds = arg2;
1487 struct pollfd *pfd;
1488 unsigned int i;
1489 abi_long ret;
1490
1491 pfd = NULL;
1492 target_pfd = NULL;
1493 if (nfds) {
1494 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1495 return -TARGET_EINVAL;
1496 }
1497 target_pfd = lock_user(VERIFY_WRITE, arg1,
1498 sizeof(struct target_pollfd) * nfds, 1);
1499 if (!target_pfd) {
1500 return -TARGET_EFAULT;
1501 }
1502
1503 pfd = alloca(sizeof(struct pollfd) * nfds);
1504 for (i = 0; i < nfds; i++) {
1505 pfd[i].fd = tswap32(target_pfd[i].fd);
1506 pfd[i].events = tswap16(target_pfd[i].events);
1507 }
1508 }
1509 if (ppoll) {
1510 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1511 target_sigset_t *target_set;
1512 sigset_t _set, *set = &_set;
1513
1514 if (arg3) {
1515 if (time64) {
1516 if (target_to_host_timespec64(timeout_ts, arg3)) {
1517 unlock_user(target_pfd, arg1, 0);
1518 return -TARGET_EFAULT;
1519 }
1520 } else {
1521 if (target_to_host_timespec(timeout_ts, arg3)) {
1522 unlock_user(target_pfd, arg1, 0);
1523 return -TARGET_EFAULT;
1524 }
1525 }
1526 } else {
1527 timeout_ts = NULL;
1528 }
1529
1530 if (arg4) {
1531 if (arg5 != sizeof(target_sigset_t)) {
1532 unlock_user(target_pfd, arg1, 0);
1533 return -TARGET_EINVAL;
1534 }
1535
1536 target_set = lock_user(VERIFY_READ, arg4,
1537 sizeof(target_sigset_t), 1);
1538 if (!target_set) {
1539 unlock_user(target_pfd, arg1, 0);
1540 return -TARGET_EFAULT;
1541 }
1542 target_to_host_sigset(set, target_set);
1543 } else {
1544 set = NULL;
1545 }
1546
1547 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1548 set, SIGSET_T_SIZE));
1549
1550 if (!is_error(ret) && arg3) {
1551 if (time64) {
1552 if (host_to_target_timespec64(arg3, timeout_ts)) {
1553 return -TARGET_EFAULT;
1554 }
1555 } else {
1556 if (host_to_target_timespec(arg3, timeout_ts)) {
1557 return -TARGET_EFAULT;
1558 }
1559 }
1560 }
1561 if (arg4) {
1562 unlock_user(target_set, arg4, 0);
1563 }
1564 } else {
1565 struct timespec ts, *pts;
1566
1567 if (arg3 >= 0) {
1568 /* Convert ms to secs, ns */
1569 ts.tv_sec = arg3 / 1000;
1570 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1571 pts = &ts;
1572 } else {
1573 /* -ve poll() timeout means "infinite" */
1574 pts = NULL;
1575 }
1576 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1577 }
1578
1579 if (!is_error(ret)) {
1580 for (i = 0; i < nfds; i++) {
1581 target_pfd[i].revents = tswap16(pfd[i].revents);
1582 }
1583 }
1584 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1585 return ret;
1586 }
1587 #endif
1588
1589 static abi_long do_pipe2(int host_pipe[], int flags)
1590 {
1591 #ifdef CONFIG_PIPE2
1592 return pipe2(host_pipe, flags);
1593 #else
1594 return -ENOSYS;
1595 #endif
1596 }
1597
1598 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1599 int flags, int is_pipe2)
1600 {
1601 int host_pipe[2];
1602 abi_long ret;
1603 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1604
1605 if (is_error(ret))
1606 return get_errno(ret);
1607
1608 /* Several targets have special calling conventions for the original
1609 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1610 if (!is_pipe2) {
1611 #if defined(TARGET_ALPHA)
1612 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1613 return host_pipe[0];
1614 #elif defined(TARGET_MIPS)
1615 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1616 return host_pipe[0];
1617 #elif defined(TARGET_SH4)
1618 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1619 return host_pipe[0];
1620 #elif defined(TARGET_SPARC)
1621 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1622 return host_pipe[0];
1623 #endif
1624 }
1625
1626 if (put_user_s32(host_pipe[0], pipedes)
1627 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1628 return -TARGET_EFAULT;
1629 return get_errno(ret);
1630 }
1631
1632 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1633 abi_ulong target_addr,
1634 socklen_t len)
1635 {
1636 struct target_ip_mreqn *target_smreqn;
1637
1638 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1639 if (!target_smreqn)
1640 return -TARGET_EFAULT;
1641 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1642 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1643 if (len == sizeof(struct target_ip_mreqn))
1644 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1645 unlock_user(target_smreqn, target_addr, 0);
1646
1647 return 0;
1648 }
1649
1650 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1651 abi_ulong target_addr,
1652 socklen_t len)
1653 {
1654 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1655 sa_family_t sa_family;
1656 struct target_sockaddr *target_saddr;
1657
1658 if (fd_trans_target_to_host_addr(fd)) {
1659 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1660 }
1661
1662 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1663 if (!target_saddr)
1664 return -TARGET_EFAULT;
1665
1666 sa_family = tswap16(target_saddr->sa_family);
1667
1668 /* Oops. The caller might send an incomplete sun_path; sun_path
1669 * must be terminated by \0 (see the manual page), but
1670 * unfortunately it is quite common to specify sockaddr_un
1671 * length as "strlen(x->sun_path)" while it should be
1672 * "strlen(...) + 1". We'll fix that here if needed.
1673 * The Linux kernel applies a similar fixup.
1674 */
1675
1676 if (sa_family == AF_UNIX) {
1677 if (len < unix_maxlen && len > 0) {
1678 char *cp = (char*)target_saddr;
1679
1680 if ( cp[len-1] && !cp[len] )
1681 len++;
1682 }
1683 if (len > unix_maxlen)
1684 len = unix_maxlen;
1685 }
1686
1687 memcpy(addr, target_saddr, len);
1688 addr->sa_family = sa_family;
1689 if (sa_family == AF_NETLINK) {
1690 struct sockaddr_nl *nladdr;
1691
1692 nladdr = (struct sockaddr_nl *)addr;
1693 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1694 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1695 } else if (sa_family == AF_PACKET) {
1696 struct target_sockaddr_ll *lladdr;
1697
1698 lladdr = (struct target_sockaddr_ll *)addr;
1699 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1700 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1701 }
1702 unlock_user(target_saddr, target_addr, 0);
1703
1704 return 0;
1705 }
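/*
 * Worked example for the AF_UNIX fixup above: a guest connecting to
 * "/tmp/sock" often passes len = offsetof(struct sockaddr_un, sun_path) + 9,
 * omitting the trailing NUL.  Since cp[len - 1] is 'k' and cp[len] is '\0',
 * len is bumped by one so the terminator is included.
 */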
1706
1707 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1708 struct sockaddr *addr,
1709 socklen_t len)
1710 {
1711 struct target_sockaddr *target_saddr;
1712
1713 if (len == 0) {
1714 return 0;
1715 }
1716 assert(addr);
1717
1718 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1719 if (!target_saddr)
1720 return -TARGET_EFAULT;
1721 memcpy(target_saddr, addr, len);
1722 if (len >= offsetof(struct target_sockaddr, sa_family) +
1723 sizeof(target_saddr->sa_family)) {
1724 target_saddr->sa_family = tswap16(addr->sa_family);
1725 }
1726 if (addr->sa_family == AF_NETLINK &&
1727 len >= sizeof(struct target_sockaddr_nl)) {
1728 struct target_sockaddr_nl *target_nl =
1729 (struct target_sockaddr_nl *)target_saddr;
1730 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1731 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1732 } else if (addr->sa_family == AF_PACKET) {
1733 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1734 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1735 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1736 } else if (addr->sa_family == AF_INET6 &&
1737 len >= sizeof(struct target_sockaddr_in6)) {
1738 struct target_sockaddr_in6 *target_in6 =
1739 (struct target_sockaddr_in6 *)target_saddr;
1740 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1741 }
1742 unlock_user(target_saddr, target_addr, len);
1743
1744 return 0;
1745 }
1746
1747 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1748 struct target_msghdr *target_msgh)
1749 {
1750 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1751 abi_long msg_controllen;
1752 abi_ulong target_cmsg_addr;
1753 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1754 socklen_t space = 0;
1755
1756 msg_controllen = tswapal(target_msgh->msg_controllen);
1757 if (msg_controllen < sizeof (struct target_cmsghdr))
1758 goto the_end;
1759 target_cmsg_addr = tswapal(target_msgh->msg_control);
1760 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1761 target_cmsg_start = target_cmsg;
1762 if (!target_cmsg)
1763 return -TARGET_EFAULT;
1764
1765 while (cmsg && target_cmsg) {
1766 void *data = CMSG_DATA(cmsg);
1767 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1768
1769 int len = tswapal(target_cmsg->cmsg_len)
1770 - sizeof(struct target_cmsghdr);
1771
1772 space += CMSG_SPACE(len);
1773 if (space > msgh->msg_controllen) {
1774 space -= CMSG_SPACE(len);
1775 /* This is a QEMU bug, since we allocated the payload
1776 * area ourselves (unlike overflow in host-to-target
1777 * conversion, which is just the guest giving us a buffer
1778 * that's too small). It can't happen for the payload types
1779 * we currently support; if it becomes an issue in future
1780 * we would need to improve our allocation strategy to
1781 * something more intelligent than "twice the size of the
1782 * target buffer we're reading from".
1783 */
1784 qemu_log_mask(LOG_UNIMP,
1785 ("Unsupported ancillary data %d/%d: "
1786 "unhandled msg size\n"),
1787 tswap32(target_cmsg->cmsg_level),
1788 tswap32(target_cmsg->cmsg_type));
1789 break;
1790 }
1791
1792 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1793 cmsg->cmsg_level = SOL_SOCKET;
1794 } else {
1795 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1796 }
1797 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1798 cmsg->cmsg_len = CMSG_LEN(len);
1799
1800 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1801 int *fd = (int *)data;
1802 int *target_fd = (int *)target_data;
1803 int i, numfds = len / sizeof(int);
1804
1805 for (i = 0; i < numfds; i++) {
1806 __get_user(fd[i], target_fd + i);
1807 }
1808 } else if (cmsg->cmsg_level == SOL_SOCKET
1809 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1810 struct ucred *cred = (struct ucred *)data;
1811 struct target_ucred *target_cred =
1812 (struct target_ucred *)target_data;
1813
1814 __get_user(cred->pid, &target_cred->pid);
1815 __get_user(cred->uid, &target_cred->uid);
1816 __get_user(cred->gid, &target_cred->gid);
1817 } else {
1818 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1819 cmsg->cmsg_level, cmsg->cmsg_type);
1820 memcpy(data, target_data, len);
1821 }
1822
1823 cmsg = CMSG_NXTHDR(msgh, cmsg);
1824 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1825 target_cmsg_start);
1826 }
1827 unlock_user(target_cmsg, target_cmsg_addr, 0);
1828 the_end:
1829 msgh->msg_controllen = space;
1830 return 0;
1831 }
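/* Illustration (hypothetical guest call): a guest sendmsg() that passes a
 * file descriptor over an AF_UNIX socket supplies an SCM_RIGHTS control
 * message; the loop above converts each 4-byte descriptor from the guest
 * control buffer into the host cmsg. Because linux-user shares the host's
 * file descriptor table, the descriptor values themselves need no further
 * translation.
 */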
1832
1833 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1834 struct msghdr *msgh)
1835 {
1836 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1837 abi_long msg_controllen;
1838 abi_ulong target_cmsg_addr;
1839 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1840 socklen_t space = 0;
1841
1842 msg_controllen = tswapal(target_msgh->msg_controllen);
1843 if (msg_controllen < sizeof (struct target_cmsghdr))
1844 goto the_end;
1845 target_cmsg_addr = tswapal(target_msgh->msg_control);
1846 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1847 target_cmsg_start = target_cmsg;
1848 if (!target_cmsg)
1849 return -TARGET_EFAULT;
1850
1851 while (cmsg && target_cmsg) {
1852 void *data = CMSG_DATA(cmsg);
1853 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1854
1855 int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1856 int tgt_len, tgt_space;
1857
1858 /* We never copy a half-header but may copy half-data;
1859 * this is Linux's behaviour in put_cmsg(). Note that
1860 * truncation here is a guest problem (which we report
1861 * to the guest via the CTRUNC bit), unlike truncation
1862 * in target_to_host_cmsg, which is a QEMU bug.
1863 */
1864 if (msg_controllen < sizeof(struct target_cmsghdr)) {
1865 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1866 break;
1867 }
1868
1869 if (cmsg->cmsg_level == SOL_SOCKET) {
1870 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1871 } else {
1872 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1873 }
1874 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1875
1876 /* Payload types which need a different size of payload on
1877 * the target must adjust tgt_len here.
1878 */
1879 tgt_len = len;
1880 switch (cmsg->cmsg_level) {
1881 case SOL_SOCKET:
1882 switch (cmsg->cmsg_type) {
1883 case SO_TIMESTAMP:
1884 tgt_len = sizeof(struct target_timeval);
1885 break;
1886 default:
1887 break;
1888 }
1889 break;
1890 default:
1891 break;
1892 }
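/* Worked example (assuming a 32-bit guest on a 64-bit host; the host/guest
 * combination is an assumption, not fixed by this code): for SO_TIMESTAMP
 * the host delivers a 16-byte struct timeval while the guest expects an
 * 8-byte struct target_timeval, so tgt_len becomes 8 here and the
 * SO_TIMESTAMP case below converts field by field instead of copying bytes.
 */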
1893
1894 if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1895 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1896 tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1897 }
1898
1899 /* We must now copy-and-convert len bytes of payload
1900 * into tgt_len bytes of destination space. Bear in mind
1901 * that in both source and destination we may be dealing
1902 * with a truncated value!
1903 */
1904 switch (cmsg->cmsg_level) {
1905 case SOL_SOCKET:
1906 switch (cmsg->cmsg_type) {
1907 case SCM_RIGHTS:
1908 {
1909 int *fd = (int *)data;
1910 int *target_fd = (int *)target_data;
1911 int i, numfds = tgt_len / sizeof(int);
1912
1913 for (i = 0; i < numfds; i++) {
1914 __put_user(fd[i], target_fd + i);
1915 }
1916 break;
1917 }
1918 case SO_TIMESTAMP:
1919 {
1920 struct timeval *tv = (struct timeval *)data;
1921 struct target_timeval *target_tv =
1922 (struct target_timeval *)target_data;
1923
1924 if (len != sizeof(struct timeval) ||
1925 tgt_len != sizeof(struct target_timeval)) {
1926 goto unimplemented;
1927 }
1928
1929 /* copy struct timeval to target */
1930 __put_user(tv->tv_sec, &target_tv->tv_sec);
1931 __put_user(tv->tv_usec, &target_tv->tv_usec);
1932 break;
1933 }
1934 case SCM_CREDENTIALS:
1935 {
1936 struct ucred *cred = (struct ucred *)data;
1937 struct target_ucred *target_cred =
1938 (struct target_ucred *)target_data;
1939
1940 __put_user(cred->pid, &target_cred->pid);
1941 __put_user(cred->uid, &target_cred->uid);
1942 __put_user(cred->gid, &target_cred->gid);
1943 break;
1944 }
1945 default:
1946 goto unimplemented;
1947 }
1948 break;
1949
1950 case SOL_IP:
1951 switch (cmsg->cmsg_type) {
1952 case IP_TTL:
1953 {
1954 uint32_t *v = (uint32_t *)data;
1955 uint32_t *t_int = (uint32_t *)target_data;
1956
1957 if (len != sizeof(uint32_t) ||
1958 tgt_len != sizeof(uint32_t)) {
1959 goto unimplemented;
1960 }
1961 __put_user(*v, t_int);
1962 break;
1963 }
1964 case IP_RECVERR:
1965 {
1966 struct errhdr_t {
1967 struct sock_extended_err ee;
1968 struct sockaddr_in offender;
1969 };
1970 struct errhdr_t *errh = (struct errhdr_t *)data;
1971 struct errhdr_t *target_errh =
1972 (struct errhdr_t *)target_data;
1973
1974 if (len != sizeof(struct errhdr_t) ||
1975 tgt_len != sizeof(struct errhdr_t)) {
1976 goto unimplemented;
1977 }
1978 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1979 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1980 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1981 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1982 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1983 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1984 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1985 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1986 (void *) &errh->offender, sizeof(errh->offender));
1987 break;
1988 }
1989 default:
1990 goto unimplemented;
1991 }
1992 break;
1993
1994 case SOL_IPV6:
1995 switch (cmsg->cmsg_type) {
1996 case IPV6_HOPLIMIT:
1997 {
1998 uint32_t *v = (uint32_t *)data;
1999 uint32_t *t_int = (uint32_t *)target_data;
2000
2001 if (len != sizeof(uint32_t) ||
2002 tgt_len != sizeof(uint32_t)) {
2003 goto unimplemented;
2004 }
2005 __put_user(*v, t_int);
2006 break;
2007 }
2008 case IPV6_RECVERR:
2009 {
2010 struct errhdr6_t {
2011 struct sock_extended_err ee;
2012 struct sockaddr_in6 offender;
2013 };
2014 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2015 struct errhdr6_t *target_errh =
2016 (struct errhdr6_t *)target_data;
2017
2018 if (len != sizeof(struct errhdr6_t) ||
2019 tgt_len != sizeof(struct errhdr6_t)) {
2020 goto unimplemented;
2021 }
2022 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2023 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2024 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
2025 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2026 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2027 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2028 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2029 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2030 (void *) &errh->offender, sizeof(errh->offender));
2031 break;
2032 }
2033 default:
2034 goto unimplemented;
2035 }
2036 break;
2037
2038 default:
2039 unimplemented:
2040 qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2041 cmsg->cmsg_level, cmsg->cmsg_type);
2042 memcpy(target_data, data, MIN(len, tgt_len));
2043 if (tgt_len > len) {
2044 memset(target_data + len, 0, tgt_len - len);
2045 }
2046 }
2047
2048 target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2049 tgt_space = TARGET_CMSG_SPACE(tgt_len);
2050 if (msg_controllen < tgt_space) {
2051 tgt_space = msg_controllen;
2052 }
2053 msg_controllen -= tgt_space;
2054 space += tgt_space;
2055 cmsg = CMSG_NXTHDR(msgh, cmsg);
2056 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2057 target_cmsg_start);
2058 }
2059 unlock_user(target_cmsg, target_cmsg_addr, space);
2060 the_end:
2061 target_msgh->msg_controllen = tswapal(space);
2062 return 0;
2063 }
2064
2065 /* do_setsockopt() Must return target values and target errnos. */
2066 static abi_long do_setsockopt(int sockfd, int level, int optname,
2067 abi_ulong optval_addr, socklen_t optlen)
2068 {
2069 abi_long ret;
2070 int val;
2071 struct ip_mreqn *ip_mreq;
2072 struct ip_mreq_source *ip_mreq_source;
2073
2074 switch(level) {
2075 case SOL_TCP:
2076 case SOL_UDP:
2077 /* TCP and UDP options all take an 'int' value. */
2078 if (optlen < sizeof(uint32_t))
2079 return -TARGET_EINVAL;
2080
2081 if (get_user_u32(val, optval_addr))
2082 return -TARGET_EFAULT;
2083 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2084 break;
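/* Illustrative guest call (hypothetical): setsockopt(fd, SOL_TCP,
 * TCP_NODELAY, &one, sizeof(one)) only needs the 4-byte value fetched and
 * byte-swapped by get_user_u32() above before being handed to the host
 * setsockopt() unchanged.
 */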
2085 case SOL_IP:
2086 switch(optname) {
2087 case IP_TOS:
2088 case IP_TTL:
2089 case IP_HDRINCL:
2090 case IP_ROUTER_ALERT:
2091 case IP_RECVOPTS:
2092 case IP_RETOPTS:
2093 case IP_PKTINFO:
2094 case IP_MTU_DISCOVER:
2095 case IP_RECVERR:
2096 case IP_RECVTTL:
2097 case IP_RECVTOS:
2098 #ifdef IP_FREEBIND
2099 case IP_FREEBIND:
2100 #endif
2101 case IP_MULTICAST_TTL:
2102 case IP_MULTICAST_LOOP:
2103 val = 0;
2104 if (optlen >= sizeof(uint32_t)) {
2105 if (get_user_u32(val, optval_addr))
2106 return -TARGET_EFAULT;
2107 } else if (optlen >= 1) {
2108 if (get_user_u8(val, optval_addr))
2109 return -TARGET_EFAULT;
2110 }
2111 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2112 break;
2113 case IP_ADD_MEMBERSHIP:
2114 case IP_DROP_MEMBERSHIP:
2115 if (optlen < sizeof (struct target_ip_mreq) ||
2116 optlen > sizeof (struct target_ip_mreqn))
2117 return -TARGET_EINVAL;
2118
2119 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2120 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2121 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2122 break;
2123
2124 case IP_BLOCK_SOURCE:
2125 case IP_UNBLOCK_SOURCE:
2126 case IP_ADD_SOURCE_MEMBERSHIP:
2127 case IP_DROP_SOURCE_MEMBERSHIP:
2128 if (optlen != sizeof (struct target_ip_mreq_source))
2129 return -TARGET_EINVAL;
2130
2131 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2132 if (!ip_mreq_source) {
2133 return -TARGET_EFAULT;
2134 }
2135 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2136 unlock_user(ip_mreq_source, optval_addr, 0);
2137 break;
2138
2139 default:
2140 goto unimplemented;
2141 }
2142 break;
2143 case SOL_IPV6:
2144 switch (optname) {
2145 case IPV6_MTU_DISCOVER:
2146 case IPV6_MTU:
2147 case IPV6_V6ONLY:
2148 case IPV6_RECVPKTINFO:
2149 case IPV6_UNICAST_HOPS:
2150 case IPV6_MULTICAST_HOPS:
2151 case IPV6_MULTICAST_LOOP:
2152 case IPV6_RECVERR:
2153 case IPV6_RECVHOPLIMIT:
2154 case IPV6_2292HOPLIMIT:
2155 case IPV6_CHECKSUM:
2156 case IPV6_ADDRFORM:
2157 case IPV6_2292PKTINFO:
2158 case IPV6_RECVTCLASS:
2159 case IPV6_RECVRTHDR:
2160 case IPV6_2292RTHDR:
2161 case IPV6_RECVHOPOPTS:
2162 case IPV6_2292HOPOPTS:
2163 case IPV6_RECVDSTOPTS:
2164 case IPV6_2292DSTOPTS:
2165 case IPV6_TCLASS:
2166 case IPV6_ADDR_PREFERENCES:
2167 #ifdef IPV6_RECVPATHMTU
2168 case IPV6_RECVPATHMTU:
2169 #endif
2170 #ifdef IPV6_TRANSPARENT
2171 case IPV6_TRANSPARENT:
2172 #endif
2173 #ifdef IPV6_FREEBIND
2174 case IPV6_FREEBIND:
2175 #endif
2176 #ifdef IPV6_RECVORIGDSTADDR
2177 case IPV6_RECVORIGDSTADDR:
2178 #endif
2179 val = 0;
2180 if (optlen < sizeof(uint32_t)) {
2181 return -TARGET_EINVAL;
2182 }
2183 if (get_user_u32(val, optval_addr)) {
2184 return -TARGET_EFAULT;
2185 }
2186 ret = get_errno(setsockopt(sockfd, level, optname,
2187 &val, sizeof(val)));
2188 break;
2189 case IPV6_PKTINFO:
2190 {
2191 struct in6_pktinfo pki;
2192
2193 if (optlen < sizeof(pki)) {
2194 return -TARGET_EINVAL;
2195 }
2196
2197 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2198 return -TARGET_EFAULT;
2199 }
2200
2201 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2202
2203 ret = get_errno(setsockopt(sockfd, level, optname,
2204 &pki, sizeof(pki)));
2205 break;
2206 }
2207 case IPV6_ADD_MEMBERSHIP:
2208 case IPV6_DROP_MEMBERSHIP:
2209 {
2210 struct ipv6_mreq ipv6mreq;
2211
2212 if (optlen < sizeof(ipv6mreq)) {
2213 return -TARGET_EINVAL;
2214 }
2215
2216 if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2217 return -TARGET_EFAULT;
2218 }
2219
2220 ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2221
2222 ret = get_errno(setsockopt(sockfd, level, optname,
2223 &ipv6mreq, sizeof(ipv6mreq)));
2224 break;
2225 }
2226 default:
2227 goto unimplemented;
2228 }
2229 break;
2230 case SOL_ICMPV6:
2231 switch (optname) {
2232 case ICMPV6_FILTER:
2233 {
2234 struct icmp6_filter icmp6f;
2235
2236 if (optlen > sizeof(icmp6f)) {
2237 optlen = sizeof(icmp6f);
2238 }
2239
2240 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2241 return -TARGET_EFAULT;
2242 }
2243
2244 for (val = 0; val < 8; val++) {
2245 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2246 }
2247
2248 ret = get_errno(setsockopt(sockfd, level, optname,
2249 &icmp6f, optlen));
2250 break;
2251 }
2252 default:
2253 goto unimplemented;
2254 }
2255 break;
2256 case SOL_RAW:
2257 switch (optname) {
2258 case ICMP_FILTER:
2259 case IPV6_CHECKSUM:
2260 /* these take a u32 value */
2261 if (optlen < sizeof(uint32_t)) {
2262 return -TARGET_EINVAL;
2263 }
2264
2265 if (get_user_u32(val, optval_addr)) {
2266 return -TARGET_EFAULT;
2267 }
2268 ret = get_errno(setsockopt(sockfd, level, optname,
2269 &val, sizeof(val)));
2270 break;
2271
2272 default:
2273 goto unimplemented;
2274 }
2275 break;
2276 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2277 case SOL_ALG:
2278 switch (optname) {
2279 case ALG_SET_KEY:
2280 {
2281 char *alg_key = g_malloc(optlen);
2282
2283 if (!alg_key) {
2284 return -TARGET_ENOMEM;
2285 }
2286 if (copy_from_user(alg_key, optval_addr, optlen)) {
2287 g_free(alg_key);
2288 return -TARGET_EFAULT;
2289 }
2290 ret = get_errno(setsockopt(sockfd, level, optname,
2291 alg_key, optlen));
2292 g_free(alg_key);
2293 break;
2294 }
2295 case ALG_SET_AEAD_AUTHSIZE:
2296 {
2297 ret = get_errno(setsockopt(sockfd, level, optname,
2298 NULL, optlen));
2299 break;
2300 }
2301 default:
2302 goto unimplemented;
2303 }
2304 break;
2305 #endif
2306 case TARGET_SOL_SOCKET:
2307 switch (optname) {
2308 case TARGET_SO_RCVTIMEO:
2309 {
2310 struct timeval tv;
2311
2312 optname = SO_RCVTIMEO;
2313
2314 set_timeout:
2315 if (optlen != sizeof(struct target_timeval)) {
2316 return -TARGET_EINVAL;
2317 }
2318
2319 if (copy_from_user_timeval(&tv, optval_addr)) {
2320 return -TARGET_EFAULT;
2321 }
2322
2323 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2324 &tv, sizeof(tv)));
2325 return ret;
2326 }
2327 case TARGET_SO_SNDTIMEO:
2328 optname = SO_SNDTIMEO;
2329 goto set_timeout;
2330 case TARGET_SO_ATTACH_FILTER:
2331 {
2332 struct target_sock_fprog *tfprog;
2333 struct target_sock_filter *tfilter;
2334 struct sock_fprog fprog;
2335 struct sock_filter *filter;
2336 int i;
2337
2338 if (optlen != sizeof(*tfprog)) {
2339 return -TARGET_EINVAL;
2340 }
2341 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2342 return -TARGET_EFAULT;
2343 }
2344 if (!lock_user_struct(VERIFY_READ, tfilter,
2345 tswapal(tfprog->filter), 0)) {
2346 unlock_user_struct(tfprog, optval_addr, 1);
2347 return -TARGET_EFAULT;
2348 }
2349
2350 fprog.len = tswap16(tfprog->len);
2351 filter = g_try_new(struct sock_filter, fprog.len);
2352 if (filter == NULL) {
2353 unlock_user_struct(tfilter, tfprog->filter, 1);
2354 unlock_user_struct(tfprog, optval_addr, 1);
2355 return -TARGET_ENOMEM;
2356 }
2357 for (i = 0; i < fprog.len; i++) {
2358 filter[i].code = tswap16(tfilter[i].code);
2359 filter[i].jt = tfilter[i].jt;
2360 filter[i].jf = tfilter[i].jf;
2361 filter[i].k = tswap32(tfilter[i].k);
2362 }
2363 fprog.filter = filter;
2364
2365 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2366 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2367 g_free(filter);
2368
2369 unlock_user_struct(tfilter, tfprog->filter, 1);
2370 unlock_user_struct(tfprog, optval_addr, 1);
2371 return ret;
2372 }
2373 case TARGET_SO_BINDTODEVICE:
2374 {
2375 char *dev_ifname, *addr_ifname;
2376
2377 if (optlen > IFNAMSIZ - 1) {
2378 optlen = IFNAMSIZ - 1;
2379 }
2380 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2381 if (!dev_ifname) {
2382 return -TARGET_EFAULT;
2383 }
2384 optname = SO_BINDTODEVICE;
2385 addr_ifname = alloca(IFNAMSIZ);
2386 memcpy(addr_ifname, dev_ifname, optlen);
2387 addr_ifname[optlen] = 0;
2388 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2389 addr_ifname, optlen));
2390 unlock_user(dev_ifname, optval_addr, 0);
2391 return ret;
2392 }
2393 case TARGET_SO_LINGER:
2394 {
2395 struct linger lg;
2396 struct target_linger *tlg;
2397
2398 if (optlen != sizeof(struct target_linger)) {
2399 return -TARGET_EINVAL;
2400 }
2401 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2402 return -TARGET_EFAULT;
2403 }
2404 __get_user(lg.l_onoff, &tlg->l_onoff);
2405 __get_user(lg.l_linger, &tlg->l_linger);
2406 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2407 &lg, sizeof(lg)));
2408 unlock_user_struct(tlg, optval_addr, 0);
2409 return ret;
2410 }
2411 /* Options with 'int' argument. */
2412 case TARGET_SO_DEBUG:
2413 optname = SO_DEBUG;
2414 break;
2415 case TARGET_SO_REUSEADDR:
2416 optname = SO_REUSEADDR;
2417 break;
2418 #ifdef SO_REUSEPORT
2419 case TARGET_SO_REUSEPORT:
2420 optname = SO_REUSEPORT;
2421 break;
2422 #endif
2423 case TARGET_SO_TYPE:
2424 optname = SO_TYPE;
2425 break;
2426 case TARGET_SO_ERROR:
2427 optname = SO_ERROR;
2428 break;
2429 case TARGET_SO_DONTROUTE:
2430 optname = SO_DONTROUTE;
2431 break;
2432 case TARGET_SO_BROADCAST:
2433 optname = SO_BROADCAST;
2434 break;
2435 case TARGET_SO_SNDBUF:
2436 optname = SO_SNDBUF;
2437 break;
2438 case TARGET_SO_SNDBUFFORCE:
2439 optname = SO_SNDBUFFORCE;
2440 break;
2441 case TARGET_SO_RCVBUF:
2442 optname = SO_RCVBUF;
2443 break;
2444 case TARGET_SO_RCVBUFFORCE:
2445 optname = SO_RCVBUFFORCE;
2446 break;
2447 case TARGET_SO_KEEPALIVE:
2448 optname = SO_KEEPALIVE;
2449 break;
2450 case TARGET_SO_OOBINLINE:
2451 optname = SO_OOBINLINE;
2452 break;
2453 case TARGET_SO_NO_CHECK:
2454 optname = SO_NO_CHECK;
2455 break;
2456 case TARGET_SO_PRIORITY:
2457 optname = SO_PRIORITY;
2458 break;
2459 #ifdef SO_BSDCOMPAT
2460 case TARGET_SO_BSDCOMPAT:
2461 optname = SO_BSDCOMPAT;
2462 break;
2463 #endif
2464 case TARGET_SO_PASSCRED:
2465 optname = SO_PASSCRED;
2466 break;
2467 case TARGET_SO_PASSSEC:
2468 optname = SO_PASSSEC;
2469 break;
2470 case TARGET_SO_TIMESTAMP:
2471 optname = SO_TIMESTAMP;
2472 break;
2473 case TARGET_SO_RCVLOWAT:
2474 optname = SO_RCVLOWAT;
2475 break;
2476 default:
2477 goto unimplemented;
2478 }
2479 if (optlen < sizeof(uint32_t))
2480 return -TARGET_EINVAL;
2481
2482 if (get_user_u32(val, optval_addr))
2483 return -TARGET_EFAULT;
2484 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2485 break;
2486 #ifdef SOL_NETLINK
2487 case SOL_NETLINK:
2488 switch (optname) {
2489 case NETLINK_PKTINFO:
2490 case NETLINK_ADD_MEMBERSHIP:
2491 case NETLINK_DROP_MEMBERSHIP:
2492 case NETLINK_BROADCAST_ERROR:
2493 case NETLINK_NO_ENOBUFS:
2494 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2495 case NETLINK_LISTEN_ALL_NSID:
2496 case NETLINK_CAP_ACK:
2497 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2498 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2499 case NETLINK_EXT_ACK:
2500 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2501 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2502 case NETLINK_GET_STRICT_CHK:
2503 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2504 break;
2505 default:
2506 goto unimplemented;
2507 }
2508 val = 0;
2509 if (optlen < sizeof(uint32_t)) {
2510 return -TARGET_EINVAL;
2511 }
2512 if (get_user_u32(val, optval_addr)) {
2513 return -TARGET_EFAULT;
2514 }
2515 ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2516 sizeof(val)));
2517 break;
2518 #endif /* SOL_NETLINK */
2519 default:
2520 unimplemented:
2521 qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2522 level, optname);
2523 ret = -TARGET_ENOPROTOOPT;
2524 }
2525 return ret;
2526 }
2527
2528 /* do_getsockopt() Must return target values and target errnos. */
2529 static abi_long do_getsockopt(int sockfd, int level, int optname,
2530 abi_ulong optval_addr, abi_ulong optlen)
2531 {
2532 abi_long ret;
2533 int len, val;
2534 socklen_t lv;
2535
2536 switch(level) {
2537 case TARGET_SOL_SOCKET:
2538 level = SOL_SOCKET;
2539 switch (optname) {
2540 /* These don't just return a single integer */
2541 case TARGET_SO_PEERNAME:
2542 goto unimplemented;
2543 case TARGET_SO_RCVTIMEO: {
2544 struct timeval tv;
2545 socklen_t tvlen;
2546
2547 optname = SO_RCVTIMEO;
2548
2549 get_timeout:
2550 if (get_user_u32(len, optlen)) {
2551 return -TARGET_EFAULT;
2552 }
2553 if (len < 0) {
2554 return -TARGET_EINVAL;
2555 }
2556
2557 tvlen = sizeof(tv);
2558 ret = get_errno(getsockopt(sockfd, level, optname,
2559 &tv, &tvlen));
2560 if (ret < 0) {
2561 return ret;
2562 }
2563 if (len > sizeof(struct target_timeval)) {
2564 len = sizeof(struct target_timeval);
2565 }
2566 if (copy_to_user_timeval(optval_addr, &tv)) {
2567 return -TARGET_EFAULT;
2568 }
2569 if (put_user_u32(len, optlen)) {
2570 return -TARGET_EFAULT;
2571 }
2572 break;
2573 }
2574 case TARGET_SO_SNDTIMEO:
2575 optname = SO_SNDTIMEO;
2576 goto get_timeout;
2577 case TARGET_SO_PEERCRED: {
2578 struct ucred cr;
2579 socklen_t crlen;
2580 struct target_ucred *tcr;
2581
2582 if (get_user_u32(len, optlen)) {
2583 return -TARGET_EFAULT;
2584 }
2585 if (len < 0) {
2586 return -TARGET_EINVAL;
2587 }
2588
2589 crlen = sizeof(cr);
2590 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2591 &cr, &crlen));
2592 if (ret < 0) {
2593 return ret;
2594 }
2595 if (len > crlen) {
2596 len = crlen;
2597 }
2598 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2599 return -TARGET_EFAULT;
2600 }
2601 __put_user(cr.pid, &tcr->pid);
2602 __put_user(cr.uid, &tcr->uid);
2603 __put_user(cr.gid, &tcr->gid);
2604 unlock_user_struct(tcr, optval_addr, 1);
2605 if (put_user_u32(len, optlen)) {
2606 return -TARGET_EFAULT;
2607 }
2608 break;
2609 }
2610 case TARGET_SO_PEERSEC: {
2611 char *name;
2612
2613 if (get_user_u32(len, optlen)) {
2614 return -TARGET_EFAULT;
2615 }
2616 if (len < 0) {
2617 return -TARGET_EINVAL;
2618 }
2619 name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2620 if (!name) {
2621 return -TARGET_EFAULT;
2622 }
2623 lv = len;
2624 ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2625 name, &lv));
2626 if (put_user_u32(lv, optlen)) {
2627 ret = -TARGET_EFAULT;
2628 }
2629 unlock_user(name, optval_addr, lv);
2630 break;
2631 }
2632 case TARGET_SO_LINGER:
2633 {
2634 struct linger lg;
2635 socklen_t lglen;
2636 struct target_linger *tlg;
2637
2638 if (get_user_u32(len, optlen)) {
2639 return -TARGET_EFAULT;
2640 }
2641 if (len < 0) {
2642 return -TARGET_EINVAL;
2643 }
2644
2645 lglen = sizeof(lg);
2646 ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2647 &lg, &lglen));
2648 if (ret < 0) {
2649 return ret;
2650 }
2651 if (len > lglen) {
2652 len = lglen;
2653 }
2654 if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2655 return -TARGET_EFAULT;
2656 }
2657 __put_user(lg.l_onoff, &tlg->l_onoff);
2658 __put_user(lg.l_linger, &tlg->l_linger);
2659 unlock_user_struct(tlg, optval_addr, 1);
2660 if (put_user_u32(len, optlen)) {
2661 return -TARGET_EFAULT;
2662 }
2663 break;
2664 }
2665 /* Options with 'int' argument. */
2666 case TARGET_SO_DEBUG:
2667 optname = SO_DEBUG;
2668 goto int_case;
2669 case TARGET_SO_REUSEADDR:
2670 optname = SO_REUSEADDR;
2671 goto int_case;
2672 #ifdef SO_REUSEPORT
2673 case TARGET_SO_REUSEPORT:
2674 optname = SO_REUSEPORT;
2675 goto int_case;
2676 #endif
2677 case TARGET_SO_TYPE:
2678 optname = SO_TYPE;
2679 goto int_case;
2680 case TARGET_SO_ERROR:
2681 optname = SO_ERROR;
2682 goto int_case;
2683 case TARGET_SO_DONTROUTE:
2684 optname = SO_DONTROUTE;
2685 goto int_case;
2686 case TARGET_SO_BROADCAST:
2687 optname = SO_BROADCAST;
2688 goto int_case;
2689 case TARGET_SO_SNDBUF:
2690 optname = SO_SNDBUF;
2691 goto int_case;
2692 case TARGET_SO_RCVBUF:
2693 optname = SO_RCVBUF;
2694 goto int_case;
2695 case TARGET_SO_KEEPALIVE:
2696 optname = SO_KEEPALIVE;
2697 goto int_case;
2698 case TARGET_SO_OOBINLINE:
2699 optname = SO_OOBINLINE;
2700 goto int_case;
2701 case TARGET_SO_NO_CHECK:
2702 optname = SO_NO_CHECK;
2703 goto int_case;
2704 case TARGET_SO_PRIORITY:
2705 optname = SO_PRIORITY;
2706 goto int_case;
2707 #ifdef SO_BSDCOMPAT
2708 case TARGET_SO_BSDCOMPAT:
2709 optname = SO_BSDCOMPAT;
2710 goto int_case;
2711 #endif
2712 case TARGET_SO_PASSCRED:
2713 optname = SO_PASSCRED;
2714 goto int_case;
2715 case TARGET_SO_TIMESTAMP:
2716 optname = SO_TIMESTAMP;
2717 goto int_case;
2718 case TARGET_SO_RCVLOWAT:
2719 optname = SO_RCVLOWAT;
2720 goto int_case;
2721 case TARGET_SO_ACCEPTCONN:
2722 optname = SO_ACCEPTCONN;
2723 goto int_case;
2724 case TARGET_SO_PROTOCOL:
2725 optname = SO_PROTOCOL;
2726 goto int_case;
2727 case TARGET_SO_DOMAIN:
2728 optname = SO_DOMAIN;
2729 goto int_case;
2730 default:
2731 goto int_case;
2732 }
2733 break;
2734 case SOL_TCP:
2735 case SOL_UDP:
2736 /* TCP and UDP options all take an 'int' value. */
2737 int_case:
2738 if (get_user_u32(len, optlen))
2739 return -TARGET_EFAULT;
2740 if (len < 0)
2741 return -TARGET_EINVAL;
2742 lv = sizeof(lv);
2743 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2744 if (ret < 0)
2745 return ret;
2746 if (optname == SO_TYPE) {
2747 val = host_to_target_sock_type(val);
2748 }
2749 if (len > lv)
2750 len = lv;
2751 if (len == 4) {
2752 if (put_user_u32(val, optval_addr))
2753 return -TARGET_EFAULT;
2754 } else {
2755 if (put_user_u8(val, optval_addr))
2756 return -TARGET_EFAULT;
2757 }
2758 if (put_user_u32(len, optlen))
2759 return -TARGET_EFAULT;
2760 break;
2761 case SOL_IP:
2762 switch(optname) {
2763 case IP_TOS:
2764 case IP_TTL:
2765 case IP_HDRINCL:
2766 case IP_ROUTER_ALERT:
2767 case IP_RECVOPTS:
2768 case IP_RETOPTS:
2769 case IP_PKTINFO:
2770 case IP_MTU_DISCOVER:
2771 case IP_RECVERR:
2772 case IP_RECVTOS:
2773 #ifdef IP_FREEBIND
2774 case IP_FREEBIND:
2775 #endif
2776 case IP_MULTICAST_TTL:
2777 case IP_MULTICAST_LOOP:
2778 if (get_user_u32(len, optlen))
2779 return -TARGET_EFAULT;
2780 if (len < 0)
2781 return -TARGET_EINVAL;
2782 lv = sizeof(lv);
2783 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2784 if (ret < 0)
2785 return ret;
2786 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2787 len = 1;
2788 if (put_user_u32(len, optlen)
2789 || put_user_u8(val, optval_addr))
2790 return -TARGET_EFAULT;
2791 } else {
2792 if (len > sizeof(int))
2793 len = sizeof(int);
2794 if (put_user_u32(len, optlen)
2795 || put_user_u32(val, optval_addr))
2796 return -TARGET_EFAULT;
2797 }
2798 break;
2799 default:
2800 ret = -TARGET_ENOPROTOOPT;
2801 break;
2802 }
2803 break;
2804 case SOL_IPV6:
2805 switch (optname) {
2806 case IPV6_MTU_DISCOVER:
2807 case IPV6_MTU:
2808 case IPV6_V6ONLY:
2809 case IPV6_RECVPKTINFO:
2810 case IPV6_UNICAST_HOPS:
2811 case IPV6_MULTICAST_HOPS:
2812 case IPV6_MULTICAST_LOOP:
2813 case IPV6_RECVERR:
2814 case IPV6_RECVHOPLIMIT:
2815 case IPV6_2292HOPLIMIT:
2816 case IPV6_CHECKSUM:
2817 case IPV6_ADDRFORM:
2818 case IPV6_2292PKTINFO:
2819 case IPV6_RECVTCLASS:
2820 case IPV6_RECVRTHDR:
2821 case IPV6_2292RTHDR:
2822 case IPV6_RECVHOPOPTS:
2823 case IPV6_2292HOPOPTS:
2824 case IPV6_RECVDSTOPTS:
2825 case IPV6_2292DSTOPTS:
2826 case IPV6_TCLASS:
2827 case IPV6_ADDR_PREFERENCES:
2828 #ifdef IPV6_RECVPATHMTU
2829 case IPV6_RECVPATHMTU:
2830 #endif
2831 #ifdef IPV6_TRANSPARENT
2832 case IPV6_TRANSPARENT:
2833 #endif
2834 #ifdef IPV6_FREEBIND
2835 case IPV6_FREEBIND:
2836 #endif
2837 #ifdef IPV6_RECVORIGDSTADDR
2838 case IPV6_RECVORIGDSTADDR:
2839 #endif
2840 if (get_user_u32(len, optlen))
2841 return -TARGET_EFAULT;
2842 if (len < 0)
2843 return -TARGET_EINVAL;
2844 lv = sizeof(lv);
2845 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2846 if (ret < 0)
2847 return ret;
2848 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2849 len = 1;
2850 if (put_user_u32(len, optlen)
2851 || put_user_u8(val, optval_addr))
2852 return -TARGET_EFAULT;
2853 } else {
2854 if (len > sizeof(int))
2855 len = sizeof(int);
2856 if (put_user_u32(len, optlen)
2857 || put_user_u32(val, optval_addr))
2858 return -TARGET_EFAULT;
2859 }
2860 break;
2861 default:
2862 ret = -TARGET_ENOPROTOOPT;
2863 break;
2864 }
2865 break;
2866 #ifdef SOL_NETLINK
2867 case SOL_NETLINK:
2868 switch (optname) {
2869 case NETLINK_PKTINFO:
2870 case NETLINK_BROADCAST_ERROR:
2871 case NETLINK_NO_ENOBUFS:
2872 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2873 case NETLINK_LISTEN_ALL_NSID:
2874 case NETLINK_CAP_ACK:
2875 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2876 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2877 case NETLINK_EXT_ACK:
2878 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2879 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2880 case NETLINK_GET_STRICT_CHK:
2881 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2882 if (get_user_u32(len, optlen)) {
2883 return -TARGET_EFAULT;
2884 }
2885 if (len != sizeof(val)) {
2886 return -TARGET_EINVAL;
2887 }
2888 lv = len;
2889 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2890 if (ret < 0) {
2891 return ret;
2892 }
2893 if (put_user_u32(lv, optlen)
2894 || put_user_u32(val, optval_addr)) {
2895 return -TARGET_EFAULT;
2896 }
2897 break;
2898 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2899 case NETLINK_LIST_MEMBERSHIPS:
2900 {
2901 uint32_t *results;
2902 int i;
2903 if (get_user_u32(len, optlen)) {
2904 return -TARGET_EFAULT;
2905 }
2906 if (len < 0) {
2907 return -TARGET_EINVAL;
2908 }
2909 results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2910 if (!results && len > 0) {
2911 return -TARGET_EFAULT;
2912 }
2913 lv = len;
2914 ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2915 if (ret < 0) {
2916 unlock_user(results, optval_addr, 0);
2917 return ret;
2918 }
2919 /* swap host endianness to target endianness. */
2920 for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2921 results[i] = tswap32(results[i]);
2922 }
2923 if (put_user_u32(lv, optlen)) {
2924 return -TARGET_EFAULT;
2925 }
2926 unlock_user(results, optval_addr, 0);
2927 break;
2928 }
2929 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2930 default:
2931 goto unimplemented;
2932 }
2933 break;
2934 #endif /* SOL_NETLINK */
2935 default:
2936 unimplemented:
2937 qemu_log_mask(LOG_UNIMP,
2938 "getsockopt level=%d optname=%d not yet supported\n",
2939 level, optname);
2940 ret = -TARGET_EOPNOTSUPP;
2941 break;
2942 }
2943 return ret;
2944 }
2945
2946 /* Convert a target low/high pair representing a file offset into the host
2947 * low/high pair. This function doesn't handle offsets bigger than 64 bits
2948 * as the kernel doesn't handle them either.
2949 */
2950 static void target_to_host_low_high(abi_ulong tlow,
2951 abi_ulong thigh,
2952 unsigned long *hlow,
2953 unsigned long *hhigh)
2954 {
2955 uint64_t off = tlow |
2956 ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2957 TARGET_LONG_BITS / 2;
2958
2959 *hlow = off;
2960 *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2961 }
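/* Worked example (assuming a 32-bit target and a 64-bit host; other
 * combinations follow the same arithmetic): tlow = 0x89abcdef and
 * thigh = 0x01234567 combine into off = 0x0123456789abcdef, so
 * *hlow = 0x0123456789abcdef and *hhigh = 0. On a 32-bit host the same
 * offset would instead come back as *hlow = 0x89abcdef and
 * *hhigh = 0x01234567.
 */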
2962
2963 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2964 abi_ulong count, int copy)
2965 {
2966 struct target_iovec *target_vec;
2967 struct iovec *vec;
2968 abi_ulong total_len, max_len;
2969 int i;
2970 int err = 0;
2971 bool bad_address = false;
2972
2973 if (count == 0) {
2974 errno = 0;
2975 return NULL;
2976 }
2977 if (count > IOV_MAX) {
2978 errno = EINVAL;
2979 return NULL;
2980 }
2981
2982 vec = g_try_new0(struct iovec, count);
2983 if (vec == NULL) {
2984 errno = ENOMEM;
2985 return NULL;
2986 }
2987
2988 target_vec = lock_user(VERIFY_READ, target_addr,
2989 count * sizeof(struct target_iovec), 1);
2990 if (target_vec == NULL) {
2991 err = EFAULT;
2992 goto fail2;
2993 }
2994
2995 /* ??? If host page size > target page size, this will result in a
2996 value larger than what we can actually support. */
2997 max_len = 0x7fffffff & TARGET_PAGE_MASK;
2998 total_len = 0;
2999
3000 for (i = 0; i < count; i++) {
3001 abi_ulong base = tswapal(target_vec[i].iov_base);
3002 abi_long len = tswapal(target_vec[i].iov_len);
3003
3004 if (len < 0) {
3005 err = EINVAL;
3006 goto fail;
3007 } else if (len == 0) {
3008 /* Zero length pointer is ignored. */
3009 vec[i].iov_base = 0;
3010 } else {
3011 vec[i].iov_base = lock_user(type, base, len, copy);
3012 /* If the first buffer pointer is bad, this is a fault. But
3013 * subsequent bad buffers will result in a partial write; this
3014 * is realized by filling the vector with null pointers and
3015 * zero lengths. */
3016 if (!vec[i].iov_base) {
3017 if (i == 0) {
3018 err = EFAULT;
3019 goto fail;
3020 } else {
3021 bad_address = true;
3022 }
3023 }
3024 if (bad_address) {
3025 len = 0;
3026 }
3027 if (len > max_len - total_len) {
3028 len = max_len - total_len;
3029 }
3030 }
3031 vec[i].iov_len = len;
3032 total_len += len;
3033 }
3034
3035 unlock_user(target_vec, target_addr, 0);
3036 return vec;
3037
3038 fail:
3039 while (--i >= 0) {
3040 if (tswapal(target_vec[i].iov_len) > 0) {
3041 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3042 }
3043 }
3044 unlock_user(target_vec, target_addr, 0);
3045 fail2:
3046 g_free(vec);
3047 errno = err;
3048 return NULL;
3049 }
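/* Illustration (hypothetical guest call): a writev() with three iovec
 * entries whose second base points at an unmapped page does not fail here;
 * instead the second and third entries are given zero lengths, so the host
 * syscall performs a partial write of the first buffer only, as described
 * in the comment about partial writes above.
 */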
3050
3051 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3052 abi_ulong count, int copy)
3053 {
3054 struct target_iovec *target_vec;
3055 int i;
3056
3057 target_vec = lock_user(VERIFY_READ, target_addr,
3058 count * sizeof(struct target_iovec), 1);
3059 if (target_vec) {
3060 for (i = 0; i < count; i++) {
3061 abi_ulong base = tswapal(target_vec[i].iov_base);
3062 abi_long len = tswapal(target_vec[i].iov_len);
3063 if (len < 0) {
3064 break;
3065 }
3066 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3067 }
3068 unlock_user(target_vec, target_addr, 0);
3069 }
3070
3071 g_free(vec);
3072 }
3073
3074 static inline int target_to_host_sock_type(int *type)
3075 {
3076 int host_type = 0;
3077 int target_type = *type;
3078
3079 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3080 case TARGET_SOCK_DGRAM:
3081 host_type = SOCK_DGRAM;
3082 break;
3083 case TARGET_SOCK_STREAM:
3084 host_type = SOCK_STREAM;
3085 break;
3086 default:
3087 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3088 break;
3089 }
3090 if (target_type & TARGET_SOCK_CLOEXEC) {
3091 #if defined(SOCK_CLOEXEC)
3092 host_type |= SOCK_CLOEXEC;
3093 #else
3094 return -TARGET_EINVAL;
3095 #endif
3096 }
3097 if (target_type & TARGET_SOCK_NONBLOCK) {
3098 #if defined(SOCK_NONBLOCK)
3099 host_type |= SOCK_NONBLOCK;
3100 #elif !defined(O_NONBLOCK)
3101 return -TARGET_EINVAL;
3102 #endif
3103 }
3104 *type = host_type;
3105 return 0;
3106 }
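/* Example mapping (illustrative): a guest type of
 * TARGET_SOCK_DGRAM | TARGET_SOCK_NONBLOCK becomes
 * SOCK_DGRAM | SOCK_NONBLOCK on hosts that define SOCK_NONBLOCK; on hosts
 * that only have O_NONBLOCK the flag is left out here and emulated later by
 * sock_flags_fixup() below.
 */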
3107
3108 /* Try to emulate socket type flags after socket creation. */
3109 static int sock_flags_fixup(int fd, int target_type)
3110 {
3111 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3112 if (target_type & TARGET_SOCK_NONBLOCK) {
3113 int flags = fcntl(fd, F_GETFL);
3114 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3115 close(fd);
3116 return -TARGET_EINVAL;
3117 }
3118 }
3119 #endif
3120 return fd;
3121 }
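/* Usage sketch (illustrative): on a host without SOCK_NONBLOCK, a guest
 * socket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0) is first created as a
 * blocking socket and then switched to O_NONBLOCK by the fcntl() above, so
 * the guest still observes non-blocking behaviour.
 */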
3122
3123 /* do_socket() Must return target values and target errnos. */
3124 static abi_long do_socket(int domain, int type, int protocol)
3125 {
3126 int target_type = type;
3127 int ret;
3128
3129 ret = target_to_host_sock_type(&type);
3130 if (ret) {
3131 return ret;
3132 }
3133
3134 if (domain == PF_NETLINK && !(
3135 #ifdef CONFIG_RTNETLINK
3136 protocol == NETLINK_ROUTE ||
3137 #endif
3138 protocol == NETLINK_KOBJECT_UEVENT ||
3139 protocol == NETLINK_AUDIT)) {
3140 return -TARGET_EPROTONOSUPPORT;
3141 }
3142
3143 if (domain == AF_PACKET ||
3144 (domain == AF_INET && type == SOCK_PACKET)) {
3145 protocol = tswap16(protocol);
3146 }
3147
3148 ret = get_errno(socket(domain, type, protocol));
3149 if (ret >= 0) {
3150 ret = sock_flags_fixup(ret, target_type);
3151 if (type == SOCK_PACKET) {
3152 /* Handle an obsolete case:
3153 * if the socket type is SOCK_PACKET, bind by name.
3154 */
3155 fd_trans_register(ret, &target_packet_trans);
3156 } else if (domain == PF_NETLINK) {
3157 switch (protocol) {
3158 #ifdef CONFIG_RTNETLINK
3159 case NETLINK_ROUTE:
3160 fd_trans_register(ret, &target_netlink_route_trans);
3161 break;
3162 #endif
3163 case NETLINK_KOBJECT_UEVENT:
3164 /* nothing to do: messages are strings */
3165 break;
3166 case NETLINK_AUDIT:
3167 fd_trans_register(ret, &target_netlink_audit_trans);
3168 break;
3169 default:
3170 g_assert_not_reached();
3171 }
3172 }
3173 }
3174 return ret;
3175 }
3176
3177 /* do_bind() Must return target values and target errnos. */
3178 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3179 socklen_t addrlen)
3180 {
3181 void *addr;
3182 abi_long ret;
3183
3184 if ((int)addrlen < 0) {
3185 return -TARGET_EINVAL;
3186 }
3187
3188 addr = alloca(addrlen+1);
3189
3190 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3191 if (ret)
3192 return ret;
3193
3194 return get_errno(bind(sockfd, addr, addrlen));
3195 }
3196
3197 /* do_connect() Must return target values and target errnos. */
3198 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3199 socklen_t addrlen)
3200 {
3201 void *addr;
3202 abi_long ret;
3203
3204 if ((int)addrlen < 0) {
3205 return -TARGET_EINVAL;
3206 }
3207
3208 addr = alloca(addrlen+1);
3209
3210 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3211 if (ret)
3212 return ret;
3213
3214 return get_errno(safe_connect(sockfd, addr, addrlen));
3215 }
3216
3217 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3218 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3219 int flags, int send)
3220 {
3221 abi_long ret, len;
3222 struct msghdr msg;
3223 abi_ulong count;
3224 struct iovec *vec;
3225 abi_ulong target_vec;
3226
3227 if (msgp->msg_name) {
3228 msg.msg_namelen = tswap32(msgp->msg_namelen);
3229 msg.msg_name = alloca(msg.msg_namelen+1);
3230 ret = target_to_host_sockaddr(fd, msg.msg_name,
3231 tswapal(msgp->msg_name),
3232 msg.msg_namelen);
3233 if (ret == -TARGET_EFAULT) {
3234 /* For connected sockets msg_name and msg_namelen must
3235 * be ignored, so returning EFAULT immediately is wrong.
3236 * Instead, pass a bad msg_name to the host kernel, and
3237 * let it decide whether to return EFAULT or not.
3238 */
3239 msg.msg_name = (void *)-1;
3240 } else if (ret) {
3241 goto out2;
3242 }
3243 } else {
3244 msg.msg_name = NULL;
3245 msg.msg_namelen = 0;
3246 }
3247 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3248 msg.msg_control = alloca(msg.msg_controllen);
3249 memset(msg.msg_control, 0, msg.msg_controllen);
3250
3251 msg.msg_flags = tswap32(msgp->msg_flags);
3252
3253 count = tswapal(msgp->msg_iovlen);
3254 target_vec = tswapal(msgp->msg_iov);
3255
3256 if (count > IOV_MAX) {
3257 /* sendmsg/recvmsg return a different errno for this condition than
3258 * readv/writev, so we must catch it here before lock_iovec() does.
3259 */
3260 ret = -TARGET_EMSGSIZE;
3261 goto out2;
3262 }
3263
3264 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3265 target_vec, count, send);
3266 if (vec == NULL) {
3267 ret = -host_to_target_errno(errno);
3268 goto out2;
3269 }
3270 msg.msg_iovlen = count;
3271 msg.msg_iov = vec;
3272
3273 if (send) {
3274 if (fd_trans_target_to_host_data(fd)) {
3275 void *host_msg;
3276
3277 host_msg = g_malloc(msg.msg_iov->iov_len);
3278 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3279 ret = fd_trans_target_to_host_data(fd)(host_msg,
3280 msg.msg_iov->iov_len);
3281 if (ret >= 0) {
3282 msg.msg_iov->iov_base = host_msg;
3283 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3284 }
3285 g_free(host_msg);
3286 } else {
3287 ret = target_to_host_cmsg(&msg, msgp);
3288 if (ret == 0) {
3289 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3290 }
3291 }
3292 } else {
3293 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3294 if (!is_error(ret)) {
3295 len = ret;
3296 if (fd_trans_host_to_target_data(fd)) {
3297 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3298 MIN(msg.msg_iov->iov_len, len));
3299 } else {
3300 ret = host_to_target_cmsg(msgp, &msg);
3301 }
3302 if (!is_error(ret)) {
3303 msgp->msg_namelen = tswap32(msg.msg_namelen);
3304 msgp->msg_flags = tswap32(msg.msg_flags);
3305 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3306 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3307 msg.msg_name, msg.msg_namelen);
3308 if (ret) {
3309 goto out;
3310 }
3311 }
3312
3313 ret = len;
3314 }
3315 }
3316 }
3317
3318 out:
3319 unlock_iovec(vec, target_vec, count, !send);
3320 out2:
3321 return ret;
3322 }
3323
3324 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3325 int flags, int send)
3326 {
3327 abi_long ret;
3328 struct target_msghdr *msgp;
3329
3330 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3331 msgp,
3332 target_msg,
3333 send ? 1 : 0)) {
3334 return -TARGET_EFAULT;
3335 }
3336 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3337 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3338 return ret;
3339 }
3340
3341 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3342 * so it might not have this *mmsg-specific flag either.
3343 */
3344 #ifndef MSG_WAITFORONE
3345 #define MSG_WAITFORONE 0x10000
3346 #endif
3347
3348 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3349 unsigned int vlen, unsigned int flags,
3350 int send)
3351 {
3352 struct target_mmsghdr *mmsgp;
3353 abi_long ret = 0;
3354 int i;
3355
3356 if (vlen > UIO_MAXIOV) {
3357 vlen = UIO_MAXIOV;
3358 }
3359
3360 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3361 if (!mmsgp) {
3362 return -TARGET_EFAULT;
3363 }
3364
3365 for (i = 0; i < vlen; i++) {
3366 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3367 if (is_error(ret)) {
3368 break;
3369 }
3370 mmsgp[i].msg_len = tswap32(ret);
3371 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3372 if (flags & MSG_WAITFORONE) {
3373 flags |= MSG_DONTWAIT;
3374 }
3375 }
3376
3377 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3378
3379 /* Return number of datagrams sent if we sent any at all;
3380 * otherwise return the error.
3381 */
3382 if (i) {
3383 return i;
3384 }
3385 return ret;
3386 }
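/* Illustration (hypothetical): for a recvmmsg() with vlen == 4 where the
 * third message fails with EAGAIN, the loop above stops with i == 2 and the
 * function returns 2, so the guest sees the two datagrams that were
 * received; an error is only reported when no message was transferred.
 */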
3387
3388 /* do_accept4() Must return target values and target errnos. */
3389 static abi_long do_accept4(int fd, abi_ulong target_addr,
3390 abi_ulong target_addrlen_addr, int flags)
3391 {
3392 socklen_t addrlen, ret_addrlen;
3393 void *addr;
3394 abi_long ret;
3395 int host_flags;
3396
3397 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3398
3399 if (target_addr == 0) {
3400 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3401 }
3402
3403 /* Linux returns EFAULT if the addrlen pointer is invalid */
3404 if (get_user_u32(addrlen, target_addrlen_addr))
3405 return -TARGET_EFAULT;
3406
3407 if ((int)addrlen < 0) {
3408 return -TARGET_EINVAL;
3409 }
3410
3411 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3412 return -TARGET_EFAULT;
3413 }
3414
3415 addr = alloca(addrlen);
3416
3417 ret_addrlen = addrlen;
3418 ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3419 if (!is_error(ret)) {
3420 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3421 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3422 ret = -TARGET_EFAULT;
3423 }
3424 }
3425 return ret;
3426 }
3427
3428 /* do_getpeername() Must return target values and target errnos. */
3429 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3430 abi_ulong target_addrlen_addr)
3431 {
3432 socklen_t addrlen, ret_addrlen;
3433 void *addr;
3434 abi_long ret;
3435
3436 if (get_user_u32(addrlen, target_addrlen_addr))
3437 return -TARGET_EFAULT;
3438
3439 if ((int)addrlen < 0) {
3440 return -TARGET_EINVAL;
3441 }
3442
3443 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3444 return -TARGET_EFAULT;
3445 }
3446
3447 addr = alloca(addrlen);
3448
3449 ret_addrlen = addrlen;
3450 ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3451 if (!is_error(ret)) {
3452 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3453 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3454 ret = -TARGET_EFAULT;
3455 }
3456 }
3457 return ret;
3458 }
3459
3460 /* do_getsockname() Must return target values and target errnos. */
3461 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3462 abi_ulong target_addrlen_addr)
3463 {
3464 socklen_t addrlen, ret_addrlen;
3465 void *addr;
3466 abi_long ret;
3467
3468 if (get_user_u32(addrlen, target_addrlen_addr))
3469 return -TARGET_EFAULT;
3470
3471 if ((int)addrlen < 0) {
3472 return -TARGET_EINVAL;
3473 }
3474
3475 if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3476 return -TARGET_EFAULT;
3477 }
3478
3479 addr = alloca(addrlen);
3480
3481 ret_addrlen = addrlen;
3482 ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3483 if (!is_error(ret)) {
3484 host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3485 if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3486 ret = -TARGET_EFAULT;
3487 }
3488 }
3489 return ret;
3490 }
3491
3492 /* do_socketpair() Must return target values and target errnos. */
3493 static abi_long do_socketpair(int domain, int type, int protocol,
3494 abi_ulong target_tab_addr)
3495 {
3496 int tab[2];
3497 abi_long ret;
3498
3499 target_to_host_sock_type(&type);
3500
3501 ret = get_errno(socketpair(domain, type, protocol, tab));
3502 if (!is_error(ret)) {
3503 if (put_user_s32(tab[0], target_tab_addr)
3504 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3505 ret = -TARGET_EFAULT;
3506 }
3507 return ret;
3508 }
3509
3510 /* do_sendto() Must return target values and target errnos. */
3511 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3512 abi_ulong target_addr, socklen_t addrlen)
3513 {
3514 void *addr;
3515 void *host_msg;
3516 void *copy_msg = NULL;
3517 abi_long ret;
3518
3519 if ((int)addrlen < 0) {
3520 return -TARGET_EINVAL;
3521 }
3522
3523 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3524 if (!host_msg)
3525 return -TARGET_EFAULT;
3526 if (fd_trans_target_to_host_data(fd)) {
3527 copy_msg = host_msg;
3528 host_msg = g_malloc(len);
3529 memcpy(host_msg, copy_msg, len);
3530 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3531 if (ret < 0) {
3532 goto fail;
3533 }
3534 }
3535 if (target_addr) {
3536 addr = alloca(addrlen+1);
3537 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3538 if (ret) {
3539 goto fail;
3540 }
3541 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3542 } else {
3543 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3544 }
3545 fail:
3546 if (copy_msg) {
3547 g_free(host_msg);
3548 host_msg = copy_msg;
3549 }
3550 unlock_user(host_msg, msg, 0);
3551 return ret;
3552 }
3553
3554 /* do_recvfrom() Must return target values and target errnos. */
3555 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3556 abi_ulong target_addr,
3557 abi_ulong target_addrlen)
3558 {
3559 socklen_t addrlen, ret_addrlen;
3560 void *addr;
3561 void *host_msg;
3562 abi_long ret;
3563
3564 if (!msg) {
3565 host_msg = NULL;
3566 } else {
3567 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3568 if (!host_msg) {
3569 return -TARGET_EFAULT;
3570 }
3571 }
3572 if (target_addr) {
3573 if (get_user_u32(addrlen, target_addrlen)) {
3574 ret = -TARGET_EFAULT;
3575 goto fail;
3576 }
3577 if ((int)addrlen < 0) {
3578 ret = -TARGET_EINVAL;
3579 goto fail;
3580 }
3581 addr = alloca(addrlen);
3582 ret_addrlen = addrlen;
3583 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3584 addr, &ret_addrlen));
3585 } else {
3586 addr = NULL; /* To keep compiler quiet. */
3587 addrlen = 0; /* To keep compiler quiet. */
3588 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3589 }
3590 if (!is_error(ret)) {
3591 if (fd_trans_host_to_target_data(fd)) {
3592 abi_long trans;
3593 trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3594 if (is_error(trans)) {
3595 ret = trans;
3596 goto fail;
3597 }
3598 }
3599 if (target_addr) {
3600 host_to_target_sockaddr(target_addr, addr,
3601 MIN(addrlen, ret_addrlen));
3602 if (put_user_u32(ret_addrlen, target_addrlen)) {
3603 ret = -TARGET_EFAULT;
3604 goto fail;
3605 }
3606 }
3607 unlock_user(host_msg, msg, len);
3608 } else {
3609 fail:
3610 unlock_user(host_msg, msg, 0);
3611 }
3612 return ret;
3613 }
3614
3615 #ifdef TARGET_NR_socketcall
3616 /* do_socketcall() must return target values and target errnos. */
3617 static abi_long do_socketcall(int num, abi_ulong vptr)
3618 {
3619 static const unsigned nargs[] = { /* number of arguments per operation */
3620 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3621 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3622 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3623 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3624 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3625 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3626 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3627 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3628 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3629 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3630 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3631 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3632 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3633 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3634 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3635 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3636 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3637 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3638 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3639 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3640 };
3641 abi_long a[6]; /* max 6 args */
3642 unsigned i;
3643
3644 /* check the range of the first argument num */
3645 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3646 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3647 return -TARGET_EINVAL;
3648 }
3649 /* ensure we have space for args */
3650 if (nargs[num] > ARRAY_SIZE(a)) {
3651 return -TARGET_EINVAL;
3652 }
3653 /* collect the arguments in a[] according to nargs[] */
3654 for (i = 0; i < nargs[num]; ++i) {
3655 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3656 return -TARGET_EFAULT;
3657 }
3658 }
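/* Illustration: for a guest socketcall(TARGET_SYS_SENDTO, vptr), nargs[num]
 * is 6, so a[0]..a[5] now hold fd, msg, len, flags, addr and addrlen exactly
 * as the guest stored them as consecutive words at vptr, ready for
 * do_sendto() below.
 */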
3659 /* now when we have the args, invoke the appropriate underlying function */
3660 switch (num) {
3661 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3662 return do_socket(a[0], a[1], a[2]);
3663 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3664 return do_bind(a[0], a[1], a[2]);
3665 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3666 return do_connect(a[0], a[1], a[2]);
3667 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3668 return get_errno(listen(a[0], a[1]));
3669 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3670 return do_accept4(a[0], a[1], a[2], 0);
3671 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3672 return do_getsockname(a[0], a[1], a[2]);
3673 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3674 return do_getpeername(a[0], a[1], a[2]);
3675 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3676 return do_socketpair(a[0], a[1], a[2], a[3]);
3677 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3678 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3679 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3680 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3681 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3682 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3683 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3684 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3685 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3686 return get_errno(shutdown(a[0], a[1]));
3687 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3688 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3689 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3690 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3691 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3692 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3693 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3694 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3695 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3696 return do_accept4(a[0], a[1], a[2], a[3]);
3697 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3698 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3699 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3700 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3701 default:
3702 qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3703 return -TARGET_EINVAL;
3704 }
3705 }
3706 #endif
3707