[qemu.git] / linux-user / syscall.c
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #ifdef __ia64__
40 int __clone2(int (*fn)(void *), void *child_stack_base,
41 size_t stack_size, int flags, void *arg, ...);
42 #endif
43 #include <sys/socket.h>
44 #include <sys/un.h>
45 #include <sys/uio.h>
46 #include <poll.h>
47 #include <sys/times.h>
48 #include <sys/shm.h>
49 #include <sys/sem.h>
50 #include <sys/statfs.h>
51 #include <time.h>
52 #include <utime.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/errqueue.h>
62 #include "qemu-common.h"
63 #ifdef CONFIG_TIMERFD
64 #include <sys/timerfd.h>
65 #endif
66 #ifdef TARGET_GPROF
67 #include <sys/gmon.h>
68 #endif
69 #ifdef CONFIG_EVENTFD
70 #include <sys/eventfd.h>
71 #endif
72 #ifdef CONFIG_EPOLL
73 #include <sys/epoll.h>
74 #endif
75 #ifdef CONFIG_ATTR
76 #include "qemu/xattr.h"
77 #endif
78 #ifdef CONFIG_SENDFILE
79 #include <sys/sendfile.h>
80 #endif
81
82 #define termios host_termios
83 #define winsize host_winsize
84 #define termio host_termio
85 #define sgttyb host_sgttyb /* same as target */
86 #define tchars host_tchars /* same as target */
87 #define ltchars host_ltchars /* same as target */
88
89 #include <linux/termios.h>
90 #include <linux/unistd.h>
91 #include <linux/cdrom.h>
92 #include <linux/hdreg.h>
93 #include <linux/soundcard.h>
94 #include <linux/kd.h>
95 #include <linux/mtio.h>
96 #include <linux/fs.h>
97 #if defined(CONFIG_FIEMAP)
98 #include <linux/fiemap.h>
99 #endif
100 #include <linux/fb.h>
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #ifdef CONFIG_RTNETLINK
110 #include <linux/rtnetlink.h>
111 #include <linux/if_bridge.h>
112 #endif
113 #include <linux/audit.h>
114 #include "linux_loop.h"
115 #include "uname.h"
116
117 #include "qemu.h"
118
119 #ifndef CLONE_IO
120 #define CLONE_IO 0x80000000 /* Clone io context */
121 #endif
122
123 /* We can't directly call the host clone syscall, because this will
124 * badly confuse libc (breaking mutexes, for example). So we must
125 * divide clone flags into:
126 * * flag combinations that look like pthread_create()
127 * * flag combinations that look like fork()
128 * * flags we can implement within QEMU itself
129 * * flags we can't support and will return an error for
130 */
131 /* For thread creation, all these flags must be present; for
132 * fork, none must be present.
133 */
134 #define CLONE_THREAD_FLAGS \
135 (CLONE_VM | CLONE_FS | CLONE_FILES | \
136 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
137
138 /* These flags are ignored:
139 * CLONE_DETACHED is now ignored by the kernel;
140 * CLONE_IO is just an optimisation hint to the I/O scheduler
141 */
142 #define CLONE_IGNORED_FLAGS \
143 (CLONE_DETACHED | CLONE_IO)
144
145 /* Flags for fork which we can implement within QEMU itself */
146 #define CLONE_OPTIONAL_FORK_FLAGS \
147 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
148 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
149
150 /* Flags for thread creation which we can implement within QEMU itself */
151 #define CLONE_OPTIONAL_THREAD_FLAGS \
152 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
153 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
154
155 #define CLONE_INVALID_FORK_FLAGS \
156 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
157
158 #define CLONE_INVALID_THREAD_FLAGS \
159 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
160 CLONE_IGNORED_FLAGS))
161
162 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
163 * have almost all been allocated. We cannot support any of
164 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
165 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
166 * The checks against the invalid thread masks above will catch these.
167 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
168 */
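/* As a concrete example of this split: glibc's pthread_create() typically
 * issues clone() with CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 * CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 * CLONE_CHILD_CLEARTID, i.e. CLONE_THREAD_FLAGS plus a subset of
 * CLONE_OPTIONAL_THREAD_FLAGS, so it takes the pthread_create()-like path,
 * while a plain fork()-style clone() passes only an exit signal in the
 * CSIGNAL bits (typically SIGCHLD).
 */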
169
170 //#define DEBUG
171 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
172 * once. This exercises the codepaths for restart.
173 */
174 //#define DEBUG_ERESTARTSYS
175
176 //#include <linux/msdos_fs.h>
177 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
178 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
179
180 #undef _syscall0
181 #undef _syscall1
182 #undef _syscall2
183 #undef _syscall3
184 #undef _syscall4
185 #undef _syscall5
186 #undef _syscall6
187
188 #define _syscall0(type,name) \
189 static type name (void) \
190 { \
191 return syscall(__NR_##name); \
192 }
193
194 #define _syscall1(type,name,type1,arg1) \
195 static type name (type1 arg1) \
196 { \
197 return syscall(__NR_##name, arg1); \
198 }
199
200 #define _syscall2(type,name,type1,arg1,type2,arg2) \
201 static type name (type1 arg1,type2 arg2) \
202 { \
203 return syscall(__NR_##name, arg1, arg2); \
204 }
205
206 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
207 static type name (type1 arg1,type2 arg2,type3 arg3) \
208 { \
209 return syscall(__NR_##name, arg1, arg2, arg3); \
210 }
211
212 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
213 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
214 { \
215 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
216 }
217
218 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
219 type5,arg5) \
220 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
221 { \
222 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
223 }
224
225
226 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
227 type5,arg5,type6,arg6) \
228 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
229 type6 arg6) \
230 { \
231 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
232 }
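/* For example, the _syscall1() use further below,
 *     _syscall1(int, exit_group, int, error_code)
 * expands to
 *     static int exit_group(int error_code)
 *     {
 *         return syscall(__NR_exit_group, error_code);
 *     }
 * i.e. a local wrapper that invokes the host syscall directly instead of
 * going through any libc wrapper of the same name.
 */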
233
234
235 #define __NR_sys_uname __NR_uname
236 #define __NR_sys_getcwd1 __NR_getcwd
237 #define __NR_sys_getdents __NR_getdents
238 #define __NR_sys_getdents64 __NR_getdents64
239 #define __NR_sys_getpriority __NR_getpriority
240 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
241 #define __NR_sys_syslog __NR_syslog
242 #define __NR_sys_futex __NR_futex
243 #define __NR_sys_inotify_init __NR_inotify_init
244 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
245 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
246
247 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
248 defined(__s390x__)
249 #define __NR__llseek __NR_lseek
250 #endif
251
252 /* Newer kernel ports have llseek() instead of _llseek() */
253 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
254 #define TARGET_NR__llseek TARGET_NR_llseek
255 #endif
256
257 #ifdef __NR_gettid
258 _syscall0(int, gettid)
259 #else
260 /* This is a replacement for the host gettid() and must return a host
261 errno. */
262 static int gettid(void) {
263 return -ENOSYS;
264 }
265 #endif
266 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
267 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
268 #endif
269 #if !defined(__NR_getdents) || \
270 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
271 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
272 #endif
273 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
274 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
275 loff_t *, res, uint, wh);
276 #endif
277 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
278 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
279 #ifdef __NR_exit_group
280 _syscall1(int,exit_group,int,error_code)
281 #endif
282 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
283 _syscall1(int,set_tid_address,int *,tidptr)
284 #endif
285 #if defined(TARGET_NR_futex) && defined(__NR_futex)
286 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
287 const struct timespec *,timeout,int *,uaddr2,int,val3)
288 #endif
289 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
290 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
291 unsigned long *, user_mask_ptr);
292 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
293 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
294 unsigned long *, user_mask_ptr);
295 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
296 void *, arg);
297 _syscall2(int, capget, struct __user_cap_header_struct *, header,
298 struct __user_cap_data_struct *, data);
299 _syscall2(int, capset, struct __user_cap_header_struct *, header,
300 struct __user_cap_data_struct *, data);
301 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
302 _syscall2(int, ioprio_get, int, which, int, who)
303 #endif
304 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
305 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
306 #endif
307 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
308 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
309 #endif
310
311 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
312 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
313 unsigned long, idx1, unsigned long, idx2)
314 #endif
315
316 static bitmask_transtbl fcntl_flags_tbl[] = {
317 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
318 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
319 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
320 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
321 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
322 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
323 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
324 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
325 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
326 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
327 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
328 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
329 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
330 #if defined(O_DIRECT)
331 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
332 #endif
333 #if defined(O_NOATIME)
334 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
335 #endif
336 #if defined(O_CLOEXEC)
337 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
338 #endif
339 #if defined(O_PATH)
340 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
341 #endif
342 /* Don't terminate the list prematurely on 64-bit host+guest. */
343 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
344 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
345 #endif
346 { 0, 0, 0, 0 }
347 };
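/* Each entry is (target_mask, target_bits, host_mask, host_bits): the generic
 * bitmask conversion helpers (such as target_to_host_bitmask()) walk the table
 * so that, for example, a guest passing TARGET_O_NONBLOCK to open() ends up
 * with the host's O_NONBLOCK, whatever its numeric value is on this host.
 */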
348
349 enum {
350 QEMU_IFLA_BR_UNSPEC,
351 QEMU_IFLA_BR_FORWARD_DELAY,
352 QEMU_IFLA_BR_HELLO_TIME,
353 QEMU_IFLA_BR_MAX_AGE,
354 QEMU_IFLA_BR_AGEING_TIME,
355 QEMU_IFLA_BR_STP_STATE,
356 QEMU_IFLA_BR_PRIORITY,
357 QEMU_IFLA_BR_VLAN_FILTERING,
358 QEMU_IFLA_BR_VLAN_PROTOCOL,
359 QEMU_IFLA_BR_GROUP_FWD_MASK,
360 QEMU_IFLA_BR_ROOT_ID,
361 QEMU_IFLA_BR_BRIDGE_ID,
362 QEMU_IFLA_BR_ROOT_PORT,
363 QEMU_IFLA_BR_ROOT_PATH_COST,
364 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
365 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
366 QEMU_IFLA_BR_HELLO_TIMER,
367 QEMU_IFLA_BR_TCN_TIMER,
368 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
369 QEMU_IFLA_BR_GC_TIMER,
370 QEMU_IFLA_BR_GROUP_ADDR,
371 QEMU_IFLA_BR_FDB_FLUSH,
372 QEMU_IFLA_BR_MCAST_ROUTER,
373 QEMU_IFLA_BR_MCAST_SNOOPING,
374 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
375 QEMU_IFLA_BR_MCAST_QUERIER,
376 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
377 QEMU_IFLA_BR_MCAST_HASH_MAX,
378 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
379 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
380 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
381 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
382 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
383 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
384 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
385 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
386 QEMU_IFLA_BR_NF_CALL_IPTABLES,
387 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
388 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
389 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
390 QEMU_IFLA_BR_PAD,
391 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
392 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
393 QEMU___IFLA_BR_MAX,
394 };
395
396 enum {
397 QEMU_IFLA_UNSPEC,
398 QEMU_IFLA_ADDRESS,
399 QEMU_IFLA_BROADCAST,
400 QEMU_IFLA_IFNAME,
401 QEMU_IFLA_MTU,
402 QEMU_IFLA_LINK,
403 QEMU_IFLA_QDISC,
404 QEMU_IFLA_STATS,
405 QEMU_IFLA_COST,
406 QEMU_IFLA_PRIORITY,
407 QEMU_IFLA_MASTER,
408 QEMU_IFLA_WIRELESS,
409 QEMU_IFLA_PROTINFO,
410 QEMU_IFLA_TXQLEN,
411 QEMU_IFLA_MAP,
412 QEMU_IFLA_WEIGHT,
413 QEMU_IFLA_OPERSTATE,
414 QEMU_IFLA_LINKMODE,
415 QEMU_IFLA_LINKINFO,
416 QEMU_IFLA_NET_NS_PID,
417 QEMU_IFLA_IFALIAS,
418 QEMU_IFLA_NUM_VF,
419 QEMU_IFLA_VFINFO_LIST,
420 QEMU_IFLA_STATS64,
421 QEMU_IFLA_VF_PORTS,
422 QEMU_IFLA_PORT_SELF,
423 QEMU_IFLA_AF_SPEC,
424 QEMU_IFLA_GROUP,
425 QEMU_IFLA_NET_NS_FD,
426 QEMU_IFLA_EXT_MASK,
427 QEMU_IFLA_PROMISCUITY,
428 QEMU_IFLA_NUM_TX_QUEUES,
429 QEMU_IFLA_NUM_RX_QUEUES,
430 QEMU_IFLA_CARRIER,
431 QEMU_IFLA_PHYS_PORT_ID,
432 QEMU_IFLA_CARRIER_CHANGES,
433 QEMU_IFLA_PHYS_SWITCH_ID,
434 QEMU_IFLA_LINK_NETNSID,
435 QEMU_IFLA_PHYS_PORT_NAME,
436 QEMU_IFLA_PROTO_DOWN,
437 QEMU_IFLA_GSO_MAX_SEGS,
438 QEMU_IFLA_GSO_MAX_SIZE,
439 QEMU_IFLA_PAD,
440 QEMU_IFLA_XDP,
441 QEMU___IFLA_MAX
442 };
443
444 enum {
445 QEMU_IFLA_BRPORT_UNSPEC,
446 QEMU_IFLA_BRPORT_STATE,
447 QEMU_IFLA_BRPORT_PRIORITY,
448 QEMU_IFLA_BRPORT_COST,
449 QEMU_IFLA_BRPORT_MODE,
450 QEMU_IFLA_BRPORT_GUARD,
451 QEMU_IFLA_BRPORT_PROTECT,
452 QEMU_IFLA_BRPORT_FAST_LEAVE,
453 QEMU_IFLA_BRPORT_LEARNING,
454 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
455 QEMU_IFLA_BRPORT_PROXYARP,
456 QEMU_IFLA_BRPORT_LEARNING_SYNC,
457 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
458 QEMU_IFLA_BRPORT_ROOT_ID,
459 QEMU_IFLA_BRPORT_BRIDGE_ID,
460 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
461 QEMU_IFLA_BRPORT_DESIGNATED_COST,
462 QEMU_IFLA_BRPORT_ID,
463 QEMU_IFLA_BRPORT_NO,
464 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
465 QEMU_IFLA_BRPORT_CONFIG_PENDING,
466 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
467 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
468 QEMU_IFLA_BRPORT_HOLD_TIMER,
469 QEMU_IFLA_BRPORT_FLUSH,
470 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
471 QEMU_IFLA_BRPORT_PAD,
472 QEMU___IFLA_BRPORT_MAX
473 };
474
475 enum {
476 QEMU_IFLA_INFO_UNSPEC,
477 QEMU_IFLA_INFO_KIND,
478 QEMU_IFLA_INFO_DATA,
479 QEMU_IFLA_INFO_XSTATS,
480 QEMU_IFLA_INFO_SLAVE_KIND,
481 QEMU_IFLA_INFO_SLAVE_DATA,
482 QEMU___IFLA_INFO_MAX,
483 };
484
485 enum {
486 QEMU_IFLA_INET_UNSPEC,
487 QEMU_IFLA_INET_CONF,
488 QEMU___IFLA_INET_MAX,
489 };
490
491 enum {
492 QEMU_IFLA_INET6_UNSPEC,
493 QEMU_IFLA_INET6_FLAGS,
494 QEMU_IFLA_INET6_CONF,
495 QEMU_IFLA_INET6_STATS,
496 QEMU_IFLA_INET6_MCAST,
497 QEMU_IFLA_INET6_CACHEINFO,
498 QEMU_IFLA_INET6_ICMP6STATS,
499 QEMU_IFLA_INET6_TOKEN,
500 QEMU_IFLA_INET6_ADDR_GEN_MODE,
501 QEMU___IFLA_INET6_MAX
502 };
503
504 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
505 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
506 typedef struct TargetFdTrans {
507 TargetFdDataFunc host_to_target_data;
508 TargetFdDataFunc target_to_host_data;
509 TargetFdAddrFunc target_to_host_addr;
510 } TargetFdTrans;
511
512 static TargetFdTrans **target_fd_trans;
513
514 static unsigned int target_fd_max;
515
516 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
517 {
518 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
519 return target_fd_trans[fd]->target_to_host_data;
520 }
521 return NULL;
522 }
523
524 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
525 {
526 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
527 return target_fd_trans[fd]->host_to_target_data;
528 }
529 return NULL;
530 }
531
532 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
533 {
534 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
535 return target_fd_trans[fd]->target_to_host_addr;
536 }
537 return NULL;
538 }
539
540 static void fd_trans_register(int fd, TargetFdTrans *trans)
541 {
542 unsigned int oldmax;
543
544 if (fd >= target_fd_max) {
545 oldmax = target_fd_max;
546 target_fd_max = ((fd >> 6) + 1) << 6; /* grow in slices of 64 entries */
547 target_fd_trans = g_renew(TargetFdTrans *,
548 target_fd_trans, target_fd_max);
549 memset((void *)(target_fd_trans + oldmax), 0,
550 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
551 }
552 target_fd_trans[fd] = trans;
553 }
554
555 static void fd_trans_unregister(int fd)
556 {
557 if (fd >= 0 && fd < target_fd_max) {
558 target_fd_trans[fd] = NULL;
559 }
560 }
561
562 static void fd_trans_dup(int oldfd, int newfd)
563 {
564 fd_trans_unregister(newfd);
565 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
566 fd_trans_register(newfd, target_fd_trans[oldfd]);
567 }
568 }
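/* Example use of this translation layer: when the emulation creates a file
 * descriptor whose payload layout differs between host and target (for
 * example signalfd file descriptors or netlink sockets), it registers a
 * TargetFdTrans with the appropriate hooks, so the read()/write()/recvmsg()
 * paths can later call fd_trans_host_to_target_data(fd) and friends to
 * convert the data.  The dup()/dup2()/F_DUPFD paths call fd_trans_dup() so
 * the duplicate inherits the same translators.
 */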
569
570 static int sys_getcwd1(char *buf, size_t size)
571 {
572 if (getcwd(buf, size) == NULL) {
573 /* getcwd() sets errno */
574 return (-1);
575 }
576 return strlen(buf)+1;
577 }
578
579 #ifdef TARGET_NR_utimensat
580 #if defined(__NR_utimensat)
581 #define __NR_sys_utimensat __NR_utimensat
582 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
583 const struct timespec *,tsp,int,flags)
584 #else
585 static int sys_utimensat(int dirfd, const char *pathname,
586 const struct timespec times[2], int flags)
587 {
588 errno = ENOSYS;
589 return -1;
590 }
591 #endif
592 #endif /* TARGET_NR_utimensat */
593
594 #ifdef CONFIG_INOTIFY
595 #include <sys/inotify.h>
596
597 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
598 static int sys_inotify_init(void)
599 {
600 return (inotify_init());
601 }
602 #endif
603 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
604 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
605 {
606 return (inotify_add_watch(fd, pathname, mask));
607 }
608 #endif
609 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
610 static int sys_inotify_rm_watch(int fd, int32_t wd)
611 {
612 return (inotify_rm_watch(fd, wd));
613 }
614 #endif
615 #ifdef CONFIG_INOTIFY1
616 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
617 static int sys_inotify_init1(int flags)
618 {
619 return (inotify_init1(flags));
620 }
621 #endif
622 #endif
623 #else
624 /* Userspace can usually survive runtime without inotify */
625 #undef TARGET_NR_inotify_init
626 #undef TARGET_NR_inotify_init1
627 #undef TARGET_NR_inotify_add_watch
628 #undef TARGET_NR_inotify_rm_watch
629 #endif /* CONFIG_INOTIFY */
630
631 #if defined(TARGET_NR_prlimit64)
632 #ifndef __NR_prlimit64
633 # define __NR_prlimit64 -1
634 #endif
635 #define __NR_sys_prlimit64 __NR_prlimit64
636 /* The glibc rlimit structure may not be that used by the underlying syscall */
637 struct host_rlimit64 {
638 uint64_t rlim_cur;
639 uint64_t rlim_max;
640 };
641 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
642 const struct host_rlimit64 *, new_limit,
643 struct host_rlimit64 *, old_limit)
644 #endif
645
646
647 #if defined(TARGET_NR_timer_create)
648 /* Maximum of 32 active POSIX timers allowed at any one time. */
649 static timer_t g_posix_timers[32] = { 0, } ;
650
651 static inline int next_free_host_timer(void)
652 {
653 int k ;
654 /* FIXME: Does finding the next free slot require a lock? */
655 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
656 if (g_posix_timers[k] == 0) {
657 g_posix_timers[k] = (timer_t) 1;
658 return k;
659 }
660 }
661 return -1;
662 }
663 #endif
664
665 /* ARM EABI and MIPS expect 64-bit types aligned even on pairs of registers */
666 #ifdef TARGET_ARM
667 static inline int regpairs_aligned(void *cpu_env) {
668 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
669 }
670 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
671 static inline int regpairs_aligned(void *cpu_env) { return 1; }
672 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
673 /* The SysV ABI for PPC32 expects 64-bit parameters to be passed on odd/even pairs
674 * of registers which translates to the same as ARM/MIPS, because we start with
675 * r3 as arg1 */
676 static inline int regpairs_aligned(void *cpu_env) { return 1; }
677 #else
678 static inline int regpairs_aligned(void *cpu_env) { return 0; }
679 #endif
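/* For example, on ARM EABI the 64-bit file offset of pread64() is passed in
 * an even/odd register pair, so the guest ABI inserts a padding argument in
 * front of it; the syscall emulation below uses regpairs_aligned() to decide
 * whether such a padding argument must be skipped before reassembling the
 * 64-bit value from two abi_long halves.
 */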
680
681 #define ERRNO_TABLE_SIZE 1200
682
683 /* target_to_host_errno_table[] is initialized from
684 * host_to_target_errno_table[] in syscall_init(). */
685 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
686 };
687
688 /*
689 * This list is the union of errno values overridden in asm-<arch>/errno.h
690 * minus the errnos that are not actually generic to all archs.
691 */
692 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
693 [EAGAIN] = TARGET_EAGAIN,
694 [EIDRM] = TARGET_EIDRM,
695 [ECHRNG] = TARGET_ECHRNG,
696 [EL2NSYNC] = TARGET_EL2NSYNC,
697 [EL3HLT] = TARGET_EL3HLT,
698 [EL3RST] = TARGET_EL3RST,
699 [ELNRNG] = TARGET_ELNRNG,
700 [EUNATCH] = TARGET_EUNATCH,
701 [ENOCSI] = TARGET_ENOCSI,
702 [EL2HLT] = TARGET_EL2HLT,
703 [EDEADLK] = TARGET_EDEADLK,
704 [ENOLCK] = TARGET_ENOLCK,
705 [EBADE] = TARGET_EBADE,
706 [EBADR] = TARGET_EBADR,
707 [EXFULL] = TARGET_EXFULL,
708 [ENOANO] = TARGET_ENOANO,
709 [EBADRQC] = TARGET_EBADRQC,
710 [EBADSLT] = TARGET_EBADSLT,
711 [EBFONT] = TARGET_EBFONT,
712 [ENOSTR] = TARGET_ENOSTR,
713 [ENODATA] = TARGET_ENODATA,
714 [ETIME] = TARGET_ETIME,
715 [ENOSR] = TARGET_ENOSR,
716 [ENONET] = TARGET_ENONET,
717 [ENOPKG] = TARGET_ENOPKG,
718 [EREMOTE] = TARGET_EREMOTE,
719 [ENOLINK] = TARGET_ENOLINK,
720 [EADV] = TARGET_EADV,
721 [ESRMNT] = TARGET_ESRMNT,
722 [ECOMM] = TARGET_ECOMM,
723 [EPROTO] = TARGET_EPROTO,
724 [EDOTDOT] = TARGET_EDOTDOT,
725 [EMULTIHOP] = TARGET_EMULTIHOP,
726 [EBADMSG] = TARGET_EBADMSG,
727 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
728 [EOVERFLOW] = TARGET_EOVERFLOW,
729 [ENOTUNIQ] = TARGET_ENOTUNIQ,
730 [EBADFD] = TARGET_EBADFD,
731 [EREMCHG] = TARGET_EREMCHG,
732 [ELIBACC] = TARGET_ELIBACC,
733 [ELIBBAD] = TARGET_ELIBBAD,
734 [ELIBSCN] = TARGET_ELIBSCN,
735 [ELIBMAX] = TARGET_ELIBMAX,
736 [ELIBEXEC] = TARGET_ELIBEXEC,
737 [EILSEQ] = TARGET_EILSEQ,
738 [ENOSYS] = TARGET_ENOSYS,
739 [ELOOP] = TARGET_ELOOP,
740 [ERESTART] = TARGET_ERESTART,
741 [ESTRPIPE] = TARGET_ESTRPIPE,
742 [ENOTEMPTY] = TARGET_ENOTEMPTY,
743 [EUSERS] = TARGET_EUSERS,
744 [ENOTSOCK] = TARGET_ENOTSOCK,
745 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
746 [EMSGSIZE] = TARGET_EMSGSIZE,
747 [EPROTOTYPE] = TARGET_EPROTOTYPE,
748 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
749 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
750 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
751 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
752 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
753 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
754 [EADDRINUSE] = TARGET_EADDRINUSE,
755 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
756 [ENETDOWN] = TARGET_ENETDOWN,
757 [ENETUNREACH] = TARGET_ENETUNREACH,
758 [ENETRESET] = TARGET_ENETRESET,
759 [ECONNABORTED] = TARGET_ECONNABORTED,
760 [ECONNRESET] = TARGET_ECONNRESET,
761 [ENOBUFS] = TARGET_ENOBUFS,
762 [EISCONN] = TARGET_EISCONN,
763 [ENOTCONN] = TARGET_ENOTCONN,
764 [EUCLEAN] = TARGET_EUCLEAN,
765 [ENOTNAM] = TARGET_ENOTNAM,
766 [ENAVAIL] = TARGET_ENAVAIL,
767 [EISNAM] = TARGET_EISNAM,
768 [EREMOTEIO] = TARGET_EREMOTEIO,
769 [EDQUOT] = TARGET_EDQUOT,
770 [ESHUTDOWN] = TARGET_ESHUTDOWN,
771 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
772 [ETIMEDOUT] = TARGET_ETIMEDOUT,
773 [ECONNREFUSED] = TARGET_ECONNREFUSED,
774 [EHOSTDOWN] = TARGET_EHOSTDOWN,
775 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
776 [EALREADY] = TARGET_EALREADY,
777 [EINPROGRESS] = TARGET_EINPROGRESS,
778 [ESTALE] = TARGET_ESTALE,
779 [ECANCELED] = TARGET_ECANCELED,
780 [ENOMEDIUM] = TARGET_ENOMEDIUM,
781 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
782 #ifdef ENOKEY
783 [ENOKEY] = TARGET_ENOKEY,
784 #endif
785 #ifdef EKEYEXPIRED
786 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
787 #endif
788 #ifdef EKEYREVOKED
789 [EKEYREVOKED] = TARGET_EKEYREVOKED,
790 #endif
791 #ifdef EKEYREJECTED
792 [EKEYREJECTED] = TARGET_EKEYREJECTED,
793 #endif
794 #ifdef EOWNERDEAD
795 [EOWNERDEAD] = TARGET_EOWNERDEAD,
796 #endif
797 #ifdef ENOTRECOVERABLE
798 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
799 #endif
800 #ifdef ENOMSG
801 [ENOMSG] = TARGET_ENOMSG,
802 #endif
803 #ifdef ERFKILL
804 [ERFKILL] = TARGET_ERFKILL,
805 #endif
806 #ifdef EHWPOISON
807 [EHWPOISON] = TARGET_EHWPOISON,
808 #endif
809 };
810
811 static inline int host_to_target_errno(int err)
812 {
813 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
814 host_to_target_errno_table[err]) {
815 return host_to_target_errno_table[err];
816 }
817 return err;
818 }
819
820 static inline int target_to_host_errno(int err)
821 {
822 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
823 target_to_host_errno_table[err]) {
824 return target_to_host_errno_table[err];
825 }
826 return err;
827 }
828
829 static inline abi_long get_errno(abi_long ret)
830 {
831 if (ret == -1)
832 return -host_to_target_errno(errno);
833 else
834 return ret;
835 }
836
837 static inline int is_error(abi_long ret)
838 {
839 return (abi_ulong)ret >= (abi_ulong)(-4096);
840 }
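/* Host syscalls report failure libc-style (-1 with errno set); get_errno()
 * folds that back into the kernel convention of returning a small negative
 * errno, so is_error() only needs to check for values in the [-4096, -1]
 * range.  For example, a write() that fails with EBADF reaches the caller
 * here as -TARGET_EBADF rather than as -1 plus errno.
 */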
841
842 const char *target_strerror(int err)
843 {
844 if (err == TARGET_ERESTARTSYS) {
845 return "To be restarted";
846 }
847 if (err == TARGET_QEMU_ESIGRETURN) {
848 return "Successful exit from sigreturn";
849 }
850
851 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
852 return NULL;
853 }
854 return strerror(target_to_host_errno(err));
855 }
856
857 #define safe_syscall0(type, name) \
858 static type safe_##name(void) \
859 { \
860 return safe_syscall(__NR_##name); \
861 }
862
863 #define safe_syscall1(type, name, type1, arg1) \
864 static type safe_##name(type1 arg1) \
865 { \
866 return safe_syscall(__NR_##name, arg1); \
867 }
868
869 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
870 static type safe_##name(type1 arg1, type2 arg2) \
871 { \
872 return safe_syscall(__NR_##name, arg1, arg2); \
873 }
874
875 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
876 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
877 { \
878 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
879 }
880
881 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
882 type4, arg4) \
883 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
884 { \
885 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
886 }
887
888 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
889 type4, arg4, type5, arg5) \
890 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
891 type5 arg5) \
892 { \
893 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
894 }
895
896 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
897 type4, arg4, type5, arg5, type6, arg6) \
898 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
899 type5 arg5, type6 arg6) \
900 { \
901 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
902 }
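/* The safe_##name wrappers generated by the macros above go through
 * safe_syscall() rather than through libc, so that a guest signal arriving
 * while the host syscall is blocked is handled without the usual
 * check-then-block race.  Blocking syscalls in do_syscall() are expected to
 * use these wrappers (e.g. safe_read()/safe_write() below instead of plain
 * read()/write()).
 */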
903
904 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
905 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
906 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
907 int, flags, mode_t, mode)
908 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
909 struct rusage *, rusage)
910 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
911 int, options, struct rusage *, rusage)
912 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
913 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
914 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
915 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
916 struct timespec *, tsp, const sigset_t *, sigmask,
917 size_t, sigsetsize)
918 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
919 int, maxevents, int, timeout, const sigset_t *, sigmask,
920 size_t, sigsetsize)
921 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
922 const struct timespec *,timeout,int *,uaddr2,int,val3)
923 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
924 safe_syscall2(int, kill, pid_t, pid, int, sig)
925 safe_syscall2(int, tkill, int, tid, int, sig)
926 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
927 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
928 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
929 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
930 unsigned long, pos_l, unsigned long, pos_h)
931 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
932 unsigned long, pos_l, unsigned long, pos_h)
933 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
934 socklen_t, addrlen)
935 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
936 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
937 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
938 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
939 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
940 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
941 safe_syscall2(int, flock, int, fd, int, operation)
942 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
943 const struct timespec *, uts, size_t, sigsetsize)
944 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
945 int, flags)
946 safe_syscall2(int, nanosleep, const struct timespec *, req,
947 struct timespec *, rem)
948 #ifdef TARGET_NR_clock_nanosleep
949 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
950 const struct timespec *, req, struct timespec *, rem)
951 #endif
952 #ifdef __NR_msgsnd
953 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
954 int, flags)
955 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
956 long, msgtype, int, flags)
957 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
958 unsigned, nsops, const struct timespec *, timeout)
959 #else
960 /* This host kernel architecture uses a single ipc syscall; fake up
961 * wrappers for the sub-operations to hide this implementation detail.
962 * Annoyingly we can't include linux/ipc.h to get the constant definitions
963 * for the call parameter because some structs in there conflict with the
964 * sys/ipc.h ones. So we just define them here, and rely on them being
965 * the same for all host architectures.
966 */
967 #define Q_SEMTIMEDOP 4
968 #define Q_MSGSND 11
969 #define Q_MSGRCV 12
970 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
971
972 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
973 void *, ptr, long, fifth)
974 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
975 {
976 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
977 }
978 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
979 {
980 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
981 }
982 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
983 const struct timespec *timeout)
984 {
985 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
986 (long)timeout);
987 }
988 #endif
989 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
990 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
991 size_t, len, unsigned, prio, const struct timespec *, timeout)
992 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
993 size_t, len, unsigned *, prio, const struct timespec *, timeout)
994 #endif
995 /* We do ioctl like this rather than via safe_syscall3 to preserve the
996 * "third argument might be integer or pointer or not present" behaviour of
997 * the libc function.
998 */
999 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1000 /* Similarly for fcntl. Note that callers must always:
1001 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1002 * use the flock64 struct rather than unsuffixed flock
1003 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1004 */
1005 #ifdef __NR_fcntl64
1006 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1007 #else
1008 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1009 #endif
1010
1011 static inline int host_to_target_sock_type(int host_type)
1012 {
1013 int target_type;
1014
1015 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1016 case SOCK_DGRAM:
1017 target_type = TARGET_SOCK_DGRAM;
1018 break;
1019 case SOCK_STREAM:
1020 target_type = TARGET_SOCK_STREAM;
1021 break;
1022 default:
1023 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1024 break;
1025 }
1026
1027 #if defined(SOCK_CLOEXEC)
1028 if (host_type & SOCK_CLOEXEC) {
1029 target_type |= TARGET_SOCK_CLOEXEC;
1030 }
1031 #endif
1032
1033 #if defined(SOCK_NONBLOCK)
1034 if (host_type & SOCK_NONBLOCK) {
1035 target_type |= TARGET_SOCK_NONBLOCK;
1036 }
1037 #endif
1038
1039 return target_type;
1040 }
1041
1042 static abi_ulong target_brk;
1043 static abi_ulong target_original_brk;
1044 static abi_ulong brk_page;
1045
1046 void target_set_brk(abi_ulong new_brk)
1047 {
1048 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1049 brk_page = HOST_PAGE_ALIGN(target_brk);
1050 }
1051
1052 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1053 #define DEBUGF_BRK(message, args...)
1054
1055 /* do_brk() must return target values and target errnos. */
1056 abi_long do_brk(abi_ulong new_brk)
1057 {
1058 abi_long mapped_addr;
1059 abi_ulong new_alloc_size;
1060
1061 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
1062
1063 if (!new_brk) {
1064 DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
1065 return target_brk;
1066 }
1067 if (new_brk < target_original_brk) {
1068 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
1069 target_brk);
1070 return target_brk;
1071 }
1072
1073 /* If the new brk is less than the highest page reserved to the
1074 * target heap allocation, set it and we're almost done... */
1075 if (new_brk <= brk_page) {
1076 /* Heap contents are initialized to zero, as for anonymous
1077 * mapped pages. */
1078 if (new_brk > target_brk) {
1079 memset(g2h(target_brk), 0, new_brk - target_brk);
1080 }
1081 target_brk = new_brk;
1082 DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
1083 return target_brk;
1084 }
1085
1086 /* We need to allocate more memory after the brk... Note that
1087 * we don't use MAP_FIXED because that will map over the top of
1088 * any existing mapping (like the one with the host libc or qemu
1089 * itself); instead we treat "mapped but at wrong address" as
1090 * a failure and unmap again.
1091 */
1092 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
1093 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
1094 PROT_READ|PROT_WRITE,
1095 MAP_ANON|MAP_PRIVATE, 0, 0));
1096
1097 if (mapped_addr == brk_page) {
1098 /* Heap contents are initialized to zero, as for anonymous
1099 * mapped pages. Technically the new pages are already
1100 * initialized to zero since they *are* anonymous mapped
1101 * pages, however we have to take care with the contents that
1102 * come from the remaining part of the previous page: it may
1103 * contain garbage data from a previous heap usage (grown
1104 * then shrunk). */
1105 memset(g2h(target_brk), 0, brk_page - target_brk);
1106
1107 target_brk = new_brk;
1108 brk_page = HOST_PAGE_ALIGN(target_brk);
1109 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
1110 target_brk);
1111 return target_brk;
1112 } else if (mapped_addr != -1) {
1113 /* Mapped but at wrong address, meaning there wasn't actually
1114 * enough space for this brk.
1115 */
1116 target_munmap(mapped_addr, new_alloc_size);
1117 mapped_addr = -1;
1118 DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
1119 }
1120 else {
1121 DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
1122 }
1123
1124 #if defined(TARGET_ALPHA)
1125 /* We (partially) emulate OSF/1 on Alpha, which requires we
1126 return a proper errno, not an unchanged brk value. */
1127 return -TARGET_ENOMEM;
1128 #endif
1129 /* For everything else, return the previous break. */
1130 return target_brk;
1131 }
1132
1133 static inline abi_long copy_from_user_fdset(fd_set *fds,
1134 abi_ulong target_fds_addr,
1135 int n)
1136 {
1137 int i, nw, j, k;
1138 abi_ulong b, *target_fds;
1139
1140 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1141 if (!(target_fds = lock_user(VERIFY_READ,
1142 target_fds_addr,
1143 sizeof(abi_ulong) * nw,
1144 1)))
1145 return -TARGET_EFAULT;
1146
1147 FD_ZERO(fds);
1148 k = 0;
1149 for (i = 0; i < nw; i++) {
1150 /* grab the abi_ulong */
1151 __get_user(b, &target_fds[i]);
1152 for (j = 0; j < TARGET_ABI_BITS; j++) {
1153 /* check the bit inside the abi_ulong */
1154 if ((b >> j) & 1)
1155 FD_SET(k, fds);
1156 k++;
1157 }
1158 }
1159
1160 unlock_user(target_fds, target_fds_addr, 0);
1161
1162 return 0;
1163 }
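/* The guest fd_set is treated as an array of abi_ulong words of
 * TARGET_ABI_BITS bits each, and every bit is replayed through FD_SET()
 * rather than copied as raw memory.  For instance, with a 32-bit guest and
 * n == 40, nw == 2 words are read and up to 64 bits are inspected, which
 * keeps the conversion correct regardless of host word size and endianness.
 */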
1164
1165 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1166 abi_ulong target_fds_addr,
1167 int n)
1168 {
1169 if (target_fds_addr) {
1170 if (copy_from_user_fdset(fds, target_fds_addr, n))
1171 return -TARGET_EFAULT;
1172 *fds_ptr = fds;
1173 } else {
1174 *fds_ptr = NULL;
1175 }
1176 return 0;
1177 }
1178
1179 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1180 const fd_set *fds,
1181 int n)
1182 {
1183 int i, nw, j, k;
1184 abi_long v;
1185 abi_ulong *target_fds;
1186
1187 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1188 if (!(target_fds = lock_user(VERIFY_WRITE,
1189 target_fds_addr,
1190 sizeof(abi_ulong) * nw,
1191 0)))
1192 return -TARGET_EFAULT;
1193
1194 k = 0;
1195 for (i = 0; i < nw; i++) {
1196 v = 0;
1197 for (j = 0; j < TARGET_ABI_BITS; j++) {
1198 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1199 k++;
1200 }
1201 __put_user(v, &target_fds[i]);
1202 }
1203
1204 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1205
1206 return 0;
1207 }
1208
1209 #if defined(__alpha__)
1210 #define HOST_HZ 1024
1211 #else
1212 #define HOST_HZ 100
1213 #endif
1214
1215 static inline abi_long host_to_target_clock_t(long ticks)
1216 {
1217 #if HOST_HZ == TARGET_HZ
1218 return ticks;
1219 #else
1220 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
1221 #endif
1222 }
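/* Simple tick-rate scaling: for example, with HOST_HZ == 100 and a target
 * HZ of 1024, 250 host clock ticks are reported to the guest as
 * (250 * 1024) / 100 == 2560 target ticks.
 */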
1223
1224 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
1225 const struct rusage *rusage)
1226 {
1227 struct target_rusage *target_rusage;
1228
1229 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
1230 return -TARGET_EFAULT;
1231 target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
1232 target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
1233 target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1234 target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1235 target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1236 target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1237 target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1238 target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1239 target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1240 target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1241 target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1242 target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1243 target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1244 target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1245 target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1246 target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1247 target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1248 target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1249 unlock_user_struct(target_rusage, target_addr, 1);
1250
1251 return 0;
1252 }
1253
1254 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1255 {
1256 abi_ulong target_rlim_swap;
1257 rlim_t result;
1258
1259 target_rlim_swap = tswapal(target_rlim);
1260 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1261 return RLIM_INFINITY;
1262
1263 result = target_rlim_swap;
1264 if (target_rlim_swap != (rlim_t)result)
1265 return RLIM_INFINITY;
1266
1267 return result;
1268 }
1269
1270 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1271 {
1272 abi_ulong target_rlim_swap;
1273 abi_ulong result;
1274
1275 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1276 target_rlim_swap = TARGET_RLIM_INFINITY;
1277 else
1278 target_rlim_swap = rlim;
1279 result = tswapal(target_rlim_swap);
1280
1281 return result;
1282 }
1283
1284 static inline int target_to_host_resource(int code)
1285 {
1286 switch (code) {
1287 case TARGET_RLIMIT_AS:
1288 return RLIMIT_AS;
1289 case TARGET_RLIMIT_CORE:
1290 return RLIMIT_CORE;
1291 case TARGET_RLIMIT_CPU:
1292 return RLIMIT_CPU;
1293 case TARGET_RLIMIT_DATA:
1294 return RLIMIT_DATA;
1295 case TARGET_RLIMIT_FSIZE:
1296 return RLIMIT_FSIZE;
1297 case TARGET_RLIMIT_LOCKS:
1298 return RLIMIT_LOCKS;
1299 case TARGET_RLIMIT_MEMLOCK:
1300 return RLIMIT_MEMLOCK;
1301 case TARGET_RLIMIT_MSGQUEUE:
1302 return RLIMIT_MSGQUEUE;
1303 case TARGET_RLIMIT_NICE:
1304 return RLIMIT_NICE;
1305 case TARGET_RLIMIT_NOFILE:
1306 return RLIMIT_NOFILE;
1307 case TARGET_RLIMIT_NPROC:
1308 return RLIMIT_NPROC;
1309 case TARGET_RLIMIT_RSS:
1310 return RLIMIT_RSS;
1311 case TARGET_RLIMIT_RTPRIO:
1312 return RLIMIT_RTPRIO;
1313 case TARGET_RLIMIT_SIGPENDING:
1314 return RLIMIT_SIGPENDING;
1315 case TARGET_RLIMIT_STACK:
1316 return RLIMIT_STACK;
1317 default:
1318 return code;
1319 }
1320 }
1321
1322 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1323 abi_ulong target_tv_addr)
1324 {
1325 struct target_timeval *target_tv;
1326
1327 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1328 return -TARGET_EFAULT;
1329
1330 __get_user(tv->tv_sec, &target_tv->tv_sec);
1331 __get_user(tv->tv_usec, &target_tv->tv_usec);
1332
1333 unlock_user_struct(target_tv, target_tv_addr, 0);
1334
1335 return 0;
1336 }
1337
1338 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1339 const struct timeval *tv)
1340 {
1341 struct target_timeval *target_tv;
1342
1343 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1344 return -TARGET_EFAULT;
1345
1346 __put_user(tv->tv_sec, &target_tv->tv_sec);
1347 __put_user(tv->tv_usec, &target_tv->tv_usec);
1348
1349 unlock_user_struct(target_tv, target_tv_addr, 1);
1350
1351 return 0;
1352 }
1353
1354 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1355 abi_ulong target_tz_addr)
1356 {
1357 struct target_timezone *target_tz;
1358
1359 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1360 return -TARGET_EFAULT;
1361 }
1362
1363 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1364 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1365
1366 unlock_user_struct(target_tz, target_tz_addr, 0);
1367
1368 return 0;
1369 }
1370
1371 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1372 #include <mqueue.h>
1373
1374 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1375 abi_ulong target_mq_attr_addr)
1376 {
1377 struct target_mq_attr *target_mq_attr;
1378
1379 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1380 target_mq_attr_addr, 1))
1381 return -TARGET_EFAULT;
1382
1383 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1384 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1385 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1386 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1387
1388 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1389
1390 return 0;
1391 }
1392
1393 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1394 const struct mq_attr *attr)
1395 {
1396 struct target_mq_attr *target_mq_attr;
1397
1398 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1399 target_mq_attr_addr, 0))
1400 return -TARGET_EFAULT;
1401
1402 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1403 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1404 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1405 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1406
1407 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1408
1409 return 0;
1410 }
1411 #endif
1412
1413 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1414 /* do_select() must return target values and target errnos. */
1415 static abi_long do_select(int n,
1416 abi_ulong rfd_addr, abi_ulong wfd_addr,
1417 abi_ulong efd_addr, abi_ulong target_tv_addr)
1418 {
1419 fd_set rfds, wfds, efds;
1420 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1421 struct timeval tv;
1422 struct timespec ts, *ts_ptr;
1423 abi_long ret;
1424
1425 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1426 if (ret) {
1427 return ret;
1428 }
1429 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1430 if (ret) {
1431 return ret;
1432 }
1433 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1434 if (ret) {
1435 return ret;
1436 }
1437
1438 if (target_tv_addr) {
1439 if (copy_from_user_timeval(&tv, target_tv_addr))
1440 return -TARGET_EFAULT;
1441 ts.tv_sec = tv.tv_sec;
1442 ts.tv_nsec = tv.tv_usec * 1000;
1443 ts_ptr = &ts;
1444 } else {
1445 ts_ptr = NULL;
1446 }
1447
1448 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1449 ts_ptr, NULL));
1450
1451 if (!is_error(ret)) {
1452 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1453 return -TARGET_EFAULT;
1454 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1455 return -TARGET_EFAULT;
1456 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1457 return -TARGET_EFAULT;
1458
1459 if (target_tv_addr) {
1460 tv.tv_sec = ts.tv_sec;
1461 tv.tv_usec = ts.tv_nsec / 1000;
1462 if (copy_to_user_timeval(target_tv_addr, &tv)) {
1463 return -TARGET_EFAULT;
1464 }
1465 }
1466 }
1467
1468 return ret;
1469 }
1470
1471 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1472 static abi_long do_old_select(abi_ulong arg1)
1473 {
1474 struct target_sel_arg_struct *sel;
1475 abi_ulong inp, outp, exp, tvp;
1476 long nsel;
1477
1478 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1479 return -TARGET_EFAULT;
1480 }
1481
1482 nsel = tswapal(sel->n);
1483 inp = tswapal(sel->inp);
1484 outp = tswapal(sel->outp);
1485 exp = tswapal(sel->exp);
1486 tvp = tswapal(sel->tvp);
1487
1488 unlock_user_struct(sel, arg1, 0);
1489
1490 return do_select(nsel, inp, outp, exp, tvp);
1491 }
1492 #endif
1493 #endif
1494
1495 static abi_long do_pipe2(int host_pipe[], int flags)
1496 {
1497 #ifdef CONFIG_PIPE2
1498 return pipe2(host_pipe, flags);
1499 #else
1500 return -ENOSYS;
1501 #endif
1502 }
1503
1504 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1505 int flags, int is_pipe2)
1506 {
1507 int host_pipe[2];
1508 abi_long ret;
1509 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1510
1511 if (is_error(ret))
1512 return get_errno(ret);
1513
1514 /* Several targets have special calling conventions for the original
1515 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1516 if (!is_pipe2) {
1517 #if defined(TARGET_ALPHA)
1518 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1519 return host_pipe[0];
1520 #elif defined(TARGET_MIPS)
1521 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1522 return host_pipe[0];
1523 #elif defined(TARGET_SH4)
1524 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1525 return host_pipe[0];
1526 #elif defined(TARGET_SPARC)
1527 ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1528 return host_pipe[0];
1529 #endif
1530 }
1531
1532 if (put_user_s32(host_pipe[0], pipedes)
1533 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1534 return -TARGET_EFAULT;
1535 return get_errno(ret);
1536 }
1537
1538 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1539 abi_ulong target_addr,
1540 socklen_t len)
1541 {
1542 struct target_ip_mreqn *target_smreqn;
1543
1544 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1545 if (!target_smreqn)
1546 return -TARGET_EFAULT;
1547 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1548 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1549 if (len == sizeof(struct target_ip_mreqn))
1550 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1551 unlock_user(target_smreqn, target_addr, 0);
1552
1553 return 0;
1554 }
1555
1556 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1557 abi_ulong target_addr,
1558 socklen_t len)
1559 {
1560 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1561 sa_family_t sa_family;
1562 struct target_sockaddr *target_saddr;
1563
1564 if (fd_trans_target_to_host_addr(fd)) {
1565 return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1566 }
1567
1568 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1569 if (!target_saddr)
1570 return -TARGET_EFAULT;
1571
1572 sa_family = tswap16(target_saddr->sa_family);
1573
1574 /* Oops. The caller might send an incomplete sun_path; sun_path
1575 * must be terminated by \0 (see the manual page), but
1576 * unfortunately it is quite common to specify sockaddr_un
1577 * length as "strlen(x->sun_path)" while it should be
1578 * "strlen(...) + 1". We'll fix that here if needed.
1579 * Linux kernel has a similar feature.
1580 */
1581
1582 if (sa_family == AF_UNIX) {
1583 if (len < unix_maxlen && len > 0) {
1584 char *cp = (char*)target_saddr;
1585
1586 if ( cp[len-1] && !cp[len] )
1587 len++;
1588 }
1589 if (len > unix_maxlen)
1590 len = unix_maxlen;
1591 }
1592
1593 memcpy(addr, target_saddr, len);
1594 addr->sa_family = sa_family;
1595 if (sa_family == AF_NETLINK) {
1596 struct sockaddr_nl *nladdr;
1597
1598 nladdr = (struct sockaddr_nl *)addr;
1599 nladdr->nl_pid = tswap32(nladdr->nl_pid);
1600 nladdr->nl_groups = tswap32(nladdr->nl_groups);
1601 } else if (sa_family == AF_PACKET) {
1602 struct target_sockaddr_ll *lladdr;
1603
1604 lladdr = (struct target_sockaddr_ll *)addr;
1605 lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1606 lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1607 }
1608 unlock_user(target_saddr, target_addr, 0);
1609
1610 return 0;
1611 }
1612
1613 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1614 struct sockaddr *addr,
1615 socklen_t len)
1616 {
1617 struct target_sockaddr *target_saddr;
1618
1619 if (len == 0) {
1620 return 0;
1621 }
1622
1623 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1624 if (!target_saddr)
1625 return -TARGET_EFAULT;
1626 memcpy(target_saddr, addr, len);
1627 if (len >= offsetof(struct target_sockaddr, sa_family) +
1628 sizeof(target_saddr->sa_family)) {
1629 target_saddr->sa_family = tswap16(addr->sa_family);
1630 }
1631 if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
1632 struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
1633 target_nl->nl_pid = tswap32(target_nl->nl_pid);
1634 target_nl->nl_groups = tswap32(target_nl->nl_groups);
1635 } else if (addr->sa_family == AF_PACKET) {
1636 struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1637 target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1638 target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1639 } else if (addr->sa_family == AF_INET6 &&
1640 len >= sizeof(struct target_sockaddr_in6)) {
1641 struct target_sockaddr_in6 *target_in6 =
1642 (struct target_sockaddr_in6 *)target_saddr;
1643 target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1644 }
1645 unlock_user(target_saddr, target_addr, len);
1646
1647 return 0;
1648 }
1649
1650 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1651 struct target_msghdr *target_msgh)
1652 {
1653 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1654 abi_long msg_controllen;
1655 abi_ulong target_cmsg_addr;
1656 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1657 socklen_t space = 0;
1658
1659 msg_controllen = tswapal(target_msgh->msg_controllen);
1660 if (msg_controllen < sizeof (struct target_cmsghdr))
1661 goto the_end;
1662 target_cmsg_addr = tswapal(target_msgh->msg_control);
1663 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1664 target_cmsg_start = target_cmsg;
1665 if (!target_cmsg)
1666 return -TARGET_EFAULT;
1667
1668 while (cmsg && target_cmsg) {
1669 void *data = CMSG_DATA(cmsg);
1670 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1671
1672 int len = tswapal(target_cmsg->cmsg_len)
1673 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1674
1675 space += CMSG_SPACE(len);
1676 if (space > msgh->msg_controllen) {
1677 space -= CMSG_SPACE(len);
1678 /* This is a QEMU bug, since we allocated the payload
1679 * area ourselves (unlike overflow in host-to-target
1680 * conversion, which is just the guest giving us a buffer
1681 * that's too small). It can't happen for the payload types
1682 * we currently support; if it becomes an issue in future
1683 * we would need to improve our allocation strategy to
1684 * something more intelligent than "twice the size of the
1685 * target buffer we're reading from".
1686 */
1687 gemu_log("Host cmsg overflow\n");
1688 break;
1689 }
1690
1691 if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1692 cmsg->cmsg_level = SOL_SOCKET;
1693 } else {
1694 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1695 }
1696 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1697 cmsg->cmsg_len = CMSG_LEN(len);
1698
1699 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1700 int *fd = (int *)data;
1701 int *target_fd = (int *)target_data;
1702 int i, numfds = len / sizeof(int);
1703
1704 for (i = 0; i < numfds; i++) {
1705 __get_user(fd[i], target_fd + i);
1706 }
1707 } else if (cmsg->cmsg_level == SOL_SOCKET
1708 && cmsg->cmsg_type == SCM_CREDENTIALS) {
1709 struct ucred *cred = (struct ucred *)data;
1710 struct target_ucred *target_cred =
1711 (struct target_ucred *)target_data;
1712
1713 __get_user(cred->pid, &target_cred->pid);
1714 __get_user(cred->uid, &target_cred->uid);
1715 __get_user(cred->gid, &target_cred->gid);
1716 } else {
1717 gemu_log("Unsupported ancillary data: %d/%d\n",
1718 cmsg->cmsg_level, cmsg->cmsg_type);
1719 memcpy(data, target_data, len);
1720 }
1721
1722 cmsg = CMSG_NXTHDR(msgh, cmsg);
1723 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1724 target_cmsg_start);
1725 }
1726 unlock_user(target_cmsg, target_cmsg_addr, 0);
1727 the_end:
1728 msgh->msg_controllen = space;
1729 return 0;
1730 }
1731
1732 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1733 struct msghdr *msgh)
1734 {
1735 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1736 abi_long msg_controllen;
1737 abi_ulong target_cmsg_addr;
1738 struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1739 socklen_t space = 0;
1740
1741 msg_controllen = tswapal(target_msgh->msg_controllen);
1742 if (msg_controllen < sizeof (struct target_cmsghdr))
1743 goto the_end;
1744 target_cmsg_addr = tswapal(target_msgh->msg_control);
1745 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1746 target_cmsg_start = target_cmsg;
1747 if (!target_cmsg)
1748 return -TARGET_EFAULT;
1749
1750 while (cmsg && target_cmsg) {
1751 void *data = CMSG_DATA(cmsg);
1752 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1753
1754 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1755 int tgt_len, tgt_space;
1756
1757 /* We never copy a half-header but may copy half-data;
1758 * this is Linux's behaviour in put_cmsg(). Note that
1759 * truncation here is a guest problem (which we report
1760 * to the guest via the CTRUNC bit), unlike truncation
1761 * in target_to_host_cmsg, which is a QEMU bug.
1762 */
1763 if (msg_controllen < sizeof(struct cmsghdr)) {
1764 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1765 break;
1766 }
1767
1768 if (cmsg->cmsg_level == SOL_SOCKET) {
1769 target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1770 } else {
1771 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1772 }
1773 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1774
1775 tgt_len = TARGET_CMSG_LEN(len);
1776
1777 /* Payload types which need a different size of payload on
1778 * the target must adjust tgt_len here.
1779 */
1780 switch (cmsg->cmsg_level) {
1781 case SOL_SOCKET:
1782 switch (cmsg->cmsg_type) {
1783 case SO_TIMESTAMP:
1784 tgt_len = sizeof(struct target_timeval);
1785 break;
1786 default:
1787 break;
1788 }
1789 default:
1790 break;
1791 }
1792
1793 if (msg_controllen < tgt_len) {
1794 target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1795 tgt_len = msg_controllen;
1796 }
1797
1798 /* We must now copy-and-convert len bytes of payload
1799 * into tgt_len bytes of destination space. Bear in mind
1800 * that in both source and destination we may be dealing
1801 * with a truncated value!
1802 */
1803 switch (cmsg->cmsg_level) {
1804 case SOL_SOCKET:
1805 switch (cmsg->cmsg_type) {
1806 case SCM_RIGHTS:
1807 {
1808 int *fd = (int *)data;
1809 int *target_fd = (int *)target_data;
1810 int i, numfds = tgt_len / sizeof(int);
1811
1812 for (i = 0; i < numfds; i++) {
1813 __put_user(fd[i], target_fd + i);
1814 }
1815 break;
1816 }
1817 case SO_TIMESTAMP:
1818 {
1819 struct timeval *tv = (struct timeval *)data;
1820 struct target_timeval *target_tv =
1821 (struct target_timeval *)target_data;
1822
1823 if (len != sizeof(struct timeval) ||
1824 tgt_len != sizeof(struct target_timeval)) {
1825 goto unimplemented;
1826 }
1827
1828 /* copy struct timeval to target */
1829 __put_user(tv->tv_sec, &target_tv->tv_sec);
1830 __put_user(tv->tv_usec, &target_tv->tv_usec);
1831 break;
1832 }
1833 case SCM_CREDENTIALS:
1834 {
1835 struct ucred *cred = (struct ucred *)data;
1836 struct target_ucred *target_cred =
1837 (struct target_ucred *)target_data;
1838
1839 __put_user(cred->pid, &target_cred->pid);
1840 __put_user(cred->uid, &target_cred->uid);
1841 __put_user(cred->gid, &target_cred->gid);
1842 break;
1843 }
1844 default:
1845 goto unimplemented;
1846 }
1847 break;
1848
1849 case SOL_IP:
1850 switch (cmsg->cmsg_type) {
1851 case IP_TTL:
1852 {
1853 uint32_t *v = (uint32_t *)data;
1854 uint32_t *t_int = (uint32_t *)target_data;
1855
1856 __put_user(*v, t_int);
1857 break;
1858 }
1859 case IP_RECVERR:
1860 {
1861 struct errhdr_t {
1862 struct sock_extended_err ee;
1863 struct sockaddr_in offender;
1864 };
1865 struct errhdr_t *errh = (struct errhdr_t *)data;
1866 struct errhdr_t *target_errh =
1867 (struct errhdr_t *)target_data;
1868
1869 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1870 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1871 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1872 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1873 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1874 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1875 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1876 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1877 (void *) &errh->offender, sizeof(errh->offender));
1878 break;
1879 }
1880 default:
1881 goto unimplemented;
1882 }
1883 break;
1884
1885 case SOL_IPV6:
1886 switch (cmsg->cmsg_type) {
1887 case IPV6_HOPLIMIT:
1888 {
1889 uint32_t *v = (uint32_t *)data;
1890 uint32_t *t_int = (uint32_t *)target_data;
1891
1892 __put_user(*v, t_int);
1893 break;
1894 }
1895 case IPV6_RECVERR:
1896 {
1897 struct errhdr6_t {
1898 struct sock_extended_err ee;
1899 struct sockaddr_in6 offender;
1900 };
1901 struct errhdr6_t *errh = (struct errhdr6_t *)data;
1902 struct errhdr6_t *target_errh =
1903 (struct errhdr6_t *)target_data;
1904
1905 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1906 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1907 __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
1908 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1909 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1910 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1911 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1912 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1913 (void *) &errh->offender, sizeof(errh->offender));
1914 break;
1915 }
1916 default:
1917 goto unimplemented;
1918 }
1919 break;
1920
1921 default:
1922 unimplemented:
1923 gemu_log("Unsupported ancillary data: %d/%d\n",
1924 cmsg->cmsg_level, cmsg->cmsg_type);
1925 memcpy(target_data, data, MIN(len, tgt_len));
1926 if (tgt_len > len) {
1927 memset(target_data + len, 0, tgt_len - len);
1928 }
1929 }
1930
1931 target_cmsg->cmsg_len = tswapal(tgt_len);
1932 tgt_space = TARGET_CMSG_SPACE(len);
1933 if (msg_controllen < tgt_space) {
1934 tgt_space = msg_controllen;
1935 }
1936 msg_controllen -= tgt_space;
1937 space += tgt_space;
1938 cmsg = CMSG_NXTHDR(msgh, cmsg);
1939 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1940 target_cmsg_start);
1941 }
1942 unlock_user(target_cmsg, target_cmsg_addr, space);
1943 the_end:
1944 target_msgh->msg_controllen = tswapal(space);
1945 return 0;
1946 }
1947
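/* Byte-swap the fixed fields of a netlink message header in place.
 * The swap is its own inverse, so the same helper serves both
 * host-to-target and target-to-host conversion.
 */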
1948 static void tswap_nlmsghdr(struct nlmsghdr *nlh)
1949 {
1950 nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
1951 nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
1952 nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
1953 nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
1954 nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
1955 }
1956
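/* Walk a buffer of host netlink messages, handing each payload to the
 * supplied converter before swapping its header for the target.
 * NLMSG_DONE and NLMSG_ERROR terminate the walk.
 */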
1957 static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
1958 size_t len,
1959 abi_long (*host_to_target_nlmsg)
1960 (struct nlmsghdr *))
1961 {
1962 uint32_t nlmsg_len;
1963 abi_long ret;
1964
1965 while (len > sizeof(struct nlmsghdr)) {
1966
1967 nlmsg_len = nlh->nlmsg_len;
1968 if (nlmsg_len < sizeof(struct nlmsghdr) ||
1969 nlmsg_len > len) {
1970 break;
1971 }
1972
1973 switch (nlh->nlmsg_type) {
1974 case NLMSG_DONE:
1975 tswap_nlmsghdr(nlh);
1976 return 0;
1977 case NLMSG_NOOP:
1978 break;
1979 case NLMSG_ERROR:
1980 {
1981 struct nlmsgerr *e = NLMSG_DATA(nlh);
1982 e->error = tswap32(e->error);
1983 tswap_nlmsghdr(&e->msg);
1984 tswap_nlmsghdr(nlh);
1985 return 0;
1986 }
1987 default:
1988 ret = host_to_target_nlmsg(nlh);
1989 if (ret < 0) {
1990 tswap_nlmsghdr(nlh);
1991 return ret;
1992 }
1993 break;
1994 }
1995 tswap_nlmsghdr(nlh);
1996 len -= NLMSG_ALIGN(nlmsg_len);
1997 nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
1998 }
1999 return 0;
2000 }
2001
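/* Walk a buffer of netlink messages coming from the guest: lengths are
 * validated while still in target byte order, then each header is
 * swapped and the payload handed to the supplied converter.
 */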
2002 static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
2003 size_t len,
2004 abi_long (*target_to_host_nlmsg)
2005 (struct nlmsghdr *))
2006 {
2007 int ret;
2008
2009 while (len > sizeof(struct nlmsghdr)) {
2010 if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
2011 tswap32(nlh->nlmsg_len) > len) {
2012 break;
2013 }
2014 tswap_nlmsghdr(nlh);
2015 switch (nlh->nlmsg_type) {
2016 case NLMSG_DONE:
2017 return 0;
2018 case NLMSG_NOOP:
2019 break;
2020 case NLMSG_ERROR:
2021 {
2022 struct nlmsgerr *e = NLMSG_DATA(nlh);
2023 e->error = tswap32(e->error);
2024 tswap_nlmsghdr(&e->msg);
2025 return 0;
2026 }
2027 default:
2028 ret = target_to_host_nlmsg(nlh);
2029 if (ret < 0) {
2030 return ret;
2031 }
2032 }
2033 len -= NLMSG_ALIGN(nlh->nlmsg_len);
2034 nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
2035 }
2036 return 0;
2037 }
2038
2039 #ifdef CONFIG_RTNETLINK
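/* Iterate over a block of netlink attributes (struct nlattr), letting
 * the callback convert each payload before the attribute header itself
 * is byte-swapped for the target.
 */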
2040 static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
2041 size_t len, void *context,
2042 abi_long (*host_to_target_nlattr)
2043 (struct nlattr *,
2044 void *context))
2045 {
2046 unsigned short nla_len;
2047 abi_long ret;
2048
2049 while (len > sizeof(struct nlattr)) {
2050 nla_len = nlattr->nla_len;
2051 if (nla_len < sizeof(struct nlattr) ||
2052 nla_len > len) {
2053 break;
2054 }
2055 ret = host_to_target_nlattr(nlattr, context);
2056 nlattr->nla_len = tswap16(nlattr->nla_len);
2057 nlattr->nla_type = tswap16(nlattr->nla_type);
2058 if (ret < 0) {
2059 return ret;
2060 }
2061 len -= NLA_ALIGN(nla_len);
2062 nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
2063 }
2064 return 0;
2065 }
2066
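/* Same iteration as above, but for the older struct rtattr layout used
 * directly inside rtnetlink message payloads.
 */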
2067 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
2068 size_t len,
2069 abi_long (*host_to_target_rtattr)
2070 (struct rtattr *))
2071 {
2072 unsigned short rta_len;
2073 abi_long ret;
2074
2075 while (len > sizeof(struct rtattr)) {
2076 rta_len = rtattr->rta_len;
2077 if (rta_len < sizeof(struct rtattr) ||
2078 rta_len > len) {
2079 break;
2080 }
2081 ret = host_to_target_rtattr(rtattr);
2082 rtattr->rta_len = tswap16(rtattr->rta_len);
2083 rtattr->rta_type = tswap16(rtattr->rta_type);
2084 if (ret < 0) {
2085 return ret;
2086 }
2087 len -= RTA_ALIGN(rta_len);
2088 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
2089 }
2090 return 0;
2091 }
2092
2093 #define NLA_DATA(nla) ((void *)((char *)(nla) + NLA_HDRLEN))
2094
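/* Byte-swap IFLA_BR_* bridge attributes according to the width of each
 * payload; attributes carrying no data, raw binary blobs or single
 * bytes need no conversion.
 */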
2095 static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
2096 void *context)
2097 {
2098 uint16_t *u16;
2099 uint32_t *u32;
2100 uint64_t *u64;
2101
2102 switch (nlattr->nla_type) {
2103 /* no data */
2104 case QEMU_IFLA_BR_FDB_FLUSH:
2105 break;
2106 /* binary */
2107 case QEMU_IFLA_BR_GROUP_ADDR:
2108 break;
2109 /* uint8_t */
2110 case QEMU_IFLA_BR_VLAN_FILTERING:
2111 case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
2112 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
2113 case QEMU_IFLA_BR_MCAST_ROUTER:
2114 case QEMU_IFLA_BR_MCAST_SNOOPING:
2115 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
2116 case QEMU_IFLA_BR_MCAST_QUERIER:
2117 case QEMU_IFLA_BR_NF_CALL_IPTABLES:
2118 case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
2119 case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
2120 break;
2121 /* uint16_t */
2122 case QEMU_IFLA_BR_PRIORITY:
2123 case QEMU_IFLA_BR_VLAN_PROTOCOL:
2124 case QEMU_IFLA_BR_GROUP_FWD_MASK:
2125 case QEMU_IFLA_BR_ROOT_PORT:
2126 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
2127 u16 = NLA_DATA(nlattr);
2128 *u16 = tswap16(*u16);
2129 break;
2130 /* uint32_t */
2131 case QEMU_IFLA_BR_FORWARD_DELAY:
2132 case QEMU_IFLA_BR_HELLO_TIME:
2133 case QEMU_IFLA_BR_MAX_AGE:
2134 case QEMU_IFLA_BR_AGEING_TIME:
2135 case QEMU_IFLA_BR_STP_STATE:
2136 case QEMU_IFLA_BR_ROOT_PATH_COST:
2137 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
2138 case QEMU_IFLA_BR_MCAST_HASH_MAX:
2139 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
2140 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
2141 u32 = NLA_DATA(nlattr);
2142 *u32 = tswap32(*u32);
2143 break;
2144 /* uint64_t */
2145 case QEMU_IFLA_BR_HELLO_TIMER:
2146 case QEMU_IFLA_BR_TCN_TIMER:
2147 case QEMU_IFLA_BR_GC_TIMER:
2148 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
2149 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
2150 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
2151 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
2152 case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
2153 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
2154 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
2155 u64 = NLA_DATA(nlattr);
2156 *u64 = tswap64(*u64);
2157 break;
2158 /* ifla_bridge_id: uint8_t[] */
2159 case QEMU_IFLA_BR_ROOT_ID:
2160 case QEMU_IFLA_BR_BRIDGE_ID:
2161 break;
2162 default:
2163 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
2164 break;
2165 }
2166 return 0;
2167 }
2168
2169 static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
2170 void *context)
2171 {
2172 uint16_t *u16;
2173 uint32_t *u32;
2174 uint64_t *u64;
2175
2176 switch (nlattr->nla_type) {
2177 /* uint8_t */
2178 case QEMU_IFLA_BRPORT_STATE:
2179 case QEMU_IFLA_BRPORT_MODE:
2180 case QEMU_IFLA_BRPORT_GUARD:
2181 case QEMU_IFLA_BRPORT_PROTECT:
2182 case QEMU_IFLA_BRPORT_FAST_LEAVE:
2183 case QEMU_IFLA_BRPORT_LEARNING:
2184 case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
2185 case QEMU_IFLA_BRPORT_PROXYARP:
2186 case QEMU_IFLA_BRPORT_LEARNING_SYNC:
2187 case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
2188 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
2189 case QEMU_IFLA_BRPORT_CONFIG_PENDING:
2190 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
2191 break;
2192 /* uint16_t */
2193 case QEMU_IFLA_BRPORT_PRIORITY:
2194 case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
2195 case QEMU_IFLA_BRPORT_DESIGNATED_COST:
2196 case QEMU_IFLA_BRPORT_ID:
2197 case QEMU_IFLA_BRPORT_NO:
2198 u16 = NLA_DATA(nlattr);
2199 *u16 = tswap16(*u16);
2200 break;
2201 /* uint32_t */
2202 case QEMU_IFLA_BRPORT_COST:
2203 u32 = NLA_DATA(nlattr);
2204 *u32 = tswap32(*u32);
2205 break;
2206 /* uint64_t */
2207 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
2208 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
2209 case QEMU_IFLA_BRPORT_HOLD_TIMER:
2210 u64 = NLA_DATA(nlattr);
2211 *u64 = tswap64(*u64);
2212 break;
2213 /* ifla_bridge_id: uint8_t[] */
2214 case QEMU_IFLA_BRPORT_ROOT_ID:
2215 case QEMU_IFLA_BRPORT_BRIDGE_ID:
2216 break;
2217 default:
2218 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
2219 break;
2220 }
2221 return 0;
2222 }
2223
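/* Remembers the IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND strings seen
 * while walking an IFLA_LINKINFO block, so that the nested
 * IFLA_INFO_DATA and IFLA_INFO_SLAVE_DATA attributes can be
 * interpreted according to the link type.
 */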
2224 struct linkinfo_context {
2225 int len;
2226 char *name;
2227 int slave_len;
2228 char *slave_name;
2229 };
2230
2231 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2232 void *context)
2233 {
2234 struct linkinfo_context *li_context = context;
2235
2236 switch (nlattr->nla_type) {
2237 /* string */
2238 case QEMU_IFLA_INFO_KIND:
2239 li_context->name = NLA_DATA(nlattr);
2240 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2241 break;
2242 case QEMU_IFLA_INFO_SLAVE_KIND:
2243 li_context->slave_name = NLA_DATA(nlattr);
2244 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2245 break;
2246 /* stats */
2247 case QEMU_IFLA_INFO_XSTATS:
2248 /* FIXME: only used by CAN */
2249 break;
2250 /* nested */
2251 case QEMU_IFLA_INFO_DATA:
2252 if (strncmp(li_context->name, "bridge",
2253 li_context->len) == 0) {
2254 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2255 nlattr->nla_len,
2256 NULL,
2257 host_to_target_data_bridge_nlattr);
2258 } else {
2259 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2260 }
2261 break;
2262 case QEMU_IFLA_INFO_SLAVE_DATA:
2263 if (strncmp(li_context->slave_name, "bridge",
2264 li_context->slave_len) == 0) {
2265 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2266 nlattr->nla_len,
2267 NULL,
2268 host_to_target_slave_data_bridge_nlattr);
2269 } else {
2270 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2271 li_context->slave_name);
2272 }
2273 break;
2274 default:
2275 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2276 break;
2277 }
2278
2279 return 0;
2280 }
2281
2282 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2283 void *context)
2284 {
2285 uint32_t *u32;
2286 int i;
2287
2288 switch (nlattr->nla_type) {
2289 case QEMU_IFLA_INET_CONF:
2290 u32 = NLA_DATA(nlattr);
2291 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2292 i++) {
2293 u32[i] = tswap32(u32[i]);
2294 }
2295 break;
2296 default:
2297 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2298 }
2299 return 0;
2300 }
2301
2302 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2303 void *context)
2304 {
2305 uint32_t *u32;
2306 uint64_t *u64;
2307 struct ifla_cacheinfo *ci;
2308 int i;
2309
2310 switch (nlattr->nla_type) {
2311 /* binary */
2312 case QEMU_IFLA_INET6_TOKEN:
2313 break;
2314 /* uint8_t */
2315 case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2316 break;
2317 /* uint32_t */
2318 case QEMU_IFLA_INET6_FLAGS:
2319 u32 = NLA_DATA(nlattr);
2320 *u32 = tswap32(*u32);
2321 break;
2322 /* uint32_t[] */
2323 case QEMU_IFLA_INET6_CONF:
2324 u32 = NLA_DATA(nlattr);
2325 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2326 i++) {
2327 u32[i] = tswap32(u32[i]);
2328 }
2329 break;
2330 /* ifla_cacheinfo */
2331 case QEMU_IFLA_INET6_CACHEINFO:
2332 ci = NLA_DATA(nlattr);
2333 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2334 ci->tstamp = tswap32(ci->tstamp);
2335 ci->reachable_time = tswap32(ci->reachable_time);
2336 ci->retrans_time = tswap32(ci->retrans_time);
2337 break;
2338 /* uint64_t[] */
2339 case QEMU_IFLA_INET6_STATS:
2340 case QEMU_IFLA_INET6_ICMP6STATS:
2341 u64 = NLA_DATA(nlattr);
2342 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2343 i++) {
2344 u64[i] = tswap64(u64[i]);
2345 }
2346 break;
2347 default:
2348 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2349 }
2350 return 0;
2351 }
2352
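/* Dispatch an IFLA_AF_SPEC entry to the per-family converter; here the
 * attribute type is the address family itself.
 */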
2353 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2354 void *context)
2355 {
2356 switch (nlattr->nla_type) {
2357 case AF_INET:
2358 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2359 NULL,
2360 host_to_target_data_inet_nlattr);
2361 case AF_INET6:
2362 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2363 NULL,
2364 host_to_target_data_inet6_nlattr);
2365 default:
2366 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2367 break;
2368 }
2369 return 0;
2370 }
2371
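/* Convert the per-link IFLA_* attributes of an RTM_*LINK message:
 * scalars are byte-swapped by width, the stats and ifmap structures
 * field by field, and IFLA_LINKINFO / IFLA_AF_SPEC recurse into their
 * nested attribute blocks.
 */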
2372 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2373 {
2374 uint32_t *u32;
2375 struct rtnl_link_stats *st;
2376 struct rtnl_link_stats64 *st64;
2377 struct rtnl_link_ifmap *map;
2378 struct linkinfo_context li_context;
2379
2380 switch (rtattr->rta_type) {
2381 /* binary stream */
2382 case QEMU_IFLA_ADDRESS:
2383 case QEMU_IFLA_BROADCAST:
2384 /* string */
2385 case QEMU_IFLA_IFNAME:
2386 case QEMU_IFLA_QDISC:
2387 break;
2388 /* uint8_t */
2389 case QEMU_IFLA_OPERSTATE:
2390 case QEMU_IFLA_LINKMODE:
2391 case QEMU_IFLA_CARRIER:
2392 case QEMU_IFLA_PROTO_DOWN:
2393 break;
2394 /* uint32_t */
2395 case QEMU_IFLA_MTU:
2396 case QEMU_IFLA_LINK:
2397 case QEMU_IFLA_WEIGHT:
2398 case QEMU_IFLA_TXQLEN:
2399 case QEMU_IFLA_CARRIER_CHANGES:
2400 case QEMU_IFLA_NUM_RX_QUEUES:
2401 case QEMU_IFLA_NUM_TX_QUEUES:
2402 case QEMU_IFLA_PROMISCUITY:
2403 case QEMU_IFLA_EXT_MASK:
2404 case QEMU_IFLA_LINK_NETNSID:
2405 case QEMU_IFLA_GROUP:
2406 case QEMU_IFLA_MASTER:
2407 case QEMU_IFLA_NUM_VF:
2408 case QEMU_IFLA_GSO_MAX_SEGS:
2409 case QEMU_IFLA_GSO_MAX_SIZE:
2410 u32 = RTA_DATA(rtattr);
2411 *u32 = tswap32(*u32);
2412 break;
2413 /* struct rtnl_link_stats */
2414 case QEMU_IFLA_STATS:
2415 st = RTA_DATA(rtattr);
2416 st->rx_packets = tswap32(st->rx_packets);
2417 st->tx_packets = tswap32(st->tx_packets);
2418 st->rx_bytes = tswap32(st->rx_bytes);
2419 st->tx_bytes = tswap32(st->tx_bytes);
2420 st->rx_errors = tswap32(st->rx_errors);
2421 st->tx_errors = tswap32(st->tx_errors);
2422 st->rx_dropped = tswap32(st->rx_dropped);
2423 st->tx_dropped = tswap32(st->tx_dropped);
2424 st->multicast = tswap32(st->multicast);
2425 st->collisions = tswap32(st->collisions);
2426
2427 /* detailed rx_errors: */
2428 st->rx_length_errors = tswap32(st->rx_length_errors);
2429 st->rx_over_errors = tswap32(st->rx_over_errors);
2430 st->rx_crc_errors = tswap32(st->rx_crc_errors);
2431 st->rx_frame_errors = tswap32(st->rx_frame_errors);
2432 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2433 st->rx_missed_errors = tswap32(st->rx_missed_errors);
2434
2435 /* detailed tx_errors */
2436 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2437 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2438 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2439 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2440 st->tx_window_errors = tswap32(st->tx_window_errors);
2441
2442 /* for cslip etc */
2443 st->rx_compressed = tswap32(st->rx_compressed);
2444 st->tx_compressed = tswap32(st->tx_compressed);
2445 break;
2446 /* struct rtnl_link_stats64 */
2447 case QEMU_IFLA_STATS64:
2448 st64 = RTA_DATA(rtattr);
2449 st64->rx_packets = tswap64(st64->rx_packets);
2450 st64->tx_packets = tswap64(st64->tx_packets);
2451 st64->rx_bytes = tswap64(st64->rx_bytes);
2452 st64->tx_bytes = tswap64(st64->tx_bytes);
2453 st64->rx_errors = tswap64(st64->rx_errors);
2454 st64->tx_errors = tswap64(st64->tx_errors);
2455 st64->rx_dropped = tswap64(st64->rx_dropped);
2456 st64->tx_dropped = tswap64(st64->tx_dropped);
2457 st64->multicast = tswap64(st64->multicast);
2458 st64->collisions = tswap64(st64->collisions);
2459
2460 /* detailed rx_errors: */
2461 st64->rx_length_errors = tswap64(st64->rx_length_errors);
2462 st64->rx_over_errors = tswap64(st64->rx_over_errors);
2463 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2464 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2465 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2466 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2467
2468 /* detailed tx_errors */
2469 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2470 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2471 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2472 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2473 st64->tx_window_errors = tswap64(st64->tx_window_errors);
2474
2475 /* for cslip etc */
2476 st64->rx_compressed = tswap64(st64->rx_compressed);
2477 st64->tx_compressed = tswap64(st64->tx_compressed);
2478 break;
2479 /* struct rtnl_link_ifmap */
2480 case QEMU_IFLA_MAP:
2481 map = RTA_DATA(rtattr);
2482 map->mem_start = tswap64(map->mem_start);
2483 map->mem_end = tswap64(map->mem_end);
2484 map->base_addr = tswap64(map->base_addr);
2485 map->irq = tswap16(map->irq);
2486 break;
2487 /* nested */
2488 case QEMU_IFLA_LINKINFO:
2489 memset(&li_context, 0, sizeof(li_context));
2490 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2491 &li_context,
2492 host_to_target_data_linkinfo_nlattr);
2493 case QEMU_IFLA_AF_SPEC:
2494 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2495 NULL,
2496 host_to_target_data_spec_nlattr);
2497 default:
2498 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
2499 break;
2500 }
2501 return 0;
2502 }
2503
2504 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2505 {
2506 uint32_t *u32;
2507 struct ifa_cacheinfo *ci;
2508
2509 switch (rtattr->rta_type) {
2510 /* binary: depends on family type */
2511 case IFA_ADDRESS:
2512 case IFA_LOCAL:
2513 break;
2514 /* string */
2515 case IFA_LABEL:
2516 break;
2517 /* u32 */
2518 case IFA_FLAGS:
2519 case IFA_BROADCAST:
2520 u32 = RTA_DATA(rtattr);
2521 *u32 = tswap32(*u32);
2522 break;
2523 /* struct ifa_cacheinfo */
2524 case IFA_CACHEINFO:
2525 ci = RTA_DATA(rtattr);
2526 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2527 ci->ifa_valid = tswap32(ci->ifa_valid);
2528 ci->cstamp = tswap32(ci->cstamp);
2529 ci->tstamp = tswap32(ci->tstamp);
2530 break;
2531 default:
2532 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2533 break;
2534 }
2535 return 0;
2536 }
2537
2538 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2539 {
2540 uint32_t *u32;
2541 switch (rtattr->rta_type) {
2542 /* binary: depends on family type */
2543 case RTA_GATEWAY:
2544 case RTA_DST:
2545 case RTA_PREFSRC:
2546 break;
2547 /* u32 */
2548 case RTA_PRIORITY:
2549 case RTA_TABLE:
2550 case RTA_OIF:
2551 u32 = RTA_DATA(rtattr);
2552 *u32 = tswap32(*u32);
2553 break;
2554 default:
2555 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2556 break;
2557 }
2558 return 0;
2559 }
2560
2561 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2562 uint32_t rtattr_len)
2563 {
2564 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2565 host_to_target_data_link_rtattr);
2566 }
2567
2568 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2569 uint32_t rtattr_len)
2570 {
2571 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2572 host_to_target_data_addr_rtattr);
2573 }
2574
2575 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2576 uint32_t rtattr_len)
2577 {
2578 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2579 host_to_target_data_route_rtattr);
2580 }
2581
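/* Convert one rtnetlink message from host to target: swap the fixed
 * ifinfomsg/ifaddrmsg/rtmsg header, then the attached attributes.
 * Unknown message types are rejected with -TARGET_EINVAL.
 */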
2582 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2583 {
2584 uint32_t nlmsg_len;
2585 struct ifinfomsg *ifi;
2586 struct ifaddrmsg *ifa;
2587 struct rtmsg *rtm;
2588
2589 nlmsg_len = nlh->nlmsg_len;
2590 switch (nlh->nlmsg_type) {
2591 case RTM_NEWLINK:
2592 case RTM_DELLINK:
2593 case RTM_GETLINK:
2594 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2595 ifi = NLMSG_DATA(nlh);
2596 ifi->ifi_type = tswap16(ifi->ifi_type);
2597 ifi->ifi_index = tswap32(ifi->ifi_index);
2598 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2599 ifi->ifi_change = tswap32(ifi->ifi_change);
2600 host_to_target_link_rtattr(IFLA_RTA(ifi),
2601 nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2602 }
2603 break;
2604 case RTM_NEWADDR:
2605 case RTM_DELADDR:
2606 case RTM_GETADDR:
2607 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2608 ifa = NLMSG_DATA(nlh);
2609 ifa->ifa_index = tswap32(ifa->ifa_index);
2610 host_to_target_addr_rtattr(IFA_RTA(ifa),
2611 nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2612 }
2613 break;
2614 case RTM_NEWROUTE:
2615 case RTM_DELROUTE:
2616 case RTM_GETROUTE:
2617 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2618 rtm = NLMSG_DATA(nlh);
2619 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2620 host_to_target_route_rtattr(RTM_RTA(rtm),
2621 nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2622 }
2623 break;
2624 default:
2625 return -TARGET_EINVAL;
2626 }
2627 return 0;
2628 }
2629
2630 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2631 size_t len)
2632 {
2633 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
2634 }
2635
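/* Walk guest-supplied rtattr blocks: lengths are checked in target
 * byte order, then each header is swapped before the payload converter
 * runs.
 */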
2636 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2637 size_t len,
2638 abi_long (*target_to_host_rtattr)
2639 (struct rtattr *))
2640 {
2641 abi_long ret;
2642
2643 while (len >= sizeof(struct rtattr)) {
2644 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2645 tswap16(rtattr->rta_len) > len) {
2646 break;
2647 }
2648 rtattr->rta_len = tswap16(rtattr->rta_len);
2649 rtattr->rta_type = tswap16(rtattr->rta_type);
2650 ret = target_to_host_rtattr(rtattr);
2651 if (ret < 0) {
2652 return ret;
2653 }
2654 len -= RTA_ALIGN(rtattr->rta_len);
2655 rtattr = (struct rtattr *)(((char *)rtattr) +
2656 RTA_ALIGN(rtattr->rta_len));
2657 }
2658 return 0;
2659 }
2660
2661 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2662 {
2663 switch (rtattr->rta_type) {
2664 default:
2665 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2666 break;
2667 }
2668 return 0;
2669 }
2670
2671 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2672 {
2673 switch (rtattr->rta_type) {
2674 /* binary: depends on family type */
2675 case IFA_LOCAL:
2676 case IFA_ADDRESS:
2677 break;
2678 default:
2679 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2680 break;
2681 }
2682 return 0;
2683 }
2684
2685 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2686 {
2687 uint32_t *u32;
2688 switch (rtattr->rta_type) {
2689 /* binary: depends on family type */
2690 case RTA_DST:
2691 case RTA_SRC:
2692 case RTA_GATEWAY:
2693 break;
2694 /* u32 */
2695 case RTA_PRIORITY:
2696 case RTA_OIF:
2697 u32 = RTA_DATA(rtattr);
2698 *u32 = tswap32(*u32);
2699 break;
2700 default:
2701 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2702 break;
2703 }
2704 return 0;
2705 }
2706
2707 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2708 uint32_t rtattr_len)
2709 {
2710 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2711 target_to_host_data_link_rtattr);
2712 }
2713
2714 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2715 uint32_t rtattr_len)
2716 {
2717 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2718 target_to_host_data_addr_rtattr);
2719 }
2720
2721 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2722 uint32_t rtattr_len)
2723 {
2724 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2725 target_to_host_data_route_rtattr);
2726 }
2727
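/* Mirror of host_to_target_data_route for messages sent by the guest.
 * RTM_GETLINK and RTM_GETROUTE bodies are left untouched; unsupported
 * types return -TARGET_EOPNOTSUPP.
 */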
2728 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2729 {
2730 struct ifinfomsg *ifi;
2731 struct ifaddrmsg *ifa;
2732 struct rtmsg *rtm;
2733
2734 switch (nlh->nlmsg_type) {
2735 case RTM_GETLINK:
2736 break;
2737 case RTM_NEWLINK:
2738 case RTM_DELLINK:
2739 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2740 ifi = NLMSG_DATA(nlh);
2741 ifi->ifi_type = tswap16(ifi->ifi_type);
2742 ifi->ifi_index = tswap32(ifi->ifi_index);
2743 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2744 ifi->ifi_change = tswap32(ifi->ifi_change);
2745 target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2746 NLMSG_LENGTH(sizeof(*ifi)));
2747 }
2748 break;
2749 case RTM_GETADDR:
2750 case RTM_NEWADDR:
2751 case RTM_DELADDR:
2752 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2753 ifa = NLMSG_DATA(nlh);
2754 ifa->ifa_index = tswap32(ifa->ifa_index);
2755 target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2756 NLMSG_LENGTH(sizeof(*ifa)));
2757 }
2758 break;
2759 case RTM_GETROUTE:
2760 break;
2761 case RTM_NEWROUTE:
2762 case RTM_DELROUTE:
2763 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2764 rtm = NLMSG_DATA(nlh);
2765 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2766 target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2767 NLMSG_LENGTH(sizeof(*rtm)));
2768 }
2769 break;
2770 default:
2771 return -TARGET_EOPNOTSUPP;
2772 }
2773 return 0;
2774 }
2775
2776 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2777 {
2778 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2779 }
2780 #endif /* CONFIG_RTNETLINK */
2781
2782 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2783 {
2784 switch (nlh->nlmsg_type) {
2785 default:
2786 gemu_log("Unknown host audit message type %d\n",
2787 nlh->nlmsg_type);
2788 return -TARGET_EINVAL;
2789 }
2790 return 0;
2791 }
2792
2793 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2794 size_t len)
2795 {
2796 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2797 }
2798
2799 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2800 {
2801 switch (nlh->nlmsg_type) {
2802 case AUDIT_USER:
2803 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2804 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2805 break;
2806 default:
2807 gemu_log("Unknown target audit message type %d\n",
2808 nlh->nlmsg_type);
2809 return -TARGET_EINVAL;
2810 }
2811
2812 return 0;
2813 }
2814
2815 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2816 {
2817 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2818 }
2819
2820 /* do_setsockopt() must return target values and target errnos. */
2821 static abi_long do_setsockopt(int sockfd, int level, int optname,
2822 abi_ulong optval_addr, socklen_t optlen)
2823 {
2824 abi_long ret;
2825 int val;
2826 struct ip_mreqn *ip_mreq;
2827 struct ip_mreq_source *ip_mreq_source;
2828
2829 switch(level) {
2830 case SOL_TCP:
2831 /* TCP options all take an 'int' value. */
2832 if (optlen < sizeof(uint32_t))
2833 return -TARGET_EINVAL;
2834
2835 if (get_user_u32(val, optval_addr))
2836 return -TARGET_EFAULT;
2837 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2838 break;
2839 case SOL_IP:
2840 switch(optname) {
2841 case IP_TOS:
2842 case IP_TTL:
2843 case IP_HDRINCL:
2844 case IP_ROUTER_ALERT:
2845 case IP_RECVOPTS:
2846 case IP_RETOPTS:
2847 case IP_PKTINFO:
2848 case IP_MTU_DISCOVER:
2849 case IP_RECVERR:
2850 case IP_RECVTTL:
2851 case IP_RECVTOS:
2852 #ifdef IP_FREEBIND
2853 case IP_FREEBIND:
2854 #endif
2855 case IP_MULTICAST_TTL:
2856 case IP_MULTICAST_LOOP:
2857 val = 0;
2858 if (optlen >= sizeof(uint32_t)) {
2859 if (get_user_u32(val, optval_addr))
2860 return -TARGET_EFAULT;
2861 } else if (optlen >= 1) {
2862 if (get_user_u8(val, optval_addr))
2863 return -TARGET_EFAULT;
2864 }
2865 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2866 break;
2867 case IP_ADD_MEMBERSHIP:
2868 case IP_DROP_MEMBERSHIP:
2869 if (optlen < sizeof (struct target_ip_mreq) ||
2870 optlen > sizeof (struct target_ip_mreqn))
2871 return -TARGET_EINVAL;
2872
2873 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2874 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2875 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2876 break;
2877
2878 case IP_BLOCK_SOURCE:
2879 case IP_UNBLOCK_SOURCE:
2880 case IP_ADD_SOURCE_MEMBERSHIP:
2881 case IP_DROP_SOURCE_MEMBERSHIP:
2882 if (optlen != sizeof (struct target_ip_mreq_source))
2883 return -TARGET_EINVAL;
2884
2885 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
if (!ip_mreq_source) {
return -TARGET_EFAULT;
}
2886 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2887 unlock_user(ip_mreq_source, optval_addr, 0);
2888 break;
2889
2890 default:
2891 goto unimplemented;
2892 }
2893 break;
2894 case SOL_IPV6:
2895 switch (optname) {
2896 case IPV6_MTU_DISCOVER:
2897 case IPV6_MTU:
2898 case IPV6_V6ONLY:
2899 case IPV6_RECVPKTINFO:
2900 case IPV6_UNICAST_HOPS:
2901 case IPV6_RECVERR:
2902 case IPV6_RECVHOPLIMIT:
2903 case IPV6_2292HOPLIMIT:
2904 case IPV6_CHECKSUM:
2905 val = 0;
2906 if (optlen < sizeof(uint32_t)) {
2907 return -TARGET_EINVAL;
2908 }
2909 if (get_user_u32(val, optval_addr)) {
2910 return -TARGET_EFAULT;
2911 }
2912 ret = get_errno(setsockopt(sockfd, level, optname,
2913 &val, sizeof(val)));
2914 break;
2915 case IPV6_PKTINFO:
2916 {
2917 struct in6_pktinfo pki;
2918
2919 if (optlen < sizeof(pki)) {
2920 return -TARGET_EINVAL;
2921 }
2922
2923 if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2924 return -TARGET_EFAULT;
2925 }
2926
2927 pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2928
2929 ret = get_errno(setsockopt(sockfd, level, optname,
2930 &pki, sizeof(pki)));
2931 break;
2932 }
2933 default:
2934 goto unimplemented;
2935 }
2936 break;
2937 case SOL_ICMPV6:
2938 switch (optname) {
2939 case ICMPV6_FILTER:
2940 {
2941 struct icmp6_filter icmp6f;
2942
2943 if (optlen > sizeof(icmp6f)) {
2944 optlen = sizeof(icmp6f);
2945 }
2946
2947 if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2948 return -TARGET_EFAULT;
2949 }
2950
2951 for (val = 0; val < 8; val++) {
2952 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2953 }
2954
2955 ret = get_errno(setsockopt(sockfd, level, optname,
2956 &icmp6f, optlen));
2957 break;
2958 }
2959 default:
2960 goto unimplemented;
2961 }
2962 break;
2963 case SOL_RAW:
2964 switch (optname) {
2965 case ICMP_FILTER:
2966 case IPV6_CHECKSUM:
2967 /* these take a u32 value */
2968 if (optlen < sizeof(uint32_t)) {
2969 return -TARGET_EINVAL;
2970 }
2971
2972 if (get_user_u32(val, optval_addr)) {
2973 return -TARGET_EFAULT;
2974 }
2975 ret = get_errno(setsockopt(sockfd, level, optname,
2976 &val, sizeof(val)));
2977 break;
2978
2979 default:
2980 goto unimplemented;
2981 }
2982 break;
2983 case TARGET_SOL_SOCKET:
2984 switch (optname) {
2985 case TARGET_SO_RCVTIMEO:
2986 {
2987 struct timeval tv;
2988
2989 optname = SO_RCVTIMEO;
2990
2991 set_timeout:
2992 if (optlen != sizeof(struct target_timeval)) {
2993 return -TARGET_EINVAL;
2994 }
2995
2996 if (copy_from_user_timeval(&tv, optval_addr)) {
2997 return -TARGET_EFAULT;
2998 }
2999
3000 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3001 &tv, sizeof(tv)));
3002 return ret;
3003 }
3004 case TARGET_SO_SNDTIMEO:
3005 optname = SO_SNDTIMEO;
3006 goto set_timeout;
3007 case TARGET_SO_ATTACH_FILTER:
3008 {
3009 struct target_sock_fprog *tfprog;
3010 struct target_sock_filter *tfilter;
3011 struct sock_fprog fprog;
3012 struct sock_filter *filter;
3013 int i;
3014
3015 if (optlen != sizeof(*tfprog)) {
3016 return -TARGET_EINVAL;
3017 }
3018 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
3019 return -TARGET_EFAULT;
3020 }
3021 if (!lock_user_struct(VERIFY_READ, tfilter,
3022 tswapal(tfprog->filter), 0)) {
3023 unlock_user_struct(tfprog, optval_addr, 1);
3024 return -TARGET_EFAULT;
3025 }
3026
3027 fprog.len = tswap16(tfprog->len);
3028 filter = g_try_new(struct sock_filter, fprog.len);
3029 if (filter == NULL) {
3030 unlock_user_struct(tfilter, tfprog->filter, 1);
3031 unlock_user_struct(tfprog, optval_addr, 1);
3032 return -TARGET_ENOMEM;
3033 }
3034 for (i = 0; i < fprog.len; i++) {
3035 filter[i].code = tswap16(tfilter[i].code);
3036 filter[i].jt = tfilter[i].jt;
3037 filter[i].jf = tfilter[i].jf;
3038 filter[i].k = tswap32(tfilter[i].k);
3039 }
3040 fprog.filter = filter;
3041
3042 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
3043 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
3044 g_free(filter);
3045
3046 unlock_user_struct(tfilter, tfprog->filter, 1);
3047 unlock_user_struct(tfprog, optval_addr, 1);
3048 return ret;
3049 }
3050 case TARGET_SO_BINDTODEVICE:
3051 {
3052 char *dev_ifname, *addr_ifname;
3053
3054 if (optlen > IFNAMSIZ - 1) {
3055 optlen = IFNAMSIZ - 1;
3056 }
3057 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
3058 if (!dev_ifname) {
3059 return -TARGET_EFAULT;
3060 }
3061 optname = SO_BINDTODEVICE;
3062 addr_ifname = alloca(IFNAMSIZ);
3063 memcpy(addr_ifname, dev_ifname, optlen);
3064 addr_ifname[optlen] = 0;
3065 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
3066 addr_ifname, optlen));
3067 unlock_user (dev_ifname, optval_addr, 0);
3068 return ret;
3069 }
3070 /* Options with 'int' argument. */
3071 case TARGET_SO_DEBUG:
3072 optname = SO_DEBUG;
3073 break;
3074 case TARGET_SO_REUSEADDR:
3075 optname = SO_REUSEADDR;
3076 break;
3077 case TARGET_SO_TYPE:
3078 optname = SO_TYPE;
3079 break;
3080 case TARGET_SO_ERROR:
3081 optname = SO_ERROR;
3082 break;
3083 case TARGET_SO_DONTROUTE:
3084 optname = SO_DONTROUTE;
3085 break;
3086 case TARGET_SO_BROADCAST:
3087 optname = SO_BROADCAST;
3088 break;
3089 case TARGET_SO_SNDBUF:
3090 optname = SO_SNDBUF;
3091 break;
3092 case TARGET_SO_SNDBUFFORCE:
3093 optname = SO_SNDBUFFORCE;
3094 break;
3095 case TARGET_SO_RCVBUF:
3096 optname = SO_RCVBUF;
3097 break;
3098 case TARGET_SO_RCVBUFFORCE:
3099 optname = SO_RCVBUFFORCE;
3100 break;
3101 case TARGET_SO_KEEPALIVE:
3102 optname = SO_KEEPALIVE;
3103 break;
3104 case TARGET_SO_OOBINLINE:
3105 optname = SO_OOBINLINE;
3106 break;
3107 case TARGET_SO_NO_CHECK:
3108 optname = SO_NO_CHECK;
3109 break;
3110 case TARGET_SO_PRIORITY:
3111 optname = SO_PRIORITY;
3112 break;
3113 #ifdef SO_BSDCOMPAT
3114 case TARGET_SO_BSDCOMPAT:
3115 optname = SO_BSDCOMPAT;
3116 break;
3117 #endif
3118 case TARGET_SO_PASSCRED:
3119 optname = SO_PASSCRED;
3120 break;
3121 case TARGET_SO_PASSSEC:
3122 optname = SO_PASSSEC;
3123 break;
3124 case TARGET_SO_TIMESTAMP:
3125 optname = SO_TIMESTAMP;
3126 break;
3127 case TARGET_SO_RCVLOWAT:
3128 optname = SO_RCVLOWAT;
3129 break;
3131 default:
3132 goto unimplemented;
3133 }
3134 if (optlen < sizeof(uint32_t))
3135 return -TARGET_EINVAL;
3136
3137 if (get_user_u32(val, optval_addr))
3138 return -TARGET_EFAULT;
3139 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3140 break;
3141 default:
3142 unimplemented:
3143 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3144 ret = -TARGET_ENOPROTOOPT;
3145 }
3146 return ret;
3147 }
3148
3149 /* do_getsockopt() must return target values and target errnos. */
3150 static abi_long do_getsockopt(int sockfd, int level, int optname,
3151 abi_ulong optval_addr, abi_ulong optlen)
3152 {
3153 abi_long ret;
3154 int len, val;
3155 socklen_t lv;
3156
3157 switch(level) {
3158 case TARGET_SOL_SOCKET:
3159 level = SOL_SOCKET;
3160 switch (optname) {
3161 /* These don't just return a single integer */
3162 case TARGET_SO_LINGER:
3163 case TARGET_SO_RCVTIMEO:
3164 case TARGET_SO_SNDTIMEO:
3165 case TARGET_SO_PEERNAME:
3166 goto unimplemented;
3167 case TARGET_SO_PEERCRED: {
3168 struct ucred cr;
3169 socklen_t crlen;
3170 struct target_ucred *tcr;
3171
3172 if (get_user_u32(len, optlen)) {
3173 return -TARGET_EFAULT;
3174 }
3175 if (len < 0) {
3176 return -TARGET_EINVAL;
3177 }
3178
3179 crlen = sizeof(cr);
3180 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
3181 &cr, &crlen));
3182 if (ret < 0) {
3183 return ret;
3184 }
3185 if (len > crlen) {
3186 len = crlen;
3187 }
3188 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3189 return -TARGET_EFAULT;
3190 }
3191 __put_user(cr.pid, &tcr->pid);
3192 __put_user(cr.uid, &tcr->uid);
3193 __put_user(cr.gid, &tcr->gid);
3194 unlock_user_struct(tcr, optval_addr, 1);
3195 if (put_user_u32(len, optlen)) {
3196 return -TARGET_EFAULT;
3197 }
3198 break;
3199 }
3200 /* Options with 'int' argument. */
3201 case TARGET_SO_DEBUG:
3202 optname = SO_DEBUG;
3203 goto int_case;
3204 case TARGET_SO_REUSEADDR:
3205 optname = SO_REUSEADDR;
3206 goto int_case;
3207 case TARGET_SO_TYPE:
3208 optname = SO_TYPE;
3209 goto int_case;
3210 case TARGET_SO_ERROR:
3211 optname = SO_ERROR;
3212 goto int_case;
3213 case TARGET_SO_DONTROUTE:
3214 optname = SO_DONTROUTE;
3215 goto int_case;
3216 case TARGET_SO_BROADCAST:
3217 optname = SO_BROADCAST;
3218 goto int_case;
3219 case TARGET_SO_SNDBUF:
3220 optname = SO_SNDBUF;
3221 goto int_case;
3222 case TARGET_SO_RCVBUF:
3223 optname = SO_RCVBUF;
3224 goto int_case;
3225 case TARGET_SO_KEEPALIVE:
3226 optname = SO_KEEPALIVE;
3227 goto int_case;
3228 case TARGET_SO_OOBINLINE:
3229 optname = SO_OOBINLINE;
3230 goto int_case;
3231 case TARGET_SO_NO_CHECK:
3232 optname = SO_NO_CHECK;
3233 goto int_case;
3234 case TARGET_SO_PRIORITY:
3235 optname = SO_PRIORITY;
3236 goto int_case;
3237 #ifdef SO_BSDCOMPAT
3238 case TARGET_SO_BSDCOMPAT:
3239 optname = SO_BSDCOMPAT;
3240 goto int_case;
3241 #endif
3242 case TARGET_SO_PASSCRED:
3243 optname = SO_PASSCRED;
3244 goto int_case;
3245 case TARGET_SO_TIMESTAMP:
3246 optname = SO_TIMESTAMP;
3247 goto int_case;
3248 case TARGET_SO_RCVLOWAT:
3249 optname = SO_RCVLOWAT;
3250 goto int_case;
3251 case TARGET_SO_ACCEPTCONN:
3252 optname = SO_ACCEPTCONN;
3253 goto int_case;
3254 default:
3255 goto int_case;
3256 }
3257 break;
3258 case SOL_TCP:
3259 /* TCP options all take an 'int' value. */
3260 int_case:
3261 if (get_user_u32(len, optlen))
3262 return -TARGET_EFAULT;
3263 if (len < 0)
3264 return -TARGET_EINVAL;
3265 lv = sizeof(lv);
3266 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3267 if (ret < 0)
3268 return ret;
3269 if (optname == SO_TYPE) {
3270 val = host_to_target_sock_type(val);
3271 }
3272 if (len > lv)
3273 len = lv;
3274 if (len == 4) {
3275 if (put_user_u32(val, optval_addr))
3276 return -TARGET_EFAULT;
3277 } else {
3278 if (put_user_u8(val, optval_addr))
3279 return -TARGET_EFAULT;
3280 }
3281 if (put_user_u32(len, optlen))
3282 return -TARGET_EFAULT;
3283 break;
3284 case SOL_IP:
3285 switch(optname) {
3286 case IP_TOS:
3287 case IP_TTL:
3288 case IP_HDRINCL:
3289 case IP_ROUTER_ALERT:
3290 case IP_RECVOPTS:
3291 case IP_RETOPTS:
3292 case IP_PKTINFO:
3293 case IP_MTU_DISCOVER:
3294 case IP_RECVERR:
3295 case IP_RECVTOS:
3296 #ifdef IP_FREEBIND
3297 case IP_FREEBIND:
3298 #endif
3299 case IP_MULTICAST_TTL:
3300 case IP_MULTICAST_LOOP:
3301 if (get_user_u32(len, optlen))
3302 return -TARGET_EFAULT;
3303 if (len < 0)
3304 return -TARGET_EINVAL;
3305 lv = sizeof(lv);
3306 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3307 if (ret < 0)
3308 return ret;
3309 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3310 len = 1;
3311 if (put_user_u32(len, optlen)
3312 || put_user_u8(val, optval_addr))
3313 return -TARGET_EFAULT;
3314 } else {
3315 if (len >