/*
 * mmap support for qemu
 *
 * Copyright (c) 2003 - 2008 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu.h"
#include "qemu-common.h"

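/*
 * The mmap lock serialises updates to the guest page tables and to the
 * host mappings backing them.  The per-thread lock count makes the lock
 * nest, so a thread that already holds it may take it again.
 */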
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}

/* Grab lock to make sure things are in a consistent state after fork(). */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

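/*
 * Change protection over a guest range in up to three steps: the partial
 * host page containing 'start', the partial host page containing the end,
 * and the run of whole host pages in between.
 */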
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

    qemu_log_mask(CPU_LOG_PAGE, "mprotect: start=0x" TARGET_ABI_FMT_lx
                  " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
                  prot & PROT_READ ? 'r' : '-',
                  prot & PROT_WRITE ? 'w' : '-',
                  prot & PROT_EXEC ? 'x' : '-');
    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start) {
        return -EINVAL;
    }
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
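    /* handle host page containing end */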
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start), host_end - host_start, prot);
        if (ret != 0) {
            goto error;
        }
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

/*
 * Map an incomplete host page.
 *
 * mmap_frag can be called with a valid fd, if flags doesn't contain one of
 * MAP_ANON, MAP_STACK, MAP_GUARD. If we need to map a page in those cases, we
 * pass fd == -1. However, if flags contains MAP_GUARD then MAP_ANON cannot be
 * added.
 *
 * * If fd is valid (not -1) we want to map the pages with MAP_ANON.
 * * If flags contains MAP_GUARD we don't want to add MAP_ANON because it
 *   will be rejected. See kern_mmap's enforcing of constraints for MAP_GUARD
 *   in sys/vm/vm_mmap.c.
 * * If flags contains MAP_ANON it doesn't matter if we add it or not.
 * * If flags contains MAP_STACK, mmap adds MAP_ANON when called, so it
 *   doesn't matter whether we add it or not either. See enforcing of
 *   constraints for MAP_STACK in kern_mmap.
 *
 * Don't add MAP_ANON for the flags that use fd == -1 without specifying the
 * flags directly, with the assumption that future flags that require fd == -1
 * will also not require MAP_ANON.
 */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
        if (addr < start || addr >= end) {
            prot1 |= page_get_flags(addr);
        }
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one. See also above. */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | ((fd != -1) ? MAP_ANON : 0), -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (fd != -1) {
        /*
         * msync() won't work here, so we return an error if write is
         * possible while it is a shared mapping.
         */
        if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
            (prot & PROT_WRITE)) {
            return -1;
        }

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
        }

        /* read the corresponding file data */
        if (pread(fd, g2h_untagged(start), end - start, offset) == -1) {
            return -1;
        }

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}

#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
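/* Next address to try when the guest does not ask for a fixed mapping. */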
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated a chunk of guest
 * address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong alignment)
{
    abi_ulong addr;
    abi_ulong end_addr;
    int prot;
    int looped = 0;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    size = HOST_PAGE_ALIGN(size) + alignment;
    end_addr = start + size;
    if (end_addr > reserved_va) {
        end_addr = reserved_va;
    }
    addr = end_addr - qemu_host_page_size;

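    /* Scan downwards for a run of unmapped pages large enough for 'size'. */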
    while (1) {
        if (addr > end_addr) {
            if (looped) {
                return (abi_ulong)-1;
            }
            end_addr = reserved_va;
            addr = end_addr - qemu_host_page_size;
            looped = 1;
            continue;
        }
        prot = page_get_flags(addr);
        if (prot) {
            end_addr = addr;
        }
        if (end_addr - addr >= size) {
            break;
        }
        addr -= qemu_host_page_size;
    }

    if (start == mmap_next_start) {
        mmap_next_start = addr;
    }
    /* addr is sufficiently low to align it up */
    if (alignment != 0) {
        addr = (addr + alignment) & ~(alignment - 1);
    }
    return addr;
}

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'. It must be called with mmap_lock() held.
 * Return -1 on error.
 */
static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size,
                                       abi_ulong alignment)
{
    void *ptr, *prev;
    abi_ulong addr;
    int flags;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size,
                                      (alignment != 0 ? 1 << alignment : 0));
    }

    addr = start;
    wrapped = repeat = 0;
    prev = NULL;
    flags = MAP_ANON | MAP_PRIVATE;
    if (alignment != 0) {
        flags |= MAP_ALIGNED(alignment);
    }

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   flags, -1, 0);

        /* ENOMEM if the host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success. */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target. */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory. */
                addr = 0;
                break;
            default:
                /* Fail. This unaligned block must be the last. */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory. If any repetition, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again. */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space. */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = TARGET_PAGE_SIZE;
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

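/* As above, but without any extra alignment requirement. */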
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    return mmap_find_vma_aligned(start, size, 0);
}

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, off_t offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        qemu_log("mmap: start=0x" TARGET_ABI_FMT_lx
                 " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
                 start, len,
                 prot & PROT_READ ? 'r' : '-',
                 prot & PROT_WRITE ? 'w' : '-',
                 prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_ALIGNMENT_MASK) {
            qemu_log("MAP_ALIGNED(%u) ",
                     (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
        }
        if (flags & MAP_GUARD) {
            qemu_log("MAP_GUARD ");
        }
        if (flags & MAP_FIXED) {
            qemu_log("MAP_FIXED ");
        }
        if (flags & MAP_ANON) {
            qemu_log("MAP_ANON ");
        }
        if (flags & MAP_EXCL) {
            qemu_log("MAP_EXCL ");
        }
        if (flags & MAP_PRIVATE) {
            qemu_log("MAP_PRIVATE ");
        }
        if (flags & MAP_SHARED) {
            qemu_log("MAP_SHARED ");
        }
        if (flags & MAP_NOCORE) {
            qemu_log("MAP_NOCORE ");
        }
        if (flags & MAP_STACK) {
            qemu_log("MAP_STACK ");
        }
        qemu_log("fd=%d offset=0x%lx\n", fd, offset);
    }

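    /* MAP_ANON mappings must pass fd == -1. */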
    if ((flags & MAP_ANON) && fd != -1) {
        errno = EINVAL;
        goto fail;
    }
    if (flags & MAP_STACK) {
        if ((fd != -1) || ((prot & (PROT_READ | PROT_WRITE)) !=
                           (PROT_READ | PROT_WRITE))) {
            errno = EINVAL;
            goto fail;
        }
    }
    if ((flags & MAP_GUARD) && (prot != PROT_NONE || fd != -1 ||
        offset != 0 || (flags & (MAP_SHARED | MAP_PRIVATE |
        /* MAP_PREFAULT | */ /* MAP_PREFAULT not in mman.h */
        MAP_PREFAULT_READ | MAP_ANON | MAP_STACK)) != 0)) {
        errno = EINVAL;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    if (len == 0) {
        errno = EINVAL;
        goto fail;
    }

    /* Check for overflows */
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0) {
        errno = ENOMEM;
        goto fail;
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /*
     * If the user is asking for the kernel to find a location, do that
     * before we truncate the length for mapping files below.
     */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        if ((flags & MAP_ALIGNMENT_MASK) != 0) {
            start = mmap_find_vma_aligned(real_start, host_len,
                (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
        } else {
            start = mmap_find_vma(real_start, host_len);
        }
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /*
     * When mapping files into a memory area larger than the file, accesses
     * to pages beyond the file size will cause a SIGBUS.
     *
     * For example, if mmapping a file of 100 bytes on a host with 4K pages
     * emulating a target with 8K pages, the target expects to be able to
     * access the first 8K. But the host will trap us on any access beyond
     * 4K.
     *
     * When emulating a target with a larger page size than the host's, we
     * may need to truncate file maps at EOF and add extra anonymous pages
     * up to the target's page boundary.
     */

    if ((qemu_real_host_page_size < qemu_host_page_size) && fd != -1) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /*
             * If so, truncate the file map at EOF, aligned to the host's
             * real page size. Additional anonymous maps will be created
             * beyond EOF.
             */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

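    /* Not MAP_FIXED: create the mapping at the address chosen above. */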
    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /*
         * Note: we prefer to control the mapping address. It is
         * especially important if qemu_host_page_size >
         * qemu_real_host_page_size.
         */
        p = mmap(g2h_untagged(start), host_len, prot,
                 flags | MAP_FIXED | ((fd != -1) ? MAP_ANON : 0), -1, 0);
        if (p == MAP_FAILED) {
            goto fail;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (fd != -1) {
            p = mmap(g2h_untagged(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space.
         * It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host combination, the host mmap()
         * handles this error correctly.
         */
        if (!guest_range_valid_untagged(start, len)) {
            errno = EINVAL;
            goto fail;
        }

        /*
         * Worst case: we cannot map the file because the offset is not
         * aligned, so we read it.
         */
        if (fd != -1 &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /*
             * msync() won't work here, so we return an error if write is
             * possible while it is a shared mapping.
             */
            if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANON,
                                  -1, 0);
            if (retaddr == -1) {
                goto fail;
            }
            if (pread(fd, g2h_untagged(start), len, offset) == -1) {
                goto fail;
            }
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* Reject the mapping if any page within the range is mapped */
        if ((flags & MAP_EXCL) && page_check_range(start, len, 0) < 0) {
            errno = EINVAL;
            goto fail;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1) {
                    goto fail;
                }
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1) {
                goto fail;
            }
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1) {
                goto fail;
            }
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;

            if (flags & MAP_ANON) {
                offset1 = 0;
            } else {
                offset1 = offset + real_start - start;
            }
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED) {
                goto fail;
            }
        }
    }
the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

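/*
 * Cover a range with anonymous PROT_NONE memory instead of unmapping it,
 * so that the pre-reserved guest address space (reserved_va) stays
 * reserved. Partial host pages at either edge are kept when other guest
 * pages within them are still valid.
 */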
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
    }
}

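/*
 * Unmap a guest range. Partial host pages at the edges are kept when other
 * guest pages within the same host page are still mapped; the guest pages
 * in the requested range are marked invalid either way.
 */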
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK) {
        return -EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0) {
        return -EINVAL;
    }
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}

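/* msync(2) on a guest range: page-align the request and forward it to the host. */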
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK) {
        return -EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start) {
        return -EINVAL;
    }
    if (end == start) {
        return 0;
    }

    start &= qemu_host_page_mask;
    return msync(g2h_untagged(start), end - start, flags);
}