Merge remote-tracking branch 'remotes/sstabellini/xen-2014-11-14' into staging
[qemu.git] / translate-all.c
1 /*
2 * Host code generation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #ifdef _WIN32
20 #include <windows.h>
21 #else
22 #include <sys/types.h>
23 #include <sys/mman.h>
24 #endif
25 #include <stdarg.h>
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <string.h>
29 #include <inttypes.h>
30
31 #include "config.h"
32
33 #include "qemu-common.h"
34 #define NO_CPU_IO_DEFS
35 #include "cpu.h"
36 #include "trace.h"
37 #include "disas/disas.h"
38 #include "tcg.h"
39 #if defined(CONFIG_USER_ONLY)
40 #include "qemu.h"
41 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
42 #include <sys/param.h>
43 #if __FreeBSD_version >= 700104
44 #define HAVE_KINFO_GETVMMAP
45 #define sigqueue sigqueue_freebsd /* avoid redefinition */
46 #include <sys/time.h>
47 #include <sys/proc.h>
48 #include <machine/profile.h>
49 #define _KERNEL
50 #include <sys/user.h>
51 #undef _KERNEL
52 #undef sigqueue
53 #include <libutil.h>
54 #endif
55 #endif
56 #else
57 #include "exec/address-spaces.h"
58 #endif
59
60 #include "exec/cputlb.h"
61 #include "translate-all.h"
62 #include "qemu/timer.h"
63
64 //#define DEBUG_TB_INVALIDATE
65 //#define DEBUG_FLUSH
66 /* make various TB consistency checks */
67 //#define DEBUG_TB_CHECK
68
69 #if !defined(CONFIG_USER_ONLY)
70 /* TB consistency checks only implemented for usermode emulation. */
71 #undef DEBUG_TB_CHECK
72 #endif
73
74 #define SMC_BITMAP_USE_THRESHOLD 10
75
/* Bookkeeping attached to each guest code page (leaf of l1_map). */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    /* lazily built bitmap of translated code within the page;
       see build_page_bitmap() / invalidate_page_bitmap() */
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    /* PAGE_* protection flags, user-mode emulation only */
    unsigned long flags;
#endif
} PageDesc;
87
88 /* In system mode we want L1_MAP to be based on ram offsets,
89 while in user mode we want it to be based on virtual addresses. */
90 #if !defined(CONFIG_USER_ONLY)
91 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
92 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
93 #else
94 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
95 #endif
96 #else
97 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
98 #endif
99
100 /* Size of the L2 (and L3, etc) page tables. */
101 #define V_L2_BITS 10
102 #define V_L2_SIZE (1 << V_L2_BITS)
103
104 /* The bits remaining after N lower levels of page tables. */
105 #define V_L1_BITS_REM \
106 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)
107
108 #if V_L1_BITS_REM < 4
109 #define V_L1_BITS (V_L1_BITS_REM + V_L2_BITS)
110 #else
111 #define V_L1_BITS V_L1_BITS_REM
112 #endif
113
114 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
115
116 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
117
118 uintptr_t qemu_real_host_page_size;
119 uintptr_t qemu_host_page_size;
120 uintptr_t qemu_host_page_mask;
121
122 /* This is a multi-level map on the virtual address space.
123 The bottom level has pointers to PageDesc. */
124 static void *l1_map[V_L1_SIZE];
125
126 /* code generation context */
127 TCGContext tcg_ctx;
128
129 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
130 tb_page_addr_t phys_page2);
131 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
132
/* Initialise the global TCG translation context.  Must be called once
   before any code generation takes place. */
void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}
137
/* Translate the guest code of 'tb' and emit host machine code for it.

   '*gen_code_size_ptr' is set to the size of the generated code (host
   code).

   NOTE(review): older comments promised a non-zero return when the
   very first instruction is invalid, but this implementation always
   returns 0. */
int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    /* Build the TCG intermediate representation from the guest code. */
    gen_intermediate_code(env, tb);

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    /* 0xffff marks the two chained-jump offsets as "not yet set". */
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    /* Direct jumps are patched in place inside the generated code. */
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    /* Indirect jumps load the next TB address from tb->tb_next. */
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}
200
/* The cpu state corresponding to 'searched_pc' is restored.
   The TB is re-translated in "search pc" mode so that the mapping from
   host code offsets back to guest instructions can be recovered.
   Returns 0 on success, -1 if 'searched_pc' does not fall inside this
   TB's generated code. */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    CPUArchState *env = cpu->env_ptr;
    TCGContext *s = &tcg_ctx;
    int j;
    uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    tcg_func_start(s);

    /* Re-generate the intermediate code, recording per-op guest PCs. */
    gen_intermediate_code_pc(env, tb);

    if (use_icount) {
        /* Reset the cycle counter to the start of the block. */
        cpu->icount_decr.u16.low += tb->icount;
        /* Clear the IO flag. */
        cpu->can_do_io = 0;
    }

    /* find opc index corresponding to search_pc */
    tc_ptr = (uintptr_t)tb->tc_ptr;
    if (searched_pc < tc_ptr)
        return -1;

    /* Mirror the jump setup used by cpu_gen_code() so the host code
       layout of the re-translation matches the original exactly. */
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif
    j = tcg_gen_code_search_pc(s, (tcg_insn_unit *)tc_ptr,
                               searched_pc - tc_ptr);
    if (j < 0)
        return -1;
    /* now find start of instruction before */
    while (s->gen_opc_instr_start[j] == 0) {
        j--;
    }
    /* Credit back the cycles of the not-yet-executed tail of the TB. */
    cpu->icount_decr.u16.low -= s->gen_opc_icount[j];

    restore_state_to_opc(env, tb, j);

#ifdef CONFIG_PROFILER
    s->restore_time += profile_getclock() - ti;
    s->restore_count++;
#endif
    return 0;
}
259
260 bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
261 {
262 TranslationBlock *tb;
263
264 tb = tb_find_pc(retaddr);
265 if (tb) {
266 cpu_restore_state_from_tb(cpu, tb, retaddr);
267 return true;
268 }
269 return false;
270 }
271
#ifdef _WIN32
/* Make [addr, addr+size) readable, writable and executable. */
static inline void map_exec(void *addr, long size)
{
    DWORD prev;

    VirtualProtect(addr, size, PAGE_EXECUTE_READWRITE, &prev);
}
#else
/* Make every host page overlapping [addr, addr+size) RWX. */
static inline void map_exec(void *addr, long size)
{
    unsigned long mask = getpagesize() - 1;
    unsigned long lo = (unsigned long)addr & ~mask;
    unsigned long hi = ((unsigned long)addr + size + mask) & ~mask;

    mprotect((void *)lo, hi - lo, PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
296
297 void page_size_init(void)
298 {
299 /* NOTE: we can always suppose that qemu_host_page_size >=
300 TARGET_PAGE_SIZE */
301 qemu_real_host_page_size = getpagesize();
302 if (qemu_host_page_size == 0) {
303 qemu_host_page_size = qemu_real_host_page_size;
304 }
305 if (qemu_host_page_size < TARGET_PAGE_SIZE) {
306 qemu_host_page_size = TARGET_PAGE_SIZE;
307 }
308 qemu_host_page_mask = ~(qemu_host_page_size - 1);
309 }
310
/* One-time page bookkeeping setup.  On BSD user-mode emulation we also
   mark every host mapping that already exists as PAGE_RESERVED so the
   guest cannot map over it. */
static void page_init(void)
{
    page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        /* Enumerate this process's mappings via libutil. */
        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        /* Mapping extends past the guest address space:
                           reserve everything up to the top. */
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        /* Fallback: parse the Linux-compat maps file. */
        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
379
/* Walk (and, when 'alloc' is non-zero, populate) the multi-level
   l1_map radix tree and return the PageDesc for page 'index'.
   Returns NULL when 'alloc' is zero and an intermediate table does
   not exist yet. */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
    /* NOTE(review): mmap reports failure as MAP_FAILED, not NULL, so
       the NULL checks below would not detect an allocation failure
       here — confirm whether that is acceptable for callers. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated. */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1: tables of void* child pointers. */
    for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * V_L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    /* Last level: a block of PageDesc structures. */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * V_L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (V_L2_SIZE - 1));
}
429
/* Look up the PageDesc for 'index' without allocating; NULL if the
   page has never been touched. */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
434
435 #if !defined(CONFIG_USER_ONLY)
436 #define mmap_lock() do { } while (0)
437 #define mmap_unlock() do { } while (0)
438 #endif
439
440 #if defined(CONFIG_USER_ONLY)
441 /* Currently it is not recommended to allocate big chunks of data in
442 user mode. It will change when a dedicated libc will be used. */
443 /* ??? 64-bit hosts ought to have no problem mmaping data outside the
444 region in which the guest needs to run. Revisit this. */
445 #define USE_STATIC_CODE_GEN_BUFFER
446 #endif
447
448 /* ??? Should configure for this, not list operating systems here. */
449 #if (defined(__linux__) \
450 || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
451 || defined(__DragonFly__) || defined(__OpenBSD__) \
452 || defined(__NetBSD__))
453 # define USE_MMAP
454 #endif
455
456 /* Minimum size of the code gen buffer. This number is randomly chosen,
457 but not so small that we can't have a fair number of TB's live. */
458 #define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024)
459
460 /* Maximum size of the code gen buffer we'd like to use. Unless otherwise
461 indicated, this is constrained by the range of direct branches on the
462 host cpu, as used by the TCG implementation of goto_tb. */
463 #if defined(__x86_64__)
464 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
465 #elif defined(__sparc__)
466 # define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024)
467 #elif defined(__aarch64__)
468 # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
469 #elif defined(__arm__)
470 # define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024)
471 #elif defined(__s390x__)
472 /* We have a +- 4GB range on the branches; leave some slop. */
473 # define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024)
474 #elif defined(__mips__)
475 /* We have a 256MB branch region, but leave room to make sure the
476 main executable is also within that region. */
477 # define MAX_CODE_GEN_BUFFER_SIZE (128ul * 1024 * 1024)
478 #else
479 # define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
480 #endif
481
482 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
483
484 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
485 (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
486 ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
487
488 static inline size_t size_code_gen_buffer(size_t tb_size)
489 {
490 /* Size the buffer. */
491 if (tb_size == 0) {
492 #ifdef USE_STATIC_CODE_GEN_BUFFER
493 tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
494 #else
495 /* ??? Needs adjustments. */
496 /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
497 static buffer, we could size this on RESERVED_VA, on the text
498 segment size of the executable, or continue to use the default. */
499 tb_size = (unsigned long)(ram_size / 4);
500 #endif
501 }
502 if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
503 tb_size = MIN_CODE_GEN_BUFFER_SIZE;
504 }
505 if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
506 tb_size = MAX_CODE_GEN_BUFFER_SIZE;
507 }
508 tcg_ctx.code_gen_buffer_size = tb_size;
509 return tb_size;
510 }
511
#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary. */
static inline bool cross_256mb(void *addr, size_t size)
{
    /* Compare all address bits from bit 28 upward.  The previous mask
       of 0xf0000000 ignored bits above 31 and could miss crossings on
       64-bit hosts. */
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size))
           & ~(uintptr_t)0x0fffffff;
}

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    /* The 256MB boundary inside [buf1, buf1 + size1).  Masking with
       0xf0000000 would clear the upper half of 64-bit pointers, so
       keep all high bits.  uintptr_t arithmetic also avoids the GNU
       void*-arithmetic extension. */
    void *buf2 = (void *)(((uintptr_t)buf1 + size1)
                          & ~(uintptr_t)0x0fffffff);
    size_t size2 = (uintptr_t)buf1 + size1 - (uintptr_t)buf2;

    size1 = (uintptr_t)buf2 - (uintptr_t)buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif
538
539 #ifdef USE_STATIC_CODE_GEN_BUFFER
540 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
541 __attribute__((aligned(CODE_GEN_ALIGN)));
542
543 static inline void *alloc_code_gen_buffer(void)
544 {
545 void *buf = static_code_gen_buffer;
546 #ifdef __mips__
547 if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
548 buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
549 }
550 #endif
551 map_exec(buf, tcg_ctx.code_gen_buffer_size);
552 return buf;
553 }
554 #elif defined(USE_MMAP)
/* mmap a fresh RWX region for generated code.  The preferred placement
   ('start'/'flags') is host specific so that direct branches from TBs
   can reach the prologue and each other.  Returns NULL on failure. */
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file. */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address. */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel. */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory. */
    if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
    /* ??? We ought to more explicitly manage layout for softmmu too. */
#  ifdef CONFIG_USER_ONLY
    start = 0x68000000ul;
#  elif _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address. */
        size_t size2, size1 = tcg_ctx.code_gen_buffer_size;
        void *buf2 = mmap(NULL, size1, PROT_WRITE | PROT_READ | PROT_EXEC,
                          flags, -1, 0);
        if (buf2 != MAP_FAILED) {
            if (!cross_256mb(buf2, size1)) {
                /* Success!  Use the new buffer. */
                munmap(buf, size1);
                return buf2;
            }
            /* Failure.  Work with what we had. */
            munmap(buf2, size1);
        }

        /* Split the original buffer.  Free the smaller half.
           split_cross_256mb() updates code_gen_buffer_size to the size
           of the kept half (size2 below). */
        buf2 = split_cross_256mb(buf, size1);
        size2 = tcg_ctx.code_gen_buffer_size;
        /* NOTE: arithmetic on void* here is a GNU extension. */
        munmap(buf + (buf == buf2 ? size2 : 0), size1 - size2);
        return buf2;
    }
#endif

    return buf;
}
625 #else
626 static inline void *alloc_code_gen_buffer(void)
627 {
628 void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);
629
630 if (buf == NULL) {
631 return NULL;
632 }
633
634 #ifdef __mips__
635 if (cross_256mb(buf, tcg_ctx.code_gen_buffer_size)) {
636 void *buf2 = g_malloc(tcg_ctx.code_gen_buffer_size);
637 if (buf2 != NULL && !cross_256mb(buf2, size1)) {
638 /* Success! Use the new buffer. */
639 free(buf);
640 buf = buf2;
641 } else {
642 /* Failure. Work with what we had. Since this is malloc
643 and not mmap, we can't free the other half. */
644 free(buf2);
645 buf = split_cross_256mb(buf, tcg_ctx.code_gen_buffer_size);
646 }
647 }
648 #endif
649
650 map_exec(buf, tcg_ctx.code_gen_buffer_size);
651 return buf;
652 }
653 #endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
654
/* Size and allocate the translation buffer, carve out the prologue
   area at its end, and allocate the TranslationBlock array. */
static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Hint the kernel to back the buffer with huge pages if available. */
    qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
                 QEMU_MADV_HUGEPAGE);

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable. */
    tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
        tcg_ctx.code_gen_buffer_size - 1024;
    tcg_ctx.code_gen_buffer_size -= 1024;

    /* Leave slack so that one maximum-size TB always fits. */
    tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
        CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs =
        g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
}
683
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* Register the generated-code region with the JIT debug interface. */
    tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}
700
/* Return true once the TCG code buffer has been set up, i.e. after
   tcg_exec_init() has run. */
bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}
705
706 /* Allocate a new translation block. Flush the translation buffer if
707 too many translation blocks or too much generated code. */
708 static TranslationBlock *tb_alloc(target_ulong pc)
709 {
710 TranslationBlock *tb;
711
712 if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
713 (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
714 tcg_ctx.code_gen_buffer_max_size) {
715 return NULL;
716 }
717 tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
718 tb->pc = pc;
719 tb->cflags = 0;
720 return tb;
721 }
722
723 void tb_free(TranslationBlock *tb)
724 {
725 /* In practice this is mostly used for single use temporary TB
726 Ignore the hard cases and just back up if this TB happens to
727 be the last one generated. */
728 if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
729 tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
730 tcg_ctx.code_gen_ptr = tb->tc_ptr;
731 tcg_ctx.tb_ctx.nb_tbs--;
732 }
733 }
734
735 static inline void invalidate_page_bitmap(PageDesc *p)
736 {
737 if (p->code_bitmap) {
738 g_free(p->code_bitmap);
739 p->code_bitmap = NULL;
740 }
741 p->code_write_count = 0;
742 }
743
744 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
745 static void page_flush_tb_1(int level, void **lp)
746 {
747 int i;
748
749 if (*lp == NULL) {
750 return;
751 }
752 if (level == 0) {
753 PageDesc *pd = *lp;
754
755 for (i = 0; i < V_L2_SIZE; ++i) {
756 pd[i].first_tb = NULL;
757 invalidate_page_bitmap(pd + i);
758 }
759 } else {
760 void **pp = *lp;
761
762 for (i = 0; i < V_L2_SIZE; ++i) {
763 page_flush_tb_1(level - 1, pp + i);
764 }
765 }
766 }
767
768 static void page_flush_tb(void)
769 {
770 int i;
771
772 for (i = 0; i < V_L1_SIZE; i++) {
773 page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
774 }
775 }
776
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUState *cpu = ENV_GET_CPU(env1);

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }
    tcg_ctx.tb_ctx.nb_tbs = 0;

    /* NOTE: 'cpu' is deliberately reused as the iteration variable here;
       its original value is only needed for cpu_abort above. */
    CPU_FOREACH(cpu) {
        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
    }

    memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
    page_flush_tb();

    /* Restart code generation from the beginning of the buffer. */
    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tcg_ctx.tb_ctx.tb_flush_count++;
}
808
809 #ifdef DEBUG_TB_CHECK
810
811 static void tb_invalidate_check(target_ulong address)
812 {
813 TranslationBlock *tb;
814 int i;
815
816 address &= TARGET_PAGE_MASK;
817 for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
818 for (tb = tb_ctx.tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
819 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
820 address >= tb->pc + tb->size)) {
821 printf("ERROR invalidate: address=" TARGET_FMT_lx
822 " PC=%08lx size=%04x\n",
823 address, (long)tb->pc, tb->size);
824 }
825 }
826 }
827 }
828
829 /* verify that all the pages have correct rights for code */
830 static void tb_page_check(void)
831 {
832 TranslationBlock *tb;
833 int i, flags1, flags2;
834
835 for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
836 for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
837 tb = tb->phys_hash_next) {
838 flags1 = page_get_flags(tb->pc);
839 flags2 = page_get_flags(tb->pc + tb->size - 1);
840 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
841 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
842 (long)tb->pc, tb->size, flags1, flags2);
843 }
844 }
845 }
846 }
847
848 #endif
849
850 static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
851 {
852 TranslationBlock *tb1;
853
854 for (;;) {
855 tb1 = *ptb;
856 if (tb1 == tb) {
857 *ptb = tb1->phys_hash_next;
858 break;
859 }
860 ptb = &tb1->phys_hash_next;
861 }
862 }
863
864 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
865 {
866 TranslationBlock *tb1;
867 unsigned int n1;
868
869 for (;;) {
870 tb1 = *ptb;
871 n1 = (uintptr_t)tb1 & 3;
872 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
873 if (tb1 == tb) {
874 *ptb = tb1->page_next[n1];
875 break;
876 }
877 ptb = &tb1->page_next[n1];
878 }
879 }
880
/* Remove this TB's outgoing jump 'n' from the circular list of jumps
   targeting the destination TB.  The low two bits of each pointer in
   that list encode which jump slot the link came from; the value 2
   marks the destination TB itself (list head). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                /* reached the list head: follow its jmp_first link */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
909
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    /* Retarget the jump at the code immediately following it inside
       this same TB, i.e. fall through to the TB epilogue. */
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
916
/* invalidate one TB: unlink it from the phys hash, from its page(s),
   from every CPU's jump cache, and break all jump chaining in and out
   of it.  'page_addr' names a page the caller is already processing
   itself (that PageDesc is skipped here); pass -1 to process both. */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (cpu->tb_jmp_cache[h] == tb) {
            cpu->tb_jmp_cache[h] = NULL;
        }
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2) {
            /* back at the list head: done */
            break;
        }
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}
974
/* Set 'len' consecutive bits starting at bit index 'start' in the
   bitmap 'tab' (bit 0 of each byte is the lowest-numbered bit). */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *p = tab + (start >> 3);

    if ((start >> 3) == (end >> 3)) {
        /* Whole run lies inside a single byte. */
        if (start < end) {
            *p |= (0xff << (start & 7)) & ~(0xff << (end & 7));
        }
        return;
    }

    /* Leading partial byte. */
    *p++ |= 0xff << (start & 7);

    /* Full bytes in the middle. */
    {
        int nfull = (end >> 3) - (start >> 3) - 1;

        while (nfull-- > 0) {
            *p++ = 0xff;
        }
    }

    /* Trailing partial byte, if any. */
    if (end & 7) {
        *p |= ~(0xff << (end & 7));
    }
}
1001
/* Build the bitmap of bytes covered by translated code in a page from
   the TBs intersecting it; used to filter self-modifying-code writes
   cheaply once a page is written often enough. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* Low two bits of the link select which of the TB's two pages
           this list entry belongs to. */
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            /* This is the TB's second page: its code starts at offset 0. */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
1030
/* Translate a new TB for (pc, cs_base, flags), emit its host code into
   the code buffer and link it into the page tables.  Flushes all TBs
   if the buffer is exhausted. */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
    }
    tb->tc_ptr = tcg_ctx.code_gen_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* Advance the code pointer, keeping CODE_GEN_ALIGN alignment. */
    tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
            code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
1068
1069 /*
1070 * Invalidate all TBs which intersect with the target physical address range
1071 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1072 * 'is_cpu_write_access' should be true if called from a real cpu write
1073 * access: the virtual CPU will exit the current TB if code is modified inside
1074 * this TB.
1075 */
1076 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
1077 int is_cpu_write_access)
1078 {
1079 while (start < end) {
1080 tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
1081 start &= TARGET_PAGE_MASK;
1082 start += TARGET_PAGE_SIZE;
1083 }
1084 }
1085
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *cpu = current_cpu;
#if defined(TARGET_HAS_PRECISE_SMC)
    CPUArchState *env = NULL;
#endif
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    /* Only try to identify the currently-executing TB on a real guest
       write; it is resolved lazily from cpu->mem_io_pc once the first
       overlapping TB is found.  */
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    /* After enough code writes to this page, build a per-byte bitmap of
       translated code so tb_invalidate_phys_page_fast() can filter out
       harmless writes cheaply.  */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }
#if defined(TARGET_HAS_PRECISE_SMC)
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all
       the code */
    tb = p->first_tb;
    while (tb != NULL) {
        /* The low 2 bits of each list pointer encode which of the TB's
           two page slots (0 or 1) this list entry belongs to.  */
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            /* Second page of a spanning TB: only the tail portion of
               the TB lives on this page.  */
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (cpu->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(cpu->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (cpu != NULL) {
                saved_tb = cpu->current_tb;
                cpu->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (cpu != NULL) {
                cpu->current_tb = saved_tb;
                /* Re-deliver any interrupt that arrived while
                   current_tb was cleared.  */
                if (cpu->interrupt_request && cpu->current_tb) {
                    cpu_interrupt(cpu, cpu->interrupt_request);
                }
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(cpu, start, cpu->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(cpu, NULL);
    }
#endif
}
1208
1209 /* len must be <= 8 and start must be a multiple of len */
1210 void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1211 {
1212 PageDesc *p;
1213 int offset, b;
1214
1215 #if 0
1216 if (1) {
1217 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1218 cpu_single_env->mem_io_vaddr, len,
1219 cpu_single_env->eip,
1220 cpu_single_env->eip +
1221 (intptr_t)cpu_single_env->segs[R_CS].base);
1222 }
1223 #endif
1224 p = page_find(start >> TARGET_PAGE_BITS);
1225 if (!p) {
1226 return;
1227 }
1228 if (p->code_bitmap) {
1229 offset = start & ~TARGET_PAGE_MASK;
1230 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1231 if (b & ((1 << len) - 1)) {
1232 goto do_invalidate;
1233 }
1234 } else {
1235 do_invalidate:
1236 tb_invalidate_phys_page_range(start, start + len, 1);
1237 }
1238 }
1239
1240 #if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB on the page containing 'addr' (user-mode self-
   modifying-code path).  'pc' is the host pc of the faulting write
   (0 if unknown) and 'puc' the signal context passed through to
   cpu_resume_from_signal(); 'locked' indicates the mmap lock is held
   and must be dropped before resuming from the signal.  */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc,
                                    bool locked)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    CPUArchState *env = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        /* Map the faulting host pc back to the TB that was executing.  */
        current_tb = tb_find_pc(pc);
    }
    if (cpu != NULL) {
        env = cpu->env_ptr;
    }
#endif
    while (tb != NULL) {
        /* Low 2 bits of the list pointer select which of the TB's two
           page links to follow next.  */
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        cpu->current_tb = NULL;
        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
        if (locked) {
            mmap_unlock();
        }
        cpu_resume_from_signal(cpu, puc);
    }
#endif
}
1308 #endif
1309
/* add the tb in the target page and protect it if necessary.
   'n' is the TB page slot (0 = first page, 1 = second page of a TB
   that spans a page boundary); 'page_addr' is the target physical
   page address for that slot.  */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    /* Push the TB on the page's singly-linked list; the slot index n is
       encoded in the low bits of the stored pointer.  */
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* A host page may contain several target pages; collect the
           combined protection and strip PAGE_WRITE from each.  */
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1368
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* Initialise the jump list: the TB starts as its own list head,
       with tag bits kept in the low 2 bits of the pointer.  */
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff) {
        tb_reset_jump(tb, 0);
    }
    if (tb->tb_next_offset[1] != 0xffff) {
        tb_reset_jump(tb, 1);
    }

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
1411
1412 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1413 tb[1].tc_ptr. Return NULL if not found */
1414 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1415 {
1416 int m_min, m_max, m;
1417 uintptr_t v;
1418 TranslationBlock *tb;
1419
1420 if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
1421 return NULL;
1422 }
1423 if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1424 tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
1425 return NULL;
1426 }
1427 /* binary search (cf Knuth) */
1428 m_min = 0;
1429 m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
1430 while (m_min <= m_max) {
1431 m = (m_min + m_max) >> 1;
1432 tb = &tcg_ctx.tb_ctx.tbs[m];
1433 v = (uintptr_t)tb->tc_ptr;
1434 if (v == tc_ptr) {
1435 return tb;
1436 } else if (tc_ptr < v) {
1437 m_max = m - 1;
1438 } else {
1439 m_min = m + 1;
1440 }
1441 }
1442 return &tcg_ctx.tb_ctx.tbs[m_max];
1443 }
1444
#if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
/* Invalidate any TB covering the single byte at guest physical address
   'addr' in address space 'as' (used for breakpoint insertion).  */
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
{
    hwaddr len = 1;
    MemoryRegion *region;
    ram_addr_t phys;

    region = address_space_translate(as, addr, &addr, &len, false);
    if (!memory_region_is_ram(region) && !memory_region_is_romd(region)) {
        /* Nothing translated can live outside RAM/ROMD regions.  */
        return;
    }
    phys = (memory_region_get_ram_addr(region) & TARGET_PAGE_MASK)
        + addr;
    tb_invalidate_phys_page_range(phys, phys + 1, 0);
}
#endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */
1462
1463 void tb_check_watchpoint(CPUState *cpu)
1464 {
1465 TranslationBlock *tb;
1466
1467 tb = tb_find_pc(cpu->mem_io_pc);
1468 if (!tb) {
1469 cpu_abort(cpu, "check_watchpoint: could not find TB for pc=%p",
1470 (void *)cpu->mem_io_pc);
1471 }
1472 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
1473 tb_phys_invalidate(tb, -1);
1474 }
1475
1476 #ifndef CONFIG_USER_ONLY
1477 /* mask must never be zero, except for A20 change call */
1478 static void tcg_handle_interrupt(CPUState *cpu, int mask)
1479 {
1480 int old_mask;
1481
1482 old_mask = cpu->interrupt_request;
1483 cpu->interrupt_request |= mask;
1484
1485 /*
1486 * If called from iothread context, wake the target cpu in
1487 * case its halted.
1488 */
1489 if (!qemu_cpu_is_self(cpu)) {
1490 qemu_cpu_kick(cpu);
1491 return;
1492 }
1493
1494 if (use_icount) {
1495 cpu->icount_decr.u16.high = 0xffff;
1496 if (!cpu_can_do_io(cpu)
1497 && (mask & ~old_mask) != 0) {
1498 cpu_abort(cpu, "Raised interrupt while not in I/O function");
1499 }
1500 } else {
1501 cpu->tcg_exit_req = 1;
1502 }
1503 }
1504
1505 CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1506
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB.

   Called when an I/O access happened mid-TB under icount: restore the
   CPU state to the faulting insn, retranslate the TB so it ends exactly
   on that insn (CF_LAST_IO), and restart execution.  'retaddr' is the
   host return address inside the generated code.  */
void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
{
#if defined(TARGET_MIPS) || defined(TARGET_SH4)
    CPUArchState *env = cpu->env_ptr;
#endif
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    /* icount budget the CPU had when it entered this TB.  */
    n = cpu->icount_decr.u16.low + tb->icount;
    cpu_restore_state_from_tb(cpu, tb, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - cpu->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        cpu->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        cpu->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK) {
        cpu_abort(cpu, "TB too big during recompile");
    }

    /* Retranslate with an insn budget of exactly n, allowing I/O on the
       last insn.  */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(cpu, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(cpu, NULL);
}
1569
1570 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
1571 {
1572 unsigned int i;
1573
1574 /* Discard jump cache entries for any tb which might potentially
1575 overlap the flushed page. */
1576 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1577 memset(&cpu->tb_jmp_cache[i], 0,
1578 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1579
1580 i = tb_jmp_cache_hash_page(addr);
1581 memset(&cpu->tb_jmp_cache[i], 0,
1582 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1583 }
1584
/* Print translation-buffer statistics (sizes, TB counts, jump-chaining
   and flush counters) to 'f' via 'cpu_fprintf'.  */
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    /* Aggregate per-TB statistics over the whole TB array.  */
    for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
        tb = &tcg_ctx.tb_ctx.tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size) {
            max_target_code_size = tb->size;
        }
        /* page_addr[1] != -1 means the TB spans a page boundary.  */
        if (tb->page_addr[1] != -1) {
            cross_page++;
        }
        /* tb_next_offset[] != 0xffff means a direct jump was emitted.  */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
                tcg_ctx.code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
            tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
            tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
                    tcg_ctx.tb_ctx.nb_tbs : 0,
            max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
            tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
                                     tcg_ctx.code_gen_buffer) /
                                     tcg_ctx.tb_ctx.nb_tbs : 0,
            target_code_size ? (double) (tcg_ctx.code_gen_ptr -
                                         tcg_ctx.code_gen_buffer) /
                                         target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
            tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
                                    tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0,
                direct_jmp2_count,
                tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
                        tcg_ctx.tb_ctx.nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n",
            tcg_ctx.tb_ctx.tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
1647
1648 #else /* CONFIG_USER_ONLY */
1649
/* User-mode variant: record the pending interrupt and request that the
   currently executing TB exits at its next opportunity.  */
void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}
1655
1656 /*
1657 * Walks guest process memory "regions" one by one
1658 * and calls callback function 'fn' for each region.
1659 */
/* State threaded through the page-table walk in walk_memory_regions().  */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn; /* callback invoked for each region */
    void *priv;                /* opaque argument forwarded to fn */
    target_ulong start;        /* start of the open region, or -1u if none */
    int prot;                  /* protection flags of the open region */
};
1666
1667 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1668 target_ulong end, int new_prot)
1669 {
1670 if (data->start != -1u) {
1671 int rc = data->fn(data->priv, data->start, end, data->prot);
1672 if (rc != 0) {
1673 return rc;
1674 }
1675 }
1676
1677 data->start = (new_prot ? end : -1u);
1678 data->prot = new_prot;
1679
1680 return 0;
1681 }
1682
/* Recursive helper: walk one subtree of the l1_map page table rooted at
   'lp', covering guest addresses starting at 'base'.  'level' 0 means
   'lp' points at a leaf array of PageDesc.  Returns the first nonzero
   callback result, or 0.  */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 target_ulong base, int level, void **lp)
{
    target_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        /* Hole in the table: close any open region at 'base'.  */
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        /* Leaf level: scan the PageDesc array for protection changes.  */
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                /* Protection changed: emit the previous region.  */
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        /* Interior level: recurse into each child table.  */
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pa = base | ((target_ulong)i <<
                (TARGET_PAGE_BITS + V_L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
1722
1723 int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1724 {
1725 struct walk_memory_regions_data data;
1726 uintptr_t i;
1727
1728 data.fn = fn;
1729 data.priv = priv;
1730 data.start = -1u;
1731 data.prot = 0;
1732
1733 for (i = 0; i < V_L1_SIZE; i++) {
1734 int rc = walk_memory_regions_1(&data, (target_ulong)i << (V_L1_SHIFT + TARGET_PAGE_BITS),
1735 V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
1736 if (rc != 0) {
1737 return rc;
1738 }
1739 }
1740
1741 return walk_memory_regions_end(&data, 0, 0);
1742 }
1743
1744 static int dump_region(void *priv, target_ulong start,
1745 target_ulong end, unsigned long prot)
1746 {
1747 FILE *f = (FILE *)priv;
1748
1749 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
1750 " "TARGET_FMT_lx" %c%c%c\n",
1751 start, end, end - start,
1752 ((prot & PAGE_READ) ? 'r' : '-'),
1753 ((prot & PAGE_WRITE) ? 'w' : '-'),
1754 ((prot & PAGE_EXEC) ? 'x' : '-'));
1755
1756 return 0;
1757 }
1758
1759 /* dump memory mappings */
1760 void page_dump(FILE *f)
1761 {
1762 const int length = sizeof(target_ulong) * 2;
1763 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
1764 length, "start", length, "end", length, "size", "prot");
1765 walk_memory_regions(f, dump_region);
1766 }
1767
1768 int page_get_flags(target_ulong address)
1769 {
1770 PageDesc *p;
1771
1772 p = page_find(address >> TARGET_PAGE_BITS);
1773 if (!p) {
1774 return 0;
1775 }
1776 return p->flags;
1777 }
1778
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE. The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        /* Record that the page was originally writable so that
           page_unprotect() can later restore PAGE_WRITE.  */
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL, false);
        }
        p->flags = flags;
    }
}
1816
/* Check that every page in [start, start + len) is mapped with at least
   the protections requested in 'flags'.  Returns 0 on success, -1 on
   failure.  When PAGE_WRITE is requested, pages that were write-
   protected because they contain translated code are unprotected on
   demand.  */
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    /* must do before we loose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            /* PAGE_WRITE_ORG set means the mapping is genuinely
               writable even if PAGE_WRITE is currently clear.  */
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
1871
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled.
   'pc'/'puc' are the host pc and signal context of the faulting write,
   forwarded to tb_invalidate_phys_page().  */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        /* The host page may span several target pages: restore write
           access to all of them and drop their translated code.  */
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc, true);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
1919 #endif /* CONFIG_USER_ONLY */