/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */
#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"

ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    const char *mem_path,
                                    Error **errp);
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr);
void *qemu_get_ram_ptr(ram_addr_t addr);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);
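
/*
 * The helpers below operate on the global dirty-page bitmaps in
 * ram_list.dirty_memory[]: one bitmap per dirty-memory client
 * (DIRTY_MEMORY_VGA, DIRTY_MEMORY_CODE, DIRTY_MEMORY_MIGRATION),
 * with one bit per TARGET_PAGE_SIZE-sized page of guest RAM.
 */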

static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    unsigned long end, page, next;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    next = find_next_bit(ram_list.dirty_memory[client], end, page);

    return next < end;
}

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}
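
/*
 * A page only stops being "clean" once all three clients have it marked
 * dirty; cpu_physical_memory_is_clean() therefore returns true as long as
 * at least one client still sees the page as clean.
 */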
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    assert(client < DIRTY_MEMORY_NUM);
    set_bit(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
}
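
/*
 * Mark every page in [start, start + length) dirty for all three clients
 * and report the range to Xen via xen_modified_memory().
 */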
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length)
{
    unsigned long end, page;

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
    bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_CODE], page, end - page);
    xen_modified_memory(start, length);
}
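
/*
 * cpu_physical_memory_set_dirty_lebitmap() folds an externally produced,
 * little-endian dirty bitmap (one bit per host page, e.g. a dirty log
 * obtained from the hypervisor) into the per-client bitmaps above.  When
 * the start address is word aligned and host and target page sizes match
 * (hpratio == 1), whole bitmap words are OR-ed in directly; otherwise the
 * set bits are walked one by one and each is expanded to hpratio pages.
 */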
#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        long k;
        long nr = BITS_TO_LONGS(pages);

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);

                ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION][page + k] |= temp;
                ram_list.dirty_memory[DIRTY_MEMORY_VGA][page + k] |= temp;
                ram_list.dirty_memory[DIRTY_MEMORY_CODE][page + k] |= temp;
            }
        }
        xen_modified_memory(start, pages);
    } else {
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                                        TARGET_PAGE_SIZE * hpratio);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length,
                                                         unsigned client)
{
    unsigned long end, page;

    assert(client < DIRTY_MEMORY_NUM);
    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    bitmap_clear(ram_list.dirty_memory[client], page, end - page);
}
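
/*
 * Illustrative sketch (not part of the original interface): a dirty-page
 * scan over a RAM range, of the kind a migration or display client might
 * perform, could look roughly like this:
 *
 *     for (ram_addr_t a = base; a < base + size; a += TARGET_PAGE_SIZE) {
 *         if (cpu_physical_memory_get_dirty(a, TARGET_PAGE_SIZE, client)) {
 *             process_page(a);                      // hypothetical callback
 *             cpu_physical_memory_clear_dirty_range(a, TARGET_PAGE_SIZE,
 *                                                   client);
 *         }
 *     }
 */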

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client);

#endif  /* !CONFIG_USER_ONLY */