/*
 * Page cache for QEMU
 * The cache is based on a hash of the page address
 *
 * Copyright 2012 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Orit Wasserman <owasserm@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
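
/*
 * Typical usage, as a minimal sketch (the cache size, page size and
 * address below are made up for illustration; the real callers live
 * in QEMU's migration code):
 *
 *     PageCache *cache = cache_init(1024, 4096);
 *     if (cache) {
 *         uint8_t page[4096] = { 0 };
 *
 *         if (cache_insert(cache, 0x1000, page) == 0 &&
 *             cache_is_cached(cache, 0x1000)) {
 *             uint8_t *cached = get_cached_data(cache, 0x1000);
 *             g_assert(memcmp(cached, page, 4096) == 0);
 *         }
 *         cache_fini(cache);
 *         g_free(cache);
 *     }
 *
 * cache_fini() releases the cached pages and the slot array but not
 * the PageCache struct itself, so the caller frees that separately.
 */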

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <string.h>
#include <sys/time.h>
#include <sys/types.h>
#include <stdbool.h>
#include <glib.h>

#include "qemu-common.h"
#include "migration/page_cache.h"

#ifdef DEBUG_CACHE
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "cache: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

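/*
 * One direct-mapped cache slot: the address of the guest page it
 * holds, an age stamp taken from a global insertion counter (used to
 * pick the most recently inserted page when slots collide on resize),
 * and a private copy of the page data.
 */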
typedef struct CacheItem CacheItem;

struct CacheItem {
    uint64_t it_addr;
    uint64_t it_age;
    uint8_t *it_data;
};

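/*
 * The cache proper: a flat array of max_num_items slots.  The slot
 * count is always a power of two, so an address can be mapped to a
 * slot with a mask instead of a modulo.
 */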
struct PageCache {
    CacheItem *page_cache;
    unsigned int page_size;
    int64_t max_num_items;
    uint64_t max_item_age;
    int64_t num_items;
};

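/*
 * cache_init: create a cache of at most @num_pages slots for pages of
 * @page_size bytes.  @num_pages is rounded down to a power of two.
 * Returns NULL if @num_pages is not positive or if allocation fails.
 */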
PageCache *cache_init(int64_t num_pages, unsigned int page_size)
{
    int64_t i;
    PageCache *cache;

    if (num_pages <= 0) {
        DPRINTF("invalid number of pages\n");
        return NULL;
    }

    /* We prefer not to abort if there is no memory */
    cache = g_try_malloc(sizeof(*cache));
    if (!cache) {
        DPRINTF("Failed to allocate cache\n");
        return NULL;
    }
    /* round down to the nearest power of 2 */
    if (!is_power_of_2(num_pages)) {
        num_pages = pow2floor(num_pages);
        DPRINTF("rounding down to %" PRId64 "\n", num_pages);
    }
    cache->page_size = page_size;
    cache->num_items = 0;
    cache->max_item_age = 0;
    cache->max_num_items = num_pages;

    DPRINTF("Setting cache buckets to %" PRId64 "\n", cache->max_num_items);

    /* We prefer not to abort if there is no memory */
    cache->page_cache = g_try_malloc(cache->max_num_items *
                                     sizeof(*cache->page_cache));
    if (!cache->page_cache) {
        DPRINTF("Failed to allocate cache->page_cache\n");
        g_free(cache);
        return NULL;
    }

    for (i = 0; i < cache->max_num_items; i++) {
        cache->page_cache[i].it_data = NULL;
        cache->page_cache[i].it_age = 0;
        cache->page_cache[i].it_addr = -1;
    }

    return cache;
}

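/*
 * cache_fini: free every cached page and the slot array.  The
 * PageCache struct itself is not freed; that is left to the caller.
 */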
void cache_fini(PageCache *cache)
{
    int64_t i;

    g_assert(cache);
    g_assert(cache->page_cache);

    for (i = 0; i < cache->max_num_items; i++) {
        g_free(cache->page_cache[i].it_data);
    }

    g_free(cache->page_cache);
    cache->page_cache = NULL;
}

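/*
 * Map an address to its slot: the page number (address divided by the
 * page size) masked by max_num_items - 1, which is a cheap modulo
 * because the slot count is a power of two.
 */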
static size_t cache_get_cache_pos(const PageCache *cache,
                                  uint64_t address)
{
    size_t pos;

    g_assert(cache->max_num_items);
    pos = (address / cache->page_size) & (cache->max_num_items - 1);
    return pos;
}

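/*
 * cache_is_cached: true only if the single slot that @addr maps to
 * currently holds exactly that address; a direct-mapped cache has
 * nowhere else to look.
 */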
bool cache_is_cached(const PageCache *cache, uint64_t addr)
{
    size_t pos;

    g_assert(cache);
    g_assert(cache->page_cache);

    pos = cache_get_cache_pos(cache, addr);

    return (cache->page_cache[pos].it_addr == addr);
}

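/* Return the slot @addr maps to, whether or not it currently holds @addr. */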
static CacheItem *cache_get_by_addr(const PageCache *cache, uint64_t addr)
{
    size_t pos;

    g_assert(cache);
    g_assert(cache->page_cache);

    pos = cache_get_cache_pos(cache, addr);

    return &cache->page_cache[pos];
}

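/*
 * get_cached_data: return the page data stored in the slot @addr maps
 * to.  Call cache_is_cached() first: the slot may hold a different
 * page, or no page at all (NULL).
 */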
uint8_t *get_cached_data(const PageCache *cache, uint64_t addr)
{
    return cache_get_by_addr(cache, addr)->it_data;
}

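/*
 * cache_insert: copy @pdata into the slot @addr maps to, replacing
 * whatever page was there before.  The slot's page buffer is
 * allocated lazily on first use.  Returns 0 on success, -1 if that
 * allocation fails.
 */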
int cache_insert(PageCache *cache, uint64_t addr, const uint8_t *pdata)
{
    CacheItem *it = NULL;

    g_assert(cache);
    g_assert(cache->page_cache);

    /* update the entry in place */
    it = cache_get_by_addr(cache, addr);

    /* allocate the page buffer on first insert into this slot */
    if (!it->it_data) {
        it->it_data = g_try_malloc(cache->page_size);
        if (!it->it_data) {
            DPRINTF("Error allocating page\n");
            return -1;
        }
        cache->num_items++;
    }

    memcpy(it->it_data, pdata, cache->page_size);

    it->it_age = ++cache->max_item_age;
    it->it_addr = addr;

    return 0;
}

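/*
 * cache_resize: rebuild the cache with a new slot count (rounded down
 * to a power of two) and rehash every occupied slot into it.  When two
 * old slots collide in the new table, the page with the higher age
 * stamp (the most recently inserted one) is kept.  Returns the new
 * slot count, or -1 on error.
 */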
int64_t cache_resize(PageCache *cache, int64_t new_num_pages)
{
    PageCache *new_cache;
    int64_t i;
    CacheItem *old_it, *new_it;

    g_assert(cache);

    /* the cache was never initialized */
    if (cache->page_cache == NULL) {
        return -1;
    }

    /* same size */
    if (pow2floor(new_num_pages) == cache->max_num_items) {
        return cache->max_num_items;
    }

    new_cache = cache_init(new_num_pages, cache->page_size);
    if (!new_cache) {
        DPRINTF("Error creating new cache\n");
        return -1;
    }

    /* move all data from the old cache */
    for (i = 0; i < cache->max_num_items; i++) {
        old_it = &cache->page_cache[i];
        if (old_it->it_addr != -1) {
            /* check for a collision; if there is one, keep the MRU page */
            new_it = cache_get_by_addr(new_cache, old_it->it_addr);
            if (new_it->it_data && new_it->it_age >= old_it->it_age) {
                /* the new slot already holds the MRU page; drop ours */
                g_free(old_it->it_data);
            } else {
                if (!new_it->it_data) {
                    new_cache->num_items++;
                }
                g_free(new_it->it_data);
                new_it->it_data = old_it->it_data;
                new_it->it_age = old_it->it_age;
                new_it->it_addr = old_it->it_addr;
            }
        }
    }

    g_free(cache->page_cache);
    cache->page_cache = new_cache->page_cache;
    cache->max_num_items = new_cache->max_num_items;
    cache->num_items = new_cache->num_items;

    g_free(new_cache);

    return cache->max_num_items;
}