migration/dirtyrate: Implement calculate_dirtyrate() function
migration/dirtyrate.c
/*
 * Dirty rate implementation code
 *
 * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Authors:
 *  Chuan Zheng <zhengchuan@huawei.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include <zlib.h>
#include "qapi/error.h"
#include "cpu.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "exec/ramblock.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "qapi/qapi-commands-migration.h"
#include "migration.h"
#include "ram.h"
#include "dirtyrate.h"

static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED;
static struct DirtyRateStat DirtyStat;

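/*
 * Sleep until the requested sampling period has elapsed.  @msec is the
 * configured period in milliseconds and @initial_time is the
 * QEMU_CLOCK_REALTIME timestamp taken when sampling started; the return
 * value is the time that actually passed, which may exceed @msec if hashing
 * the sample pages already took longer than the period.
 */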
static int64_t set_sample_page_period(int64_t msec, int64_t initial_time)
{
    int64_t current_time;

    current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if ((current_time - initial_time) >= msec) {
        msec = current_time - initial_time;
    } else {
        g_usleep((msec + initial_time - current_time) * 1000);
    }

    return msec;
}

static bool is_sample_period_valid(int64_t sec)
{
    if (sec < MIN_FETCH_DIRTYRATE_TIME_SEC ||
        sec > MAX_FETCH_DIRTYRATE_TIME_SEC) {
        return false;
    }

    return true;
}

static int dirtyrate_set_state(int *state, int old_state, int new_state)
{
    assert(new_state < DIRTY_RATE_STATUS__MAX);
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        return 0;
    } else {
        return -1;
    }
}

static void reset_dirtyrate_stat(void)
{
    DirtyStat.total_dirty_samples = 0;
    DirtyStat.total_sample_count = 0;
    DirtyStat.total_block_mem_MB = 0;
    DirtyStat.dirty_rate = -1;
    DirtyStat.start_time = 0;
    DirtyStat.calc_time = 0;
}

static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
{
    DirtyStat.total_dirty_samples += info->sample_dirty_count;
    DirtyStat.total_sample_count += info->sample_pages_count;
    /* size of total pages in MB */
    DirtyStat.total_block_mem_MB += (info->ramblock_pages *
                                     TARGET_PAGE_SIZE) >> 20;
}

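/*
 * Convert the accumulated sampling statistics into a dirty rate in MB/s:
 *
 *   dirty_rate = (total_dirty_samples / total_sample_count)
 *                * total_block_mem_MB / (msec / 1000)
 *
 * i.e. the fraction of sampled pages that changed, scaled to the total
 * tracked memory and normalised to one second.  For example, 100 dirty
 * samples out of 400, measured over 1000 ms with 4096 MB of tracked RAM,
 * gives 100 * 4096 * 1000 / (400 * 1000) = 1024 MB/s.
 */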
static void update_dirtyrate(uint64_t msec)
{
    uint64_t dirtyrate;
    uint64_t total_dirty_samples = DirtyStat.total_dirty_samples;
    uint64_t total_sample_count = DirtyStat.total_sample_count;
    uint64_t total_block_mem_MB = DirtyStat.total_block_mem_MB;

    dirtyrate = total_dirty_samples * total_block_mem_MB *
                1000 / (total_sample_count * msec);

    DirtyStat.dirty_rate = dirtyrate;
}

/*
 * Hash one sampled page (TARGET_PAGE_SIZE bytes) of a ramblock.  @vfn is the
 * virtual frame number of the page, relative to the ramblock's host base
 * address.
 */
static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
                                      uint64_t vfn)
{
    uint32_t crc;

    crc = crc32(0, (info->ramblock_addr +
                    vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE);

    return crc;
}

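/*
 * Pick sample_pages_count random pages in the ramblock and record, for each
 * of them, the page's virtual frame number and its current CRC32 hash.  The
 * saved hashes are the "before" snapshot that compare_page_hash_info()
 * checks against once the sampling period has elapsed.
 */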
static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
{
    unsigned int sample_pages_count;
    int i;
    GRand *rand;

    sample_pages_count = info->sample_pages_count;

    /*
     * If the ramblock is smaller than one page, or no pages were selected
     * for sampling, report success so the block is simply skipped.
     */
    if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
        return true;
    }

    info->hash_result = g_try_malloc0_n(sample_pages_count,
                                        sizeof(uint32_t));
    if (!info->hash_result) {
        return false;
    }

    info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
                                            sizeof(uint64_t));
    if (!info->sample_page_vfn) {
        g_free(info->hash_result);
        return false;
    }

    rand = g_rand_new();
    for (i = 0; i < sample_pages_count; i++) {
        info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
                                                    info->ramblock_pages - 1);
        info->hash_result[i] = get_ramblock_vfn_hash(info,
                                                     info->sample_page_vfn[i]);
    }
    g_rand_free(rand);

    return true;
}

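/*
 * Fill in the per-ramblock bookkeeping used for sampling.  The number of
 * sampled pages scales with the block size: a block of N gigabytes gets
 * N * sample_pages_per_gigabytes samples, e.g. with 512 samples per GB a
 * 4 GB block is probed at 2048 random pages.
 */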
static void get_ramblock_dirty_info(RAMBlock *block,
                                    struct RamblockDirtyInfo *info,
                                    struct DirtyRateConfig *config)
{
    uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;

    /* Right shift 30 bits to calc ramblock size in GB */
    info->sample_pages_count = (qemu_ram_get_used_length(block) *
                                sample_pages_per_gigabytes) >> 30;
    /* Right shift TARGET_PAGE_BITS to calc page count */
    info->ramblock_pages = qemu_ram_get_used_length(block) >>
                           TARGET_PAGE_BITS;
    info->ramblock_addr = qemu_ram_get_host_addr(block);
    strcpy(info->idstr, qemu_ram_get_idstr(block));
}

static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count)
{
    int i;

    if (!infos) {
        return;
    }

    for (i = 0; i < count; i++) {
        g_free(infos[i].sample_page_vfn);
        g_free(infos[i].hash_result);
    }
    g_free(infos);
}

static bool skip_sample_ramblock(RAMBlock *block)
{
    /*
     * Sample only blocks of at least MIN_RAMBLOCK_SIZE (expressed in KB).
     */
    if (qemu_ram_get_used_length(block) < (MIN_RAMBLOCK_SIZE << 10)) {
        return true;
    }

    return false;
}

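/*
 * First pass of the measurement: walk every migratable ramblock that is big
 * enough to sample, allocate a RamblockDirtyInfo entry for it and snapshot
 * the hashes of its sampled pages.  On return, *block_dinfo holds the array
 * and *block_count the number of entries that were actually filled in.
 */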
static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
                                      struct DirtyRateConfig config,
                                      int *block_count)
{
    struct RamblockDirtyInfo *info = NULL;
    struct RamblockDirtyInfo *dinfo = NULL;
    RAMBlock *block = NULL;
    int total_count = 0;
    int index = 0;
    bool ret = false;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        total_count++;
    }

    dinfo = g_try_malloc0_n(total_count, sizeof(struct RamblockDirtyInfo));
    if (dinfo == NULL) {
        goto out;
    }

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        if (index >= total_count) {
            break;
        }
        info = &dinfo[index];
        get_ramblock_dirty_info(block, info, &config);
        if (!save_ramblock_hash(info)) {
            goto out;
        }
        index++;
    }
    ret = true;

out:
    *block_count = index;
    *block_dinfo = dinfo;
    return ret;
}

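/*
 * Re-hash every sampled page of a ramblock and count how many no longer
 * match the hash recorded before the sampling period; each mismatch is one
 * dirty sample.
 */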
static void calc_page_dirty_rate(struct RamblockDirtyInfo *info)
{
    uint32_t crc;
    int i;

    for (i = 0; i < info->sample_pages_count; i++) {
        crc = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
        if (crc != info->hash_result[i]) {
            info->sample_dirty_count++;
        }
    }
}

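/*
 * Look up the RamblockDirtyInfo recorded for @block in the first pass.  The
 * match is made by idstr, and the entry is rejected if the block's host
 * address or page count changed in the meantime (e.g. the block was resized
 * or remapped), since the saved hashes would no longer be comparable.
 */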
static struct RamblockDirtyInfo *
find_block_matched(RAMBlock *block, int count,
                   struct RamblockDirtyInfo *infos)
{
    int i;
    struct RamblockDirtyInfo *matched;

    for (i = 0; i < count; i++) {
        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
            break;
        }
    }

    if (i == count) {
        return NULL;
    }

    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
        infos[i].ramblock_pages !=
            (qemu_ram_get_used_length(block) >> TARGET_PAGE_BITS)) {
        return NULL;
    }

    matched = &infos[i];

    return matched;
}

static bool compare_page_hash_info(struct RamblockDirtyInfo *info,
                                   int block_count)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    RAMBlock *block = NULL;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        block_dinfo = find_block_matched(block, block_count, info);
        if (block_dinfo == NULL) {
            continue;
        }
        calc_page_dirty_rate(block_dinfo);
        update_dirtyrate_stat(block_dinfo);
    }

    if (DirtyStat.total_sample_count == 0) {
        return false;
    }

    return true;
}

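/*
 * Core of the measurement, run in a dedicated thread:
 *
 *   1. Under the RCU read lock, record the CRC32 hashes of randomly sampled
 *      pages of all migratable ramblocks.
 *   2. Drop the lock and sleep for the configured sample period.
 *   3. Re-take the lock, re-hash the same pages and derive the dirty rate
 *      from the fraction of samples whose hash changed.
 *
 * The lock is dropped while sleeping so that the RCU read-side critical
 * section is not held for the whole measurement window.
 */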
static void calculate_dirtyrate(struct DirtyRateConfig config)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    int block_count = 0;
    int64_t msec = 0;
    int64_t initial_time;

    rcu_register_thread();
    reset_dirtyrate_stat();
    rcu_read_lock();
    initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if (!record_ramblock_hash_info(&block_dinfo, config, &block_count)) {
        goto out;
    }
    rcu_read_unlock();

    msec = config.sample_period_seconds * 1000;
    msec = set_sample_page_period(msec, initial_time);

    rcu_read_lock();
    if (!compare_page_hash_info(block_dinfo, block_count)) {
        goto out;
    }

    update_dirtyrate(msec);

out:
    rcu_read_unlock();
    free_ramblock_dirty_info(block_dinfo, block_count);
    rcu_unregister_thread();
}

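/*
 * Thread entry point for a single measurement.  It moves CalculatingState
 * from UNSTARTED to MEASURING, runs calculate_dirtyrate() and then marks the
 * result as MEASURED; a failed state transition means another measurement is
 * already in flight.  The function is expected to run in its own detached
 * thread, e.g. spawned with qemu_thread_create() from the calc-dirty-rate
 * QMP command handler introduced later in this series.
 */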
void *get_dirtyrate_thread(void *arg)
{
    struct DirtyRateConfig config = *(struct DirtyRateConfig *)arg;
    int ret;

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED,
                              DIRTY_RATE_STATUS_MEASURING);
    if (ret == -1) {
        error_report("failed to change dirtyrate state");
        return NULL;
    }

    calculate_dirtyrate(config);

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_MEASURING,
                              DIRTY_RATE_STATUS_MEASURED);
    if (ret == -1) {
        error_report("failed to change dirtyrate state");
    }
    return NULL;
}