migration/dirtyrate.c
/*
 * Dirty rate measurement code
 *
 * Copyright (c) 2020 HUAWEI TECHNOLOGIES CO.,LTD.
 *
 * Authors:
 *  Chuan Zheng <zhengchuan@huawei.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include <zlib.h>
#include "qapi/error.h"
#include "cpu.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "exec/ramblock.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "qapi/qapi-commands-migration.h"
#include "migration.h"
#include "ram.h"
#include "trace.h"
#include "dirtyrate.h"

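/*
 * Current state of the dirty rate calculation and the statistics
 * gathered by the most recent measurement.
 */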
static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED;
static struct DirtyRateStat DirtyStat;

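/*
 * Sleep until the sampling period of @msec has elapsed since
 * @initial_time, then return the time that actually passed, in
 * milliseconds.
 */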
static int64_t set_sample_page_period(int64_t msec, int64_t initial_time)
{
    int64_t current_time;

    current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if ((current_time - initial_time) >= msec) {
        msec = current_time - initial_time;
    } else {
        g_usleep((msec + initial_time - current_time) * 1000);
    }

    return msec;
}

static bool is_sample_period_valid(int64_t sec)
{
    if (sec < MIN_FETCH_DIRTYRATE_TIME_SEC ||
        sec > MAX_FETCH_DIRTYRATE_TIME_SEC) {
        return false;
    }

    return true;
}

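/*
 * Atomically move the calculation state from @old_state to @new_state.
 * Returns 0 on success, -1 if another thread changed the state first.
 */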
static int dirtyrate_set_state(int *state, int old_state, int new_state)
{
    assert(new_state < DIRTY_RATE_STATUS__MAX);
    trace_dirtyrate_set_state(DirtyRateStatus_str(new_state));
    if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
        return 0;
    } else {
        return -1;
    }
}

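/*
 * Build the DirtyRateInfo response for QMP. The rate itself is only
 * reported once a measurement has completed, i.e. in the "measured"
 * state.
 */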
static struct DirtyRateInfo *query_dirty_rate_info(void)
{
    int64_t dirty_rate = DirtyStat.dirty_rate;
    struct DirtyRateInfo *info = g_malloc0(sizeof(DirtyRateInfo));

    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURED) {
        info->has_dirty_rate = true;
        info->dirty_rate = dirty_rate;
    }

    info->status = CalculatingState;
    info->start_time = DirtyStat.start_time;
    info->calc_time = DirtyStat.calc_time;

    trace_query_dirty_rate_info(DirtyRateStatus_str(CalculatingState));

    return info;
}

static void init_dirtyrate_stat(int64_t start_time, int64_t calc_time)
{
    DirtyStat.total_dirty_samples = 0;
    DirtyStat.total_sample_count = 0;
    DirtyStat.total_block_mem_MB = 0;
    DirtyStat.dirty_rate = -1;
    DirtyStat.start_time = start_time;
    DirtyStat.calc_time = calc_time;
}

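/*
 * Fold one ramblock's sampling results into the global statistics.
 */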
static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
{
    DirtyStat.total_dirty_samples += info->sample_dirty_count;
    DirtyStat.total_sample_count += info->sample_pages_count;
    /* size of total pages in MB */
    DirtyStat.total_block_mem_MB += (info->ramblock_pages *
                                     TARGET_PAGE_SIZE) >> 20;
}

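/*
 * Derive the dirty rate in MB/s from the sampling results:
 *
 *   dirtyrate = (total_dirty_samples / total_sample_count)
 *               * total_block_mem_MB * 1000 / msec
 *
 * i.e. the dirtied fraction of the samples, scaled to the total
 * sampled memory and normalized to one second.
 */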
static void update_dirtyrate(uint64_t msec)
{
    uint64_t dirtyrate;
    uint64_t total_dirty_samples = DirtyStat.total_dirty_samples;
    uint64_t total_sample_count = DirtyStat.total_sample_count;
    uint64_t total_block_mem_MB = DirtyStat.total_block_mem_MB;

    dirtyrate = total_dirty_samples * total_block_mem_MB *
                1000 / (total_sample_count * msec);

    DirtyStat.dirty_rate = dirtyrate;
}

/*
 * Hash one sampled page: a CRC32 over the TARGET_PAGE_SIZE bytes that
 * sit @vfn pages past the ramblock's host base address.
 */
static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
                                      uint64_t vfn)
{
    uint32_t crc;

    crc = crc32(0, (info->ramblock_addr +
                vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE);

    trace_get_ramblock_vfn_hash(info->idstr, vfn, crc);
    return crc;
}

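/*
 * Pick sample_pages_count random page frames in the ramblock and record
 * a hash for each of them. Returns false only when the hash arrays
 * cannot be allocated.
 */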
static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
{
    unsigned int sample_pages_count;
    int i;
    GRand *rand;

    sample_pages_count = info->sample_pages_count;

    /*
     * If the ramblock is smaller than one page, or too small for any
     * page to be sampled, report success so the block is simply skipped.
     */
    if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
        return true;
    }

    info->hash_result = g_try_malloc0_n(sample_pages_count,
                                        sizeof(uint32_t));
    if (!info->hash_result) {
        return false;
    }

    info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
                                            sizeof(uint64_t));
    if (!info->sample_page_vfn) {
        g_free(info->hash_result);
        return false;
    }

    rand = g_rand_new();
    for (i = 0; i < sample_pages_count; i++) {
        info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
                                                    info->ramblock_pages - 1);
        info->hash_result[i] = get_ramblock_vfn_hash(info,
                                                     info->sample_page_vfn[i]);
    }
    g_rand_free(rand);

    return true;
}

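/*
 * Fill @info with the sampling parameters of @block: how many pages to
 * sample (scaled by the block's size in GB), the total page count, the
 * host base address and the block's idstr.
 */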
static void get_ramblock_dirty_info(RAMBlock *block,
                                    struct RamblockDirtyInfo *info,
                                    struct DirtyRateConfig *config)
{
    uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;

    /* Right shift 30 bits to calc ramblock size in GB */
    info->sample_pages_count = (qemu_ram_get_used_length(block) *
                                sample_pages_per_gigabytes) >> 30;
    /* Right shift TARGET_PAGE_BITS to calc page count */
    info->ramblock_pages = qemu_ram_get_used_length(block) >>
                           TARGET_PAGE_BITS;
    info->ramblock_addr = qemu_ram_get_host_addr(block);
    strcpy(info->idstr, qemu_ram_get_idstr(block));
}

static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count)
{
    int i;

    if (!infos) {
        return;
    }

    for (i = 0; i < count; i++) {
        g_free(infos[i].sample_page_vfn);
        g_free(infos[i].hash_result);
    }
    g_free(infos);
}

static bool skip_sample_ramblock(RAMBlock *block)
{
    /*
     * Sample only blocks larger than MIN_RAMBLOCK_SIZE, which is given
     * in KB (hence the shift by 10 to convert it to bytes).
     */
    if (qemu_ram_get_used_length(block) < (MIN_RAMBLOCK_SIZE << 10)) {
        trace_skip_sample_ramblock(block->idstr,
                                   qemu_ram_get_used_length(block));
        return true;
    }

    return false;
}

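/*
 * First pass of a measurement: count the migratable ramblocks worth
 * sampling, allocate one RamblockDirtyInfo per block and record the
 * initial page hashes. @block_count and @block_dinfo are set even on
 * failure, so the caller can free whatever was allocated.
 */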
static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
                                      struct DirtyRateConfig config,
                                      int *block_count)
{
    struct RamblockDirtyInfo *info = NULL;
    struct RamblockDirtyInfo *dinfo = NULL;
    RAMBlock *block = NULL;
    int total_count = 0;
    int index = 0;
    bool ret = false;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        total_count++;
    }

    dinfo = g_try_malloc0_n(total_count, sizeof(struct RamblockDirtyInfo));
    if (dinfo == NULL) {
        goto out;
    }

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        if (index >= total_count) {
            break;
        }
        info = &dinfo[index];
        get_ramblock_dirty_info(block, info, &config);
        if (!save_ramblock_hash(info)) {
            goto out;
        }
        index++;
    }
    ret = true;

out:
    *block_count = index;
    *block_dinfo = dinfo;
    return ret;
}

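/*
 * Re-hash the sampled pages of one ramblock and count every page whose
 * hash changed since record_ramblock_hash_info(), i.e. every dirtied
 * sample.
 */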
static void calc_page_dirty_rate(struct RamblockDirtyInfo *info)
{
    uint32_t crc;
    int i;

    for (i = 0; i < info->sample_pages_count; i++) {
        crc = get_ramblock_vfn_hash(info, info->sample_page_vfn[i]);
        if (crc != info->hash_result[i]) {
            trace_calc_page_dirty_rate(info->idstr, crc, info->hash_result[i]);
            info->sample_dirty_count++;
        }
    }
}

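/*
 * Find the RamblockDirtyInfo recorded for @block in the first pass.
 * Returns NULL when the block is new, or was resized or remapped in the
 * meantime, in which case its samples cannot be compared.
 */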
static struct RamblockDirtyInfo *
find_block_matched(RAMBlock *block, int count,
                   struct RamblockDirtyInfo *infos)
{
    int i;
    struct RamblockDirtyInfo *matched;

    for (i = 0; i < count; i++) {
        if (!strcmp(infos[i].idstr, qemu_ram_get_idstr(block))) {
            break;
        }
    }

    if (i == count) {
        return NULL;
    }

    if (infos[i].ramblock_addr != qemu_ram_get_host_addr(block) ||
        infos[i].ramblock_pages !=
            (qemu_ram_get_used_length(block) >> TARGET_PAGE_BITS)) {
        trace_find_page_matched(block->idstr);
        return NULL;
    }

    matched = &infos[i];

    return matched;
}

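/*
 * Second pass: walk the migratable ramblocks again, compare the page
 * hashes against the recorded ones and accumulate the statistics.
 * Returns false if no sample was collected at all.
 */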
static bool compare_page_hash_info(struct RamblockDirtyInfo *info,
                                   int block_count)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    RAMBlock *block = NULL;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (skip_sample_ramblock(block)) {
            continue;
        }
        block_dinfo = find_block_matched(block, block_count, info);
        if (block_dinfo == NULL) {
            continue;
        }
        calc_page_dirty_rate(block_dinfo);
        update_dirtyrate_stat(block_dinfo);
    }

    if (DirtyStat.total_sample_count == 0) {
        return false;
    }

    return true;
}

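/*
 * One complete measurement: hash the sampled pages, wait out the
 * sampling period, hash them again and compute the rate. Both hashing
 * passes run inside RCU critical sections because they walk the
 * ramblock list.
 */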
static void calculate_dirtyrate(struct DirtyRateConfig config)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    int block_count = 0;
    int64_t msec = 0;
    int64_t initial_time;

    rcu_register_thread();
    rcu_read_lock();
    initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if (!record_ramblock_hash_info(&block_dinfo, config, &block_count)) {
        goto out;
    }
    rcu_read_unlock();

    msec = config.sample_period_seconds * 1000;
    msec = set_sample_page_period(msec, initial_time);
    DirtyStat.start_time = initial_time / 1000;
    DirtyStat.calc_time = msec / 1000;

    rcu_read_lock();
    if (!compare_page_hash_info(block_dinfo, block_count)) {
        goto out;
    }

    update_dirtyrate(msec);

out:
    rcu_read_unlock();
    free_ramblock_dirty_info(block_dinfo, block_count);
    rcu_unregister_thread();
}

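/*
 * Worker thread body: move the state to "measuring", run the
 * measurement, then mark the result as "measured".
 */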
void *get_dirtyrate_thread(void *arg)
{
    struct DirtyRateConfig config = *(struct DirtyRateConfig *)arg;
    int ret;
    int64_t start_time;
    int64_t calc_time;

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED,
                              DIRTY_RATE_STATUS_MEASURING);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
        return NULL;
    }

    start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000;
    calc_time = config.sample_period_seconds;
    init_dirtyrate_stat(start_time, calc_time);

    calculate_dirtyrate(config);

    ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_MEASURING,
                              DIRTY_RATE_STATUS_MEASURED);
    if (ret == -1) {
        error_report("change dirtyrate state failed.");
    }
    return NULL;
}

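/*
 * QMP entry point: validate the request, then hand the measurement to a
 * detached worker thread. @config is static because the detached thread
 * keeps dereferencing it after this function has returned. An
 * illustrative QMP session (command names as defined in the migration
 * QAPI schema) would be:
 *
 *   { "execute": "calc-dirty-rate", "arguments": { "calc-time": 1 } }
 *   { "execute": "query-dirty-rate" }
 */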
void qmp_calc_dirty_rate(int64_t calc_time, Error **errp)
{
    static struct DirtyRateConfig config;
    QemuThread thread;
    int ret;

    /*
     * If the dirty rate is already being measured, don't attempt to start.
     */
    if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURING) {
        error_setg(errp, "the dirty rate is already being measured.");
        return;
    }

    if (!is_sample_period_valid(calc_time)) {
        error_setg(errp, "calc-time is out of range [%d, %d].",
                   MIN_FETCH_DIRTYRATE_TIME_SEC,
                   MAX_FETCH_DIRTYRATE_TIME_SEC);
        return;
    }

    /*
     * Init calculation state as unstarted.
     */
    ret = dirtyrate_set_state(&CalculatingState, CalculatingState,
                              DIRTY_RATE_STATUS_UNSTARTED);
    if (ret == -1) {
        error_setg(errp, "init dirty rate calculation state failed.");
        return;
    }

    config.sample_period_seconds = calc_time;
    config.sample_pages_per_gigabytes = DIRTYRATE_DEFAULT_SAMPLE_PAGES;
    qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread,
                       (void *)&config, QEMU_THREAD_DETACHED);
}

struct DirtyRateInfo *qmp_query_dirty_rate(Error **errp)
{
    return query_dirty_rate_info();
}