migration: add reporting of errors for outgoing migration
[qemu.git] / migration / migration.c
1 /*
2 * QEMU live migration
3 *
4 * Copyright IBM, Corp. 2008
5 *
6 * Authors:
7 * Anthony Liguori <aliguori@us.ibm.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 * Contributions after 2012-01-13 are licensed under the terms of the
13 * GNU GPL, version 2 or (at your option) any later version.
14 */
15
16 #include "qemu/osdep.h"
17 #include "qemu/cutils.h"
18 #include "qemu/error-report.h"
19 #include "qemu/main-loop.h"
20 #include "migration/migration.h"
21 #include "migration/qemu-file.h"
22 #include "sysemu/sysemu.h"
23 #include "block/block.h"
24 #include "qapi/qmp/qerror.h"
25 #include "qapi/util.h"
26 #include "qemu/sockets.h"
27 #include "qemu/rcu.h"
28 #include "migration/block.h"
29 #include "migration/postcopy-ram.h"
30 #include "qemu/thread.h"
31 #include "qmp-commands.h"
32 #include "trace.h"
33 #include "qapi-event.h"
34 #include "qom/cpu.h"
35 #include "exec/memory.h"
36 #include "exec/address-spaces.h"
37
38 #define MAX_THROTTLE (32 << 20) /* Migration transfer speed throttling */
39
40 /* Amount of time to allocate to each "chunk" of bandwidth-throttled
41 * data. */
42 #define BUFFER_DELAY 100
43 #define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
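/*
 * For illustration (numbers are an example only): the limit handed to
 * qemu_file_set_rate_limit() is bandwidth_limit / XFER_LIMIT_RATIO, i.e. the
 * budget for one BUFFER_DELAY interval. With the default MAX_THROTTLE of
 * 32 MiB/s that is roughly 33554432 / 10 ≈ 3.2 MiB per 100 ms chunk; once
 * the budget is spent the migration thread sleeps out the rest of the
 * interval (see the g_usleep() call in migration_thread()).
 */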
44
45 /* Default compression thread count */
46 #define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
47 /* Default decompression thread count, usually decompression is at
48 * least 4 times as fast as compression.*/
49 #define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
50 /* 0: no compression, 1: best speed, ..., 9: best compression ratio */
51 #define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
52 /* Define default autoconverge cpu throttle migration parameters */
53 #define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
54 #define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10
55
56 /* Migration XBZRLE default cache size */
57 #define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)
58
59 static NotifierList migration_state_notifiers =
60 NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);
61
62 static bool deferred_incoming;
63
64 /*
65 * Current state of incoming postcopy; note this is not part of
66 * MigrationIncomingState since its state is used during cleanup
67 * at the end as MIS is being freed.
68 */
69 static PostcopyState incoming_postcopy_state;
70
71 /* When we add fault tolerance, we could have several
72 migrations at once. For now we don't need to add
73 dynamic creation of migration */
74
75 /* For outgoing */
76 MigrationState *migrate_get_current(void)
77 {
78 static bool once;
79 static MigrationState current_migration = {
80 .state = MIGRATION_STATUS_NONE,
81 .bandwidth_limit = MAX_THROTTLE,
82 .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
83 .mbps = -1,
84 .parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] =
85 DEFAULT_MIGRATE_COMPRESS_LEVEL,
86 .parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
87 DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT,
88 .parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
89 DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT,
90 .parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INITIAL] =
91 DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL,
92 .parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INCREMENT] =
93 DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT,
94 };
95
96 if (!once) {
97 qemu_mutex_init(&current_migration.src_page_req_mutex);
98 once = true;
99 }
100 return &current_migration;
101 }
102
103 /* For incoming */
104 static MigrationIncomingState *mis_current;
105
106 MigrationIncomingState *migration_incoming_get_current(void)
107 {
108 return mis_current;
109 }
110
111 MigrationIncomingState *migration_incoming_state_new(QEMUFile* f)
112 {
113 mis_current = g_new0(MigrationIncomingState, 1);
114 mis_current->from_src_file = f;
115 mis_current->state = MIGRATION_STATUS_NONE;
116 QLIST_INIT(&mis_current->loadvm_handlers);
117 qemu_mutex_init(&mis_current->rp_mutex);
118 qemu_event_init(&mis_current->main_thread_load_event, false);
119
120 return mis_current;
121 }
122
123 void migration_incoming_state_destroy(void)
124 {
125 qemu_event_destroy(&mis_current->main_thread_load_event);
126 loadvm_free_handlers(mis_current);
127 g_free(mis_current);
128 mis_current = NULL;
129 }
130
131
132 typedef struct {
133 bool optional;
134 uint32_t size;
135 uint8_t runstate[100];
136 RunState state;
137 bool received;
138 } GlobalState;
139
140 static GlobalState global_state;
141
142 int global_state_store(void)
143 {
144 if (!runstate_store((char *)global_state.runstate,
145 sizeof(global_state.runstate))) {
146 error_report("runstate name too big: %s", global_state.runstate);
147 trace_migrate_state_too_big();
148 return -EINVAL;
149 }
150 return 0;
151 }
152
153 void global_state_store_running(void)
154 {
155 const char *state = RunState_lookup[RUN_STATE_RUNNING];
156 strncpy((char *)global_state.runstate,
157 state, sizeof(global_state.runstate));
158 }
159
160 static bool global_state_received(void)
161 {
162 return global_state.received;
163 }
164
165 static RunState global_state_get_runstate(void)
166 {
167 return global_state.state;
168 }
169
170 void global_state_set_optional(void)
171 {
172 global_state.optional = true;
173 }
174
175 static bool global_state_needed(void *opaque)
176 {
177 GlobalState *s = opaque;
178 char *runstate = (char *)s->runstate;
179
180 /* If it is not optional, it is mandatory */
181
182 if (s->optional == false) {
183 return true;
184 }
185
186 /* If state is running or paused, it is not needed */
187
188 if (strcmp(runstate, "running") == 0 ||
189 strcmp(runstate, "paused") == 0) {
190 return false;
191 }
192
193 /* for any other state it is needed */
194 return true;
195 }
196
197 static int global_state_post_load(void *opaque, int version_id)
198 {
199 GlobalState *s = opaque;
200 Error *local_err = NULL;
201 int r;
202 char *runstate = (char *)s->runstate;
203
204 s->received = true;
205 trace_migrate_global_state_post_load(runstate);
206
207 r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE__MAX,
208 -1, &local_err);
209
210 if (r == -1) {
211 if (local_err) {
212 error_report_err(local_err);
213 }
214 return -EINVAL;
215 }
216 s->state = r;
217
218 return 0;
219 }
220
221 static void global_state_pre_save(void *opaque)
222 {
223 GlobalState *s = opaque;
224
225 trace_migrate_global_state_pre_save((char *)s->runstate);
226 s->size = strlen((char *)s->runstate) + 1;
227 }
228
229 static const VMStateDescription vmstate_globalstate = {
230 .name = "globalstate",
231 .version_id = 1,
232 .minimum_version_id = 1,
233 .post_load = global_state_post_load,
234 .pre_save = global_state_pre_save,
235 .needed = global_state_needed,
236 .fields = (VMStateField[]) {
237 VMSTATE_UINT32(size, GlobalState),
238 VMSTATE_BUFFER(runstate, GlobalState),
239 VMSTATE_END_OF_LIST()
240 },
241 };
242
243 void register_global_state(void)
244 {
245 /* We use this value whether or not we actually receive it */
246 strcpy((char *)&global_state.runstate, "");
247 global_state.received = false;
248 vmstate_register(NULL, 0, &vmstate_globalstate, &global_state);
249 }
250
251 static void migrate_generate_event(int new_state)
252 {
253 if (migrate_use_events()) {
254 qapi_event_send_migration(new_state, &error_abort);
255 }
256 }
257
258 /*
259 * Called on -incoming with a defer: uri.
260 * The migration can be started later after any parameters have been
261 * changed.
262 */
263 static void deferred_incoming_migration(Error **errp)
264 {
265 if (deferred_incoming) {
266 error_setg(errp, "Incoming migration already deferred");
267 }
268 deferred_incoming = true;
269 }
270
271 /* Request a range of pages from the source VM at the given
272 * start address.
273 * rbname: Name of the RAMBlock to request the page in, if NULL it's the same
274 * as the last request (a name must have been given previously)
275 * Start: Address offset within the RB
276 * Len: Length in bytes required - must be a multiple of pagesize
277 */
278 void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
279 ram_addr_t start, size_t len)
280 {
281 uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 255 */
282 size_t msglen = 12; /* start + len */
283
284 *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
285 *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);
286
287 if (rbname) {
288 int rbname_len = strlen(rbname);
289 assert(rbname_len < 256);
290
291 bufc[msglen++] = rbname_len;
292 memcpy(bufc + msglen, rbname, rbname_len);
293 msglen += rbname_len;
294 migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES_ID, msglen, bufc);
295 } else {
296 migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES, msglen, bufc);
297 }
298 }
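/*
 * For illustration, the message built above has this on-the-wire layout
 * (values are examples only; "pc.ram" is just a typical RAMBlock name):
 *
 *   bufc[0..7]   be64 start          e.g. 0x0000000000200000
 *   bufc[8..11]  be32 len            e.g. 4096
 *   bufc[12]     strlen(rbname)      e.g. 6          (REQ_PAGES_ID only)
 *   bufc[13..]   rbname, no NUL      e.g. "pc.ram"   (REQ_PAGES_ID only)
 *
 * giving msglen = 12 for MIG_RP_MSG_REQ_PAGES and 12 + 1 + 6 = 19 for
 * MIG_RP_MSG_REQ_PAGES_ID in this example.
 */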
299
300 void qemu_start_incoming_migration(const char *uri, Error **errp)
301 {
302 const char *p;
303
304 qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort);
305 if (!strcmp(uri, "defer")) {
306 deferred_incoming_migration(errp);
307 } else if (strstart(uri, "tcp:", &p)) {
308 tcp_start_incoming_migration(p, errp);
309 #ifdef CONFIG_RDMA
310 } else if (strstart(uri, "rdma:", &p)) {
311 rdma_start_incoming_migration(p, errp);
312 #endif
313 #if !defined(WIN32)
314 } else if (strstart(uri, "exec:", &p)) {
315 exec_start_incoming_migration(p, errp);
316 } else if (strstart(uri, "unix:", &p)) {
317 unix_start_incoming_migration(p, errp);
318 } else if (strstart(uri, "fd:", &p)) {
319 fd_start_incoming_migration(p, errp);
320 #endif
321 } else {
322 error_setg(errp, "unknown migration protocol: %s", uri);
323 }
324 }
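/*
 * For illustration, the accepted URIs follow the prefixes tested above,
 * e.g. (all values hypothetical):
 *   -incoming tcp:0:4444           listen on all interfaces, port 4444
 *   -incoming unix:/tmp/mig.sock   listen on a UNIX domain socket
 *   -incoming exec:cat state.bin   read the stream from a command's stdout
 *   -incoming fd:42                read from an already-open file descriptor
 *   -incoming rdma:host:4444       RDMA transport (CONFIG_RDMA builds only)
 *   -incoming defer                wait for a later migrate-incoming command
 */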
325
326 static void process_incoming_migration_bh(void *opaque)
327 {
328 Error *local_err = NULL;
329 MigrationIncomingState *mis = opaque;
330
331 /* Make sure all file formats flush their mutable metadata */
332 bdrv_invalidate_cache_all(&local_err);
333 if (local_err) {
334 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
335 MIGRATION_STATUS_FAILED);
336 error_report_err(local_err);
337 migrate_decompress_threads_join();
338 exit(EXIT_FAILURE);
339 }
340
341 /*
342 * This must happen after all error conditions are dealt with and
343 * we're sure the VM is going to be running on this host.
344 */
345 qemu_announce_self();
346
347 /* If global state section was not received or we are in running
348 state, we need to obey autostart. Any other state is set with
349 runstate_set. */
350
351 if (!global_state_received() ||
352 global_state_get_runstate() == RUN_STATE_RUNNING) {
353 if (autostart) {
354 vm_start();
355 } else {
356 runstate_set(RUN_STATE_PAUSED);
357 }
358 } else {
359 runstate_set(global_state_get_runstate());
360 }
361 migrate_decompress_threads_join();
362 /*
363 * This must happen after any state changes since as soon as an external
364 * observer sees this event they might start to prod at the VM assuming
365 * it's ready to use.
366 */
367 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
368 MIGRATION_STATUS_COMPLETED);
369 qemu_bh_delete(mis->bh);
370 migration_incoming_state_destroy();
371 }
372
373 static void process_incoming_migration_co(void *opaque)
374 {
375 QEMUFile *f = opaque;
376 MigrationIncomingState *mis;
377 PostcopyState ps;
378 int ret;
379
380 mis = migration_incoming_state_new(f);
381 postcopy_state_set(POSTCOPY_INCOMING_NONE);
382 migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
383 MIGRATION_STATUS_ACTIVE);
384 ret = qemu_loadvm_state(f);
385
386 ps = postcopy_state_get();
387 trace_process_incoming_migration_co_end(ret, ps);
388 if (ps != POSTCOPY_INCOMING_NONE) {
389 if (ps == POSTCOPY_INCOMING_ADVISE) {
390 /*
391 * Where a migration had postcopy enabled (and thus went to advise)
392 * but managed to complete within the precopy period, we can use
393 * the normal exit.
394 */
395 postcopy_ram_incoming_cleanup(mis);
396 } else if (ret >= 0) {
397 /*
398 * Postcopy was started, cleanup should happen at the end of the
399 * postcopy thread.
400 */
401 trace_process_incoming_migration_co_postcopy_end_main();
402 return;
403 }
404 /* Else if something went wrong then just fall out of the normal exit */
405 }
406
407 qemu_fclose(f);
408 free_xbzrle_decoded_buf();
409
410 if (ret < 0) {
411 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
412 MIGRATION_STATUS_FAILED);
413 error_report("load of migration failed: %s", strerror(-ret));
414 migrate_decompress_threads_join();
415 exit(EXIT_FAILURE);
416 }
417
418 mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
419 qemu_bh_schedule(mis->bh);
420 }
421
422 void process_incoming_migration(QEMUFile *f)
423 {
424 Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
425
426 migrate_decompress_threads_create();
427 qemu_file_set_blocking(f, false);
428 qemu_coroutine_enter(co, f);
429 }
430
431
432 void migration_set_incoming_channel(MigrationState *s,
433 QIOChannel *ioc)
434 {
435 QEMUFile *f = qemu_fopen_channel_input(ioc);
436
437 process_incoming_migration(f);
438 }
439
440
441 void migration_set_outgoing_channel(MigrationState *s,
442 QIOChannel *ioc)
443 {
444 QEMUFile *f = qemu_fopen_channel_output(ioc);
445
446 s->to_dst_file = f;
447
448 migrate_fd_connect(s);
449 }
450
451
452 /*
453 * Send a message on the return channel back to the source
454 * of the migration.
455 */
456 void migrate_send_rp_message(MigrationIncomingState *mis,
457 enum mig_rp_message_type message_type,
458 uint16_t len, void *data)
459 {
460 trace_migrate_send_rp_message((int)message_type, len);
461 qemu_mutex_lock(&mis->rp_mutex);
462 qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
463 qemu_put_be16(mis->to_src_file, len);
464 qemu_put_buffer(mis->to_src_file, data, len);
465 qemu_fflush(mis->to_src_file);
466 qemu_mutex_unlock(&mis->rp_mutex);
467 }
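/*
 * For illustration, every return-path message is framed as:
 *
 *   [be16 message_type][be16 payload_len][payload_len bytes of payload]
 *
 * so a SHUT or PONG (payload: a single be32 value, see below) occupies
 * 2 + 2 + 4 = 8 bytes on the wire.
 */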
468
469 /*
470 * Send a 'SHUT' message on the return channel with the given value
471 * to indicate that we've finished with the RP. Non-0 value indicates
472 * error.
473 */
474 void migrate_send_rp_shut(MigrationIncomingState *mis,
475 uint32_t value)
476 {
477 uint32_t buf;
478
479 buf = cpu_to_be32(value);
480 migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
481 }
482
483 /*
484 * Send a 'PONG' message on the return channel with the given value
485 * (normally in response to a 'PING')
486 */
487 void migrate_send_rp_pong(MigrationIncomingState *mis,
488 uint32_t value)
489 {
490 uint32_t buf;
491
492 buf = cpu_to_be32(value);
493 migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
494 }
495
496 /* Maximum amount of downtime, in nanoseconds, that we are willing to accept
497 * during migration. Nanoseconds are used because that is the maximum
498 * resolution get_clock() can achieve. This is an internal measure; all
499 * user-visible units must be in seconds */
500 static uint64_t max_downtime = 300000000;
501
502 uint64_t migrate_max_downtime(void)
503 {
504 return max_downtime;
505 }
506
507 MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
508 {
509 MigrationCapabilityStatusList *head = NULL;
510 MigrationCapabilityStatusList *caps;
511 MigrationState *s = migrate_get_current();
512 int i;
513
514 caps = NULL; /* silence compiler warning */
515 for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
516 if (head == NULL) {
517 head = g_malloc0(sizeof(*caps));
518 caps = head;
519 } else {
520 caps->next = g_malloc0(sizeof(*caps));
521 caps = caps->next;
522 }
523 caps->value =
524 g_malloc(sizeof(*caps->value));
525 caps->value->capability = i;
526 caps->value->state = s->enabled_capabilities[i];
527 }
528
529 return head;
530 }
531
532 MigrationParameters *qmp_query_migrate_parameters(Error **errp)
533 {
534 MigrationParameters *params;
535 MigrationState *s = migrate_get_current();
536
537 params = g_malloc0(sizeof(*params));
538 params->compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
539 params->compress_threads =
540 s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
541 params->decompress_threads =
542 s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
543 params->cpu_throttle_initial =
544 s->parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INITIAL];
545 params->cpu_throttle_increment =
546 s->parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INCREMENT];
547
548 return params;
549 }
550
551 /*
552 * Return true if we're already in the middle of a migration
553 * (i.e. any of the active or setup states)
554 */
555 static bool migration_is_setup_or_active(int state)
556 {
557 switch (state) {
558 case MIGRATION_STATUS_ACTIVE:
559 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
560 case MIGRATION_STATUS_SETUP:
561 return true;
562
563 default:
564 return false;
565
566 }
567 }
568
569 static void get_xbzrle_cache_stats(MigrationInfo *info)
570 {
571 if (migrate_use_xbzrle()) {
572 info->has_xbzrle_cache = true;
573 info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
574 info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
575 info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
576 info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
577 info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
578 info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
579 info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
580 }
581 }
582
583 MigrationInfo *qmp_query_migrate(Error **errp)
584 {
585 MigrationInfo *info = g_malloc0(sizeof(*info));
586 MigrationState *s = migrate_get_current();
587
588 switch (s->state) {
589 case MIGRATION_STATUS_NONE:
590 /* no migration has happened ever */
591 break;
592 case MIGRATION_STATUS_SETUP:
593 info->has_status = true;
594 info->has_total_time = false;
595 break;
596 case MIGRATION_STATUS_ACTIVE:
597 case MIGRATION_STATUS_CANCELLING:
598 info->has_status = true;
599 info->has_total_time = true;
600 info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
601 - s->total_time;
602 info->has_expected_downtime = true;
603 info->expected_downtime = s->expected_downtime;
604 info->has_setup_time = true;
605 info->setup_time = s->setup_time;
606
607 info->has_ram = true;
608 info->ram = g_malloc0(sizeof(*info->ram));
609 info->ram->transferred = ram_bytes_transferred();
610 info->ram->remaining = ram_bytes_remaining();
611 info->ram->total = ram_bytes_total();
612 info->ram->duplicate = dup_mig_pages_transferred();
613 info->ram->skipped = skipped_mig_pages_transferred();
614 info->ram->normal = norm_mig_pages_transferred();
615 info->ram->normal_bytes = norm_mig_bytes_transferred();
616 info->ram->dirty_pages_rate = s->dirty_pages_rate;
617 info->ram->mbps = s->mbps;
618 info->ram->dirty_sync_count = s->dirty_sync_count;
619
620 if (blk_mig_active()) {
621 info->has_disk = true;
622 info->disk = g_malloc0(sizeof(*info->disk));
623 info->disk->transferred = blk_mig_bytes_transferred();
624 info->disk->remaining = blk_mig_bytes_remaining();
625 info->disk->total = blk_mig_bytes_total();
626 }
627
628 if (cpu_throttle_active()) {
629 info->has_cpu_throttle_percentage = true;
630 info->cpu_throttle_percentage = cpu_throttle_get_percentage();
631 }
632
633 get_xbzrle_cache_stats(info);
634 break;
635 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
636 /* Mostly the same as active; TODO add some postcopy stats */
637 info->has_status = true;
638 info->has_total_time = true;
639 info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
640 - s->total_time;
641 info->has_expected_downtime = true;
642 info->expected_downtime = s->expected_downtime;
643 info->has_setup_time = true;
644 info->setup_time = s->setup_time;
645
646 info->has_ram = true;
647 info->ram = g_malloc0(sizeof(*info->ram));
648 info->ram->transferred = ram_bytes_transferred();
649 info->ram->remaining = ram_bytes_remaining();
650 info->ram->total = ram_bytes_total();
651 info->ram->duplicate = dup_mig_pages_transferred();
652 info->ram->skipped = skipped_mig_pages_transferred();
653 info->ram->normal = norm_mig_pages_transferred();
654 info->ram->normal_bytes = norm_mig_bytes_transferred();
655 info->ram->dirty_pages_rate = s->dirty_pages_rate;
656 info->ram->mbps = s->mbps;
657 info->ram->dirty_sync_count = s->dirty_sync_count;
658
659 if (blk_mig_active()) {
660 info->has_disk = true;
661 info->disk = g_malloc0(sizeof(*info->disk));
662 info->disk->transferred = blk_mig_bytes_transferred();
663 info->disk->remaining = blk_mig_bytes_remaining();
664 info->disk->total = blk_mig_bytes_total();
665 }
666
667 get_xbzrle_cache_stats(info);
668 break;
669 case MIGRATION_STATUS_COMPLETED:
670 get_xbzrle_cache_stats(info);
671
672 info->has_status = true;
673 info->has_total_time = true;
674 info->total_time = s->total_time;
675 info->has_downtime = true;
676 info->downtime = s->downtime;
677 info->has_setup_time = true;
678 info->setup_time = s->setup_time;
679
680 info->has_ram = true;
681 info->ram = g_malloc0(sizeof(*info->ram));
682 info->ram->transferred = ram_bytes_transferred();
683 info->ram->remaining = 0;
684 info->ram->total = ram_bytes_total();
685 info->ram->duplicate = dup_mig_pages_transferred();
686 info->ram->skipped = skipped_mig_pages_transferred();
687 info->ram->normal = norm_mig_pages_transferred();
688 info->ram->normal_bytes = norm_mig_bytes_transferred();
689 info->ram->mbps = s->mbps;
690 info->ram->dirty_sync_count = s->dirty_sync_count;
691 break;
692 case MIGRATION_STATUS_FAILED:
693 info->has_status = true;
694 if (s->error) {
695 info->has_error_desc = true;
696 info->error_desc = g_strdup(error_get_pretty(s->error));
697 }
698 break;
699 case MIGRATION_STATUS_CANCELLED:
700 info->has_status = true;
701 break;
702 }
703 info->status = s->state;
704
705 return info;
706 }
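/*
 * For illustration, a query-migrate reply built from this MigrationInfo while
 * a precopy migration is active might look like the following (all values are
 * hypothetical; the wire names come from the QAPI schema):
 *
 * { "return": { "status": "active", "total-time": 12345,
 *               "expected-downtime": 300, "setup-time": 10,
 *               "ram": { "transferred": 123456789, "remaining": 1048576,
 *                        "total": 1073741824, "duplicate": 1000,
 *                        "skipped": 0, "normal": 30000,
 *                        "normal-bytes": 122880000, "dirty-pages-rate": 0,
 *                        "mbps": 900.5, "dirty-sync-count": 2 } } }
 */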
707
708 void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
709 Error **errp)
710 {
711 MigrationState *s = migrate_get_current();
712 MigrationCapabilityStatusList *cap;
713
714 if (migration_is_setup_or_active(s->state)) {
715 error_setg(errp, QERR_MIGRATION_ACTIVE);
716 return;
717 }
718
719 for (cap = params; cap; cap = cap->next) {
720 s->enabled_capabilities[cap->value->capability] = cap->value->state;
721 }
722
723 if (migrate_postcopy_ram()) {
724 if (migrate_use_compression()) {
725 /* The decompression threads asynchronously write into RAM
726 * rather than use the atomic copies needed to avoid
727 * userfaulting. It should be possible to fix the decompression
728 * threads for compatibility in future.
729 */
730 error_report("Postcopy is not currently compatible with "
731 "compression");
732 s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
733 false;
734 }
735 }
736 }
737
738 void qmp_migrate_set_parameters(bool has_compress_level,
739 int64_t compress_level,
740 bool has_compress_threads,
741 int64_t compress_threads,
742 bool has_decompress_threads,
743 int64_t decompress_threads,
744 bool has_cpu_throttle_initial,
745 int64_t cpu_throttle_initial,
746 bool has_cpu_throttle_increment,
747 int64_t cpu_throttle_increment, Error **errp)
748 {
749 MigrationState *s = migrate_get_current();
750
751 if (has_compress_level && (compress_level < 0 || compress_level > 9)) {
752 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
753 "is invalid, it should be in the range of 0 to 9");
754 return;
755 }
756 if (has_compress_threads &&
757 (compress_threads < 1 || compress_threads > 255)) {
758 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
759 "compress_threads",
760 "is invalid, it should be in the range of 1 to 255");
761 return;
762 }
763 if (has_decompress_threads &&
764 (decompress_threads < 1 || decompress_threads > 255)) {
765 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
766 "decompress_threads",
767 "is invalid, it should be in the range of 1 to 255");
768 return;
769 }
770 if (has_cpu_throttle_initial &&
771 (cpu_throttle_initial < 1 || cpu_throttle_initial > 99)) {
772 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
773 "cpu_throttle_initial",
774 "an integer in the range of 1 to 99");
775 }
776 if (has_cpu_throttle_increment &&
777 (cpu_throttle_increment < 1 || cpu_throttle_increment > 99)) {
778 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
779 "cpu_throttle_increment",
780 "an integer in the range of 1 to 99");
781 }
782
783 if (has_compress_level) {
784 s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
785 }
786 if (has_compress_threads) {
787 s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] = compress_threads;
788 }
789 if (has_decompress_threads) {
790 s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
791 decompress_threads;
792 }
793 if (has_cpu_throttle_initial) {
794 s->parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INITIAL] =
795 cpu_throttle_initial;
796 }
797
798 if (has_cpu_throttle_increment) {
799 s->parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INCREMENT] =
800 cpu_throttle_increment;
801 }
802 }
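/*
 * For illustration, these parameters are normally driven over QMP with
 * something like the following (argument names per the QAPI schema; the
 * values shown are examples only):
 *
 * { "execute": "migrate-set-parameters",
 *   "arguments": { "compress-level": 1,
 *                  "compress-threads": 8,
 *                  "cpu-throttle-initial": 20,
 *                  "cpu-throttle-increment": 10 } }
 */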
803
804 void qmp_migrate_start_postcopy(Error **errp)
805 {
806 MigrationState *s = migrate_get_current();
807
808 if (!migrate_postcopy_ram()) {
809 error_setg(errp, "Enable postcopy with migrate_set_capability before"
810 " the start of migration");
811 return;
812 }
813
814 if (s->state == MIGRATION_STATUS_NONE) {
815 error_setg(errp, "Postcopy must be started after migration has been"
816 " started");
817 return;
818 }
819 /*
820 * we don't error if migration has finished since that would be racy
821 * with issuing this command.
822 */
823 atomic_set(&s->start_postcopy, true);
824 }
825
826 /* shared migration helpers */
827
828 void migrate_set_state(int *state, int old_state, int new_state)
829 {
830 if (atomic_cmpxchg(state, old_state, new_state) == old_state) {
831 trace_migrate_set_state(new_state);
832 migrate_generate_event(new_state);
833 }
834 }
835
836 static void migrate_fd_cleanup(void *opaque)
837 {
838 MigrationState *s = opaque;
839
840 qemu_bh_delete(s->cleanup_bh);
841 s->cleanup_bh = NULL;
842
843 flush_page_queue(s);
844
845 if (s->to_dst_file) {
846 trace_migrate_fd_cleanup();
847 qemu_mutex_unlock_iothread();
848 if (s->migration_thread_running) {
849 qemu_thread_join(&s->thread);
850 s->migration_thread_running = false;
851 }
852 qemu_mutex_lock_iothread();
853
854 migrate_compress_threads_join();
855 qemu_fclose(s->to_dst_file);
856 s->to_dst_file = NULL;
857 }
858
859 assert((s->state != MIGRATION_STATUS_ACTIVE) &&
860 (s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE));
861
862 if (s->state == MIGRATION_STATUS_CANCELLING) {
863 migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
864 MIGRATION_STATUS_CANCELLED);
865 }
866
867 notifier_list_notify(&migration_state_notifiers, s);
868 }
869
870 void migrate_fd_error(MigrationState *s, const Error *error)
871 {
872 trace_migrate_fd_error(error ? error_get_pretty(error) : "");
873 assert(s->to_dst_file == NULL);
874 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
875 MIGRATION_STATUS_FAILED);
876 if (!s->error) {
877 s->error = error_copy(error);
878 }
879 notifier_list_notify(&migration_state_notifiers, s);
880 }
881
882 static void migrate_fd_cancel(MigrationState *s)
883 {
884 int old_state;
885 QEMUFile *f = migrate_get_current()->to_dst_file;
886 trace_migrate_fd_cancel();
887
888 if (s->rp_state.from_dst_file) {
889 /* shut down the rp socket, causing the rp thread to exit */
890 qemu_file_shutdown(s->rp_state.from_dst_file);
891 }
892
893 do {
894 old_state = s->state;
895 if (!migration_is_setup_or_active(old_state)) {
896 break;
897 }
898 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
899 } while (s->state != MIGRATION_STATUS_CANCELLING);
900
901 /*
902 * If we're unlucky the migration code might be stuck somewhere in a
903 * send/write while the network has failed and is waiting to timeout;
904 * if we've got shutdown(2) available then we can force it to quit.
905 * The outgoing qemu file gets closed in migrate_fd_cleanup that is
906 * called in a bh, so there is no race against this cancel.
907 */
908 if (s->state == MIGRATION_STATUS_CANCELLING && f) {
909 qemu_file_shutdown(f);
910 }
911 }
912
913 void add_migration_state_change_notifier(Notifier *notify)
914 {
915 notifier_list_add(&migration_state_notifiers, notify);
916 }
917
918 void remove_migration_state_change_notifier(Notifier *notify)
919 {
920 notifier_remove(notify);
921 }
922
923 bool migration_in_setup(MigrationState *s)
924 {
925 return s->state == MIGRATION_STATUS_SETUP;
926 }
927
928 bool migration_has_finished(MigrationState *s)
929 {
930 return s->state == MIGRATION_STATUS_COMPLETED;
931 }
932
933 bool migration_has_failed(MigrationState *s)
934 {
935 return (s->state == MIGRATION_STATUS_CANCELLED ||
936 s->state == MIGRATION_STATUS_FAILED);
937 }
938
939 bool migration_in_postcopy(MigrationState *s)
940 {
941 return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
942 }
943
944 bool migration_in_postcopy_after_devices(MigrationState *s)
945 {
946 return migration_in_postcopy(s) && s->postcopy_after_devices;
947 }
948
949 MigrationState *migrate_init(const MigrationParams *params)
950 {
951 MigrationState *s = migrate_get_current();
952
953 /*
954 * Reinitialise all migration state, except
955 * parameters/capabilities that the user set, and
956 * locks.
957 */
958 s->bytes_xfer = 0;
959 s->xfer_limit = 0;
960 s->cleanup_bh = 0;
961 s->to_dst_file = NULL;
962 s->state = MIGRATION_STATUS_NONE;
963 s->params = *params;
964 s->rp_state.from_dst_file = NULL;
965 s->rp_state.error = false;
966 s->mbps = 0.0;
967 s->downtime = 0;
968 s->expected_downtime = 0;
969 s->dirty_pages_rate = 0;
970 s->dirty_bytes_rate = 0;
971 s->setup_time = 0;
972 s->dirty_sync_count = 0;
973 s->start_postcopy = false;
974 s->postcopy_after_devices = false;
975 s->migration_thread_running = false;
976 s->last_req_rb = NULL;
977 error_free(s->error);
978 s->error = NULL;
979
980 migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
981
982 QSIMPLEQ_INIT(&s->src_page_requests);
983
984 s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
985 return s;
986 }
987
988 static GSList *migration_blockers;
989
990 void migrate_add_blocker(Error *reason)
991 {
992 migration_blockers = g_slist_prepend(migration_blockers, reason);
993 }
994
995 void migrate_del_blocker(Error *reason)
996 {
997 migration_blockers = g_slist_remove(migration_blockers, reason);
998 }
999
1000 void qmp_migrate_incoming(const char *uri, Error **errp)
1001 {
1002 Error *local_err = NULL;
1003 static bool once = true;
1004
1005 if (!deferred_incoming) {
1006 error_setg(errp, "For use with '-incoming defer'");
1007 return;
1008 }
1009 if (!once) {
1010 error_setg(errp, "The incoming migration has already been started");
         return;
1011 }
1012
1013 qemu_start_incoming_migration(uri, &local_err);
1014
1015 if (local_err) {
1016 error_propagate(errp, local_err);
1017 return;
1018 }
1019
1020 once = false;
1021 }
1022
1023 bool migration_is_blocked(Error **errp)
1024 {
1025 if (qemu_savevm_state_blocked(errp)) {
1026 return true;
1027 }
1028
1029 if (migration_blockers) {
1030 *errp = error_copy(migration_blockers->data);
1031 return true;
1032 }
1033
1034 return false;
1035 }
1036
1037 void qmp_migrate(const char *uri, bool has_blk, bool blk,
1038 bool has_inc, bool inc, bool has_detach, bool detach,
1039 Error **errp)
1040 {
1041 Error *local_err = NULL;
1042 MigrationState *s = migrate_get_current();
1043 MigrationParams params;
1044 const char *p;
1045
1046 params.blk = has_blk && blk;
1047 params.shared = has_inc && inc;
1048
1049 if (migration_is_setup_or_active(s->state) ||
1050 s->state == MIGRATION_STATUS_CANCELLING) {
1051 error_setg(errp, QERR_MIGRATION_ACTIVE);
1052 return;
1053 }
1054 if (runstate_check(RUN_STATE_INMIGRATE)) {
1055 error_setg(errp, "Guest is waiting for an incoming migration");
1056 return;
1057 }
1058
1059 if (migration_is_blocked(errp)) {
1060 return;
1061 }
1062
1063 s = migrate_init(&params);
1064
1065 if (strstart(uri, "tcp:", &p)) {
1066 tcp_start_outgoing_migration(s, p, &local_err);
1067 #ifdef CONFIG_RDMA
1068 } else if (strstart(uri, "rdma:", &p)) {
1069 rdma_start_outgoing_migration(s, p, &local_err);
1070 #endif
1071 #if !defined(WIN32)
1072 } else if (strstart(uri, "exec:", &p)) {
1073 exec_start_outgoing_migration(s, p, &local_err);
1074 } else if (strstart(uri, "unix:", &p)) {
1075 unix_start_outgoing_migration(s, p, &local_err);
1076 } else if (strstart(uri, "fd:", &p)) {
1077 fd_start_outgoing_migration(s, p, &local_err);
1078 #endif
1079 } else {
1080 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
1081 "a valid migration protocol");
1082 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1083 MIGRATION_STATUS_FAILED);
1084 return;
1085 }
1086
1087 if (local_err) {
1088 migrate_fd_error(s, local_err);
1089 error_propagate(errp, local_err);
1090 return;
1091 }
1092 }
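/*
 * For illustration, an outgoing migration is typically kicked off over QMP
 * with something like (destination address is an example only):
 *
 * { "execute": "migrate",
 *   "arguments": { "uri": "tcp:dest.example.com:4444" } }
 *
 * or from the HMP monitor with "migrate tcp:dest.example.com:4444".
 */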
1093
1094 void qmp_migrate_cancel(Error **errp)
1095 {
1096 migrate_fd_cancel(migrate_get_current());
1097 }
1098
1099 void qmp_migrate_set_cache_size(int64_t value, Error **errp)
1100 {
1101 MigrationState *s = migrate_get_current();
1102 int64_t new_size;
1103
1104 /* Check for truncation */
1105 if (value != (size_t)value) {
1106 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
1107 "exceeding address space");
1108 return;
1109 }
1110
1111 /* Cache should not be larger than guest ram size */
1112 if (value > ram_bytes_total()) {
1113 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
1114 "exceeds guest ram size ");
1115 return;
1116 }
1117
1118 new_size = xbzrle_cache_resize(value);
1119 if (new_size < 0) {
1120 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
1121 "is smaller than page size");
1122 return;
1123 }
1124
1125 s->xbzrle_cache_size = new_size;
1126 }
1127
1128 int64_t qmp_query_migrate_cache_size(Error **errp)
1129 {
1130 return migrate_xbzrle_cache_size();
1131 }
1132
1133 void qmp_migrate_set_speed(int64_t value, Error **errp)
1134 {
1135 MigrationState *s;
1136
1137 if (value < 0) {
1138 value = 0;
1139 }
1140 if (value > SIZE_MAX) {
1141 value = SIZE_MAX;
1142 }
1143
1144 s = migrate_get_current();
1145 s->bandwidth_limit = value;
1146 if (s->to_dst_file) {
1147 qemu_file_set_rate_limit(s->to_dst_file,
1148 s->bandwidth_limit / XFER_LIMIT_RATIO);
1149 }
1150 }
1151
1152 void qmp_migrate_set_downtime(double value, Error **errp)
1153 {
1154 value *= 1e9;
1155 value = MAX(0, MIN(UINT64_MAX, value));
1156 max_downtime = (uint64_t)value;
1157 }
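/*
 * For illustration: the value is given in seconds, so passing 0.3 here
 * stores 0.3 * 1e9 = 300000000 ns, which is exactly the default
 * max_downtime defined above.
 */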
1158
1159 bool migrate_postcopy_ram(void)
1160 {
1161 MigrationState *s;
1162
1163 s = migrate_get_current();
1164
1165 return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
1166 }
1167
1168 bool migrate_auto_converge(void)
1169 {
1170 MigrationState *s;
1171
1172 s = migrate_get_current();
1173
1174 return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
1175 }
1176
1177 bool migrate_zero_blocks(void)
1178 {
1179 MigrationState *s;
1180
1181 s = migrate_get_current();
1182
1183 return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
1184 }
1185
1186 bool migrate_use_compression(void)
1187 {
1188 MigrationState *s;
1189
1190 s = migrate_get_current();
1191
1192 return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
1193 }
1194
1195 int migrate_compress_level(void)
1196 {
1197 MigrationState *s;
1198
1199 s = migrate_get_current();
1200
1201 return s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
1202 }
1203
1204 int migrate_compress_threads(void)
1205 {
1206 MigrationState *s;
1207
1208 s = migrate_get_current();
1209
1210 return s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
1211 }
1212
1213 int migrate_decompress_threads(void)
1214 {
1215 MigrationState *s;
1216
1217 s = migrate_get_current();
1218
1219 return s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
1220 }
1221
1222 bool migrate_use_events(void)
1223 {
1224 MigrationState *s;
1225
1226 s = migrate_get_current();
1227
1228 return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
1229 }
1230
1231 int migrate_use_xbzrle(void)
1232 {
1233 MigrationState *s;
1234
1235 s = migrate_get_current();
1236
1237 return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
1238 }
1239
1240 int64_t migrate_xbzrle_cache_size(void)
1241 {
1242 MigrationState *s;
1243
1244 s = migrate_get_current();
1245
1246 return s->xbzrle_cache_size;
1247 }
1248
1249 /* migration thread support */
1250 /*
1251 * Something bad happened to the RP stream, mark an error
1252 * The caller shall print or trace something to indicate why
1253 */
1254 static void mark_source_rp_bad(MigrationState *s)
1255 {
1256 s->rp_state.error = true;
1257 }
1258
1259 static struct rp_cmd_args {
1260 ssize_t len; /* -1 = variable */
1261 const char *name;
1262 } rp_cmd_args[] = {
1263 [MIG_RP_MSG_INVALID] = { .len = -1, .name = "INVALID" },
1264 [MIG_RP_MSG_SHUT] = { .len = 4, .name = "SHUT" },
1265 [MIG_RP_MSG_PONG] = { .len = 4, .name = "PONG" },
1266 [MIG_RP_MSG_REQ_PAGES] = { .len = 12, .name = "REQ_PAGES" },
1267 [MIG_RP_MSG_REQ_PAGES_ID] = { .len = -1, .name = "REQ_PAGES_ID" },
1268 [MIG_RP_MSG_MAX] = { .len = -1, .name = "MAX" },
1269 };
1270
1271 /*
1272 * Process a request for pages received on the return path,
1273 * We're allowed to send more than requested (e.g. to round to our page size)
1274 * and we don't need to send pages that have already been sent.
1275 */
1276 static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
1277 ram_addr_t start, size_t len)
1278 {
1279 long our_host_ps = getpagesize();
1280
1281 trace_migrate_handle_rp_req_pages(rbname, start, len);
1282
1283 /*
1284 * Since we currently insist on matching page sizes, just sanity check
1285 * we're being asked for whole host pages.
1286 */
1287 if (start & (our_host_ps-1) ||
1288 (len & (our_host_ps-1))) {
1289 error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
1290 " len: %zd", __func__, start, len);
1291 mark_source_rp_bad(ms);
1292 return;
1293 }
1294
1295 if (ram_save_queue_pages(ms, rbname, start, len)) {
1296 mark_source_rp_bad(ms);
1297 }
1298 }
1299
1300 /*
1301 * Handles messages sent on the return path towards the source VM
1302 *
1303 */
1304 static void *source_return_path_thread(void *opaque)
1305 {
1306 MigrationState *ms = opaque;
1307 QEMUFile *rp = ms->rp_state.from_dst_file;
1308 uint16_t header_len, header_type;
1309 uint8_t buf[512];
1310 uint32_t tmp32, sibling_error;
1311 ram_addr_t start = 0; /* =0 to silence warning */
1312 size_t len = 0, expected_len;
1313 int res;
1314
1315 trace_source_return_path_thread_entry();
1316 while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
1317 migration_is_setup_or_active(ms->state)) {
1318 trace_source_return_path_thread_loop_top();
1319 header_type = qemu_get_be16(rp);
1320 header_len = qemu_get_be16(rp);
1321
1322 if (header_type >= MIG_RP_MSG_MAX ||
1323 header_type == MIG_RP_MSG_INVALID) {
1324 error_report("RP: Received invalid message 0x%04x length 0x%04x",
1325 header_type, header_len);
1326 mark_source_rp_bad(ms);
1327 goto out;
1328 }
1329
1330 if ((rp_cmd_args[header_type].len != -1 &&
1331 header_len != rp_cmd_args[header_type].len) ||
1332 header_len > sizeof(buf)) {
1333 error_report("RP: Received '%s' message (0x%04x) with"
1334 "incorrect length %d expecting %zu",
1335 rp_cmd_args[header_type].name, header_type, header_len,
1336 (size_t)rp_cmd_args[header_type].len);
1337 mark_source_rp_bad(ms);
1338 goto out;
1339 }
1340
1341 /* We know we've got a valid header by this point */
1342 res = qemu_get_buffer(rp, buf, header_len);
1343 if (res != header_len) {
1344 error_report("RP: Failed reading data for message 0x%04x"
1345 " read %d expected %d",
1346 header_type, res, header_len);
1347 mark_source_rp_bad(ms);
1348 goto out;
1349 }
1350
1351 /* OK, we have the message and the data */
1352 switch (header_type) {
1353 case MIG_RP_MSG_SHUT:
1354 sibling_error = be32_to_cpup((uint32_t *)buf);
1355 trace_source_return_path_thread_shut(sibling_error);
1356 if (sibling_error) {
1357 error_report("RP: Sibling indicated error %d", sibling_error);
1358 mark_source_rp_bad(ms);
1359 }
1360 /*
1361 * We'll let the main thread deal with closing the RP
1362 * we could do a shutdown(2) on it, but we're the only user
1363 * anyway, so there's nothing gained.
1364 */
1365 goto out;
1366
1367 case MIG_RP_MSG_PONG:
1368 tmp32 = be32_to_cpup((uint32_t *)buf);
1369 trace_source_return_path_thread_pong(tmp32);
1370 break;
1371
1372 case MIG_RP_MSG_REQ_PAGES:
1373 start = be64_to_cpup((uint64_t *)buf);
1374 len = be32_to_cpup((uint32_t *)(buf + 8));
1375 migrate_handle_rp_req_pages(ms, NULL, start, len);
1376 break;
1377
1378 case MIG_RP_MSG_REQ_PAGES_ID:
1379 expected_len = 12 + 1; /* header + termination */
1380
1381 if (header_len >= expected_len) {
1382 start = be64_to_cpup((uint64_t *)buf);
1383 len = be32_to_cpup((uint32_t *)(buf + 8));
1384 /* Now we expect an idstr */
1385 tmp32 = buf[12]; /* Length of the following idstr */
1386 buf[13 + tmp32] = '\0';
1387 expected_len += tmp32;
1388 }
1389 if (header_len != expected_len) {
1390 error_report("RP: Req_Page_id with length %d expecting %zd",
1391 header_len, expected_len);
1392 mark_source_rp_bad(ms);
1393 goto out;
1394 }
1395 migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
1396 break;
1397
1398 default:
1399 break;
1400 }
1401 }
1402 if (qemu_file_get_error(rp)) {
1403 trace_source_return_path_thread_bad_end();
1404 mark_source_rp_bad(ms);
1405 }
1406
1407 trace_source_return_path_thread_end();
1408 out:
1409 ms->rp_state.from_dst_file = NULL;
1410 qemu_fclose(rp);
1411 return NULL;
1412 }
1413
1414 static int open_return_path_on_source(MigrationState *ms)
1415 {
1416
1417 ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
1418 if (!ms->rp_state.from_dst_file) {
1419 return -1;
1420 }
1421
1422 trace_open_return_path_on_source();
1423 qemu_thread_create(&ms->rp_state.rp_thread, "return path",
1424 source_return_path_thread, ms, QEMU_THREAD_JOINABLE);
1425
1426 trace_open_return_path_on_source_continue();
1427
1428 return 0;
1429 }
1430
1431 /* Returns 0 if the RP was ok, otherwise there was an error on the RP */
1432 static int await_return_path_close_on_source(MigrationState *ms)
1433 {
1434 /*
1435 * If this is a normal exit then the destination will send a SHUT and the
1436 * rp_thread will exit, however if there's an error we need to cause
1437 * it to exit.
1438 */
1439 if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
1440 /*
1441 * shutdown(2), if we have it, will cause it to unblock if it's stuck
1442 * waiting for the destination.
1443 */
1444 qemu_file_shutdown(ms->rp_state.from_dst_file);
1445 mark_source_rp_bad(ms);
1446 }
1447 trace_await_return_path_close_on_source_joining();
1448 qemu_thread_join(&ms->rp_state.rp_thread);
1449 trace_await_return_path_close_on_source_close();
1450 return ms->rp_state.error;
1451 }
1452
1453 /*
1454 * Switch from normal iteration to postcopy
1455 * Returns non-0 on error
1456 */
1457 static int postcopy_start(MigrationState *ms, bool *old_vm_running)
1458 {
1459 int ret;
1460 const QEMUSizedBuffer *qsb;
1461 int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1462 migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
1463 MIGRATION_STATUS_POSTCOPY_ACTIVE);
1464
1465 trace_postcopy_start();
1466 qemu_mutex_lock_iothread();
1467 trace_postcopy_start_set_run();
1468
1469 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
1470 *old_vm_running = runstate_is_running();
1471 global_state_store();
1472 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
1473 if (ret < 0) {
1474 goto fail;
1475 }
1476
1477 ret = bdrv_inactivate_all();
1478 if (ret < 0) {
1479 goto fail;
1480 }
1481
1482 /*
1483 * Cause any non-postcopiable, but iterative devices to
1484 * send out their final data.
1485 */
1486 qemu_savevm_state_complete_precopy(ms->to_dst_file, true);
1487
1488 /*
1489 * In the 'finish migrate' state, and with the io-lock held, everything
1490 * should be quiet, but we've potentially still got dirty pages and we
1491 * need to tell the destination to throw away any pages it's already
1492 * received that are dirty
1493 */
1494 if (ram_postcopy_send_discard_bitmap(ms)) {
1495 error_report("postcopy send discard bitmap failed");
1496 goto fail;
1497 }
1498
1499 /*
1500 * send rest of state - note things that are doing postcopy
1501 * will notice we're in POSTCOPY_ACTIVE and not actually
1502 * wrap their state up here
1503 */
1504 qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
1505 /* Ping just for debugging, helps line traces up */
1506 qemu_savevm_send_ping(ms->to_dst_file, 2);
1507
1508 /*
1509 * While loading the device state we may trigger page transfer
1510 * requests and the fd must be free to process those, and thus
1511 * the destination must read the whole device state off the fd before
1512 * it starts processing it. Unfortunately the ad-hoc migration format
1513 * doesn't allow the destination to know the size to read without fully
1514 * parsing it through each devices load-state code (especially the open
1515 * coded devices that use get/put).
1516 * So we wrap the device state up in a package with a length at the start;
1517 * to do this we use a qemu_buf to hold the whole of the device state.
1518 */
1519 QEMUFile *fb = qemu_bufopen("w", NULL);
1520 if (!fb) {
1521 error_report("Failed to create buffered file");
1522 goto fail;
1523 }
1524
1525 /*
1526 * Make sure the receiver can get incoming pages before we send the rest
1527 * of the state
1528 */
1529 qemu_savevm_send_postcopy_listen(fb);
1530
1531 qemu_savevm_state_complete_precopy(fb, false);
1532 qemu_savevm_send_ping(fb, 3);
1533
1534 qemu_savevm_send_postcopy_run(fb);
1535
1536 /* <><> end of stuff going into the package */
1537 qsb = qemu_buf_get(fb);
1538
1539 /* Now send that blob */
1540 if (qemu_savevm_send_packaged(ms->to_dst_file, qsb)) {
1541 goto fail_closefb;
1542 }
1543 qemu_fclose(fb);
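/*
 * For illustration, the package just sent bundles, in this order: the
 * postcopy 'listen' command, the remaining non-postcopy device state, a
 * debug ping (value 3) and the postcopy 'run' command. The destination
 * reads the whole length-prefixed blob off the stream before executing it,
 * which is what keeps the fd free for page requests while the device state
 * is being loaded.
 */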
1544
1545 /* Send a notify to give a chance for anything that needs to happen
1546 * at the transition to postcopy and after the device state; in particular
1547 * spice needs to trigger a transition now
1548 */
1549 ms->postcopy_after_devices = true;
1550 notifier_list_notify(&migration_state_notifiers, ms);
1551
1552 ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;
1553
1554 qemu_mutex_unlock_iothread();
1555
1556 /*
1557 * Although this ping is just for debug, it could potentially be
1558 * used for getting a better measurement of downtime at the source.
1559 */
1560 qemu_savevm_send_ping(ms->to_dst_file, 4);
1561
1562 ret = qemu_file_get_error(ms->to_dst_file);
1563 if (ret) {
1564 error_report("postcopy_start: Migration stream errored");
1565 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
1566 MIGRATION_STATUS_FAILED);
1567 }
1568
1569 return ret;
1570
1571 fail_closefb:
1572 qemu_fclose(fb);
1573 fail:
1574 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
1575 MIGRATION_STATUS_FAILED);
1576 qemu_mutex_unlock_iothread();
1577 return -1;
1578 }
1579
1580 /**
1581 * migration_completion: Used by migration_thread when there's not much left.
1582 * The caller 'breaks' the loop when this returns.
1583 *
1584 * @s: Current migration state
1585 * @current_active_state: The migration state we expect to be in
1586 * @*old_vm_running: Pointer to old_vm_running flag
1587 * @*start_time: Pointer to time to update
1588 */
1589 static void migration_completion(MigrationState *s, int current_active_state,
1590 bool *old_vm_running,
1591 int64_t *start_time)
1592 {
1593 int ret;
1594
1595 if (s->state == MIGRATION_STATUS_ACTIVE) {
1596 qemu_mutex_lock_iothread();
1597 *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1598 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
1599 *old_vm_running = runstate_is_running();
1600 ret = global_state_store();
1601
1602 if (!ret) {
1603 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
1604 if (ret >= 0) {
1605 ret = bdrv_inactivate_all();
1606 }
1607 if (ret >= 0) {
1608 qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
1609 qemu_savevm_state_complete_precopy(s->to_dst_file, false);
1610 }
1611 }
1612 qemu_mutex_unlock_iothread();
1613
1614 if (ret < 0) {
1615 goto fail;
1616 }
1617 } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
1618 trace_migration_completion_postcopy_end();
1619
1620 qemu_savevm_state_complete_postcopy(s->to_dst_file);
1621 trace_migration_completion_postcopy_end_after_complete();
1622 }
1623
1624 /*
1625 * If rp was opened we must clean up the thread before
1626 * cleaning everything else up (since if there are no failures
1627 * it will wait for the destination to send its status in
1628 * a SHUT command).
1629 * Postcopy opens rp if enabled (even if it's not activated)
1630 */
1631 if (migrate_postcopy_ram()) {
1632 int rp_error;
1633 trace_migration_completion_postcopy_end_before_rp();
1634 rp_error = await_return_path_close_on_source(s);
1635 trace_migration_completion_postcopy_end_after_rp(rp_error);
1636 if (rp_error) {
1637 goto fail_invalidate;
1638 }
1639 }
1640
1641 if (qemu_file_get_error(s->to_dst_file)) {
1642 trace_migration_completion_file_err();
1643 goto fail_invalidate;
1644 }
1645
1646 migrate_set_state(&s->state, current_active_state,
1647 MIGRATION_STATUS_COMPLETED);
1648 return;
1649
1650 fail_invalidate:
1651 /* If not doing postcopy, vm_start() will be called: let's regain
1652 * control on images.
1653 */
1654 if (s->state == MIGRATION_STATUS_ACTIVE) {
1655 Error *local_err = NULL;
1656
1657 bdrv_invalidate_cache_all(&local_err);
1658 if (local_err) {
1659 error_report_err(local_err);
1660 }
1661 }
1662
1663 fail:
1664 migrate_set_state(&s->state, current_active_state,
1665 MIGRATION_STATUS_FAILED);
1666 }
1667
1668 /*
1669 * Master migration thread on the source VM.
1670 * It drives the migration and pumps the data down the outgoing channel.
1671 */
1672 static void *migration_thread(void *opaque)
1673 {
1674 MigrationState *s = opaque;
1675 /* Used by the bandwidth calcs, updated later */
1676 int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1677 int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
1678 int64_t initial_bytes = 0;
1679 int64_t max_size = 0;
1680 int64_t start_time = initial_time;
1681 int64_t end_time;
1682 bool old_vm_running = false;
1683 bool entered_postcopy = false;
1684 /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */
1685 enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE;
1686
1687 rcu_register_thread();
1688
1689 qemu_savevm_state_header(s->to_dst_file);
1690
1691 if (migrate_postcopy_ram()) {
1692 /* Now tell the dest that it should open its end so it can reply */
1693 qemu_savevm_send_open_return_path(s->to_dst_file);
1694
1695 /* And do a ping that will make stuff easier to debug */
1696 qemu_savevm_send_ping(s->to_dst_file, 1);
1697
1698 /*
1699 * Tell the destination that we *might* want to do postcopy later;
1700 * if the other end can't do postcopy it should fail now, nice and
1701 * early.
1702 */
1703 qemu_savevm_send_postcopy_advise(s->to_dst_file);
1704 }
1705
1706 qemu_savevm_state_begin(s->to_dst_file, &s->params);
1707
1708 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
1709 current_active_state = MIGRATION_STATUS_ACTIVE;
1710 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1711 MIGRATION_STATUS_ACTIVE);
1712
1713 trace_migration_thread_setup_complete();
1714
1715 while (s->state == MIGRATION_STATUS_ACTIVE ||
1716 s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
1717 int64_t current_time;
1718 uint64_t pending_size;
1719
1720 if (!qemu_file_rate_limit(s->to_dst_file)) {
1721 uint64_t pend_post, pend_nonpost;
1722
1723 qemu_savevm_state_pending(s->to_dst_file, max_size, &pend_nonpost,
1724 &pend_post);
1725 pending_size = pend_nonpost + pend_post;
1726 trace_migrate_pending(pending_size, max_size,
1727 pend_post, pend_nonpost);
1728 if (pending_size && pending_size >= max_size) {
1729 /* Still a significant amount to transfer */
1730
1731 if (migrate_postcopy_ram() &&
1732 s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
1733 pend_nonpost <= max_size &&
1734 atomic_read(&s->start_postcopy)) {
1735
1736 if (!postcopy_start(s, &old_vm_running)) {
1737 current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE;
1738 entered_postcopy = true;
1739 }
1740
1741 continue;
1742 }
1743 /* Just another iteration step */
1744 qemu_savevm_state_iterate(s->to_dst_file, entered_postcopy);
1745 } else {
1746 trace_migration_thread_low_pending(pending_size);
1747 migration_completion(s, current_active_state,
1748 &old_vm_running, &start_time);
1749 break;
1750 }
1751 }
1752
1753 if (qemu_file_get_error(s->to_dst_file)) {
1754 migrate_set_state(&s->state, current_active_state,
1755 MIGRATION_STATUS_FAILED);
1756 trace_migration_thread_file_err();
1757 break;
1758 }
1759 current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1760 if (current_time >= initial_time + BUFFER_DELAY) {
1761 uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) -
1762 initial_bytes;
1763 uint64_t time_spent = current_time - initial_time;
1764 double bandwidth = (double)transferred_bytes / time_spent;
1765 max_size = bandwidth * migrate_max_downtime() / 1000000;
1766
1767 s->mbps = (((double) transferred_bytes * 8.0) /
1768 ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
1769
1770 trace_migrate_transferred(transferred_bytes, time_spent,
1771 bandwidth, max_size);
1772 /* If we haven't sent anything, we don't want to recalculate;
1773 10000 is a small enough number for our purposes */
1774 if (s->dirty_bytes_rate && transferred_bytes > 10000) {
1775 s->expected_downtime = s->dirty_bytes_rate / bandwidth;
1776 }
1777
1778 qemu_file_reset_rate_limit(s->to_dst_file);
1779 initial_time = current_time;
1780 initial_bytes = qemu_ftell(s->to_dst_file);
1781 }
1782 if (qemu_file_rate_limit(s->to_dst_file)) {
1783 /* usleep expects microseconds */
1784 g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
1785 }
1786 }
1787
1788 trace_migration_thread_after_loop();
1789 /* If we enabled cpu throttling for auto-converge, turn it off. */
1790 cpu_throttle_stop();
1791 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1792
1793 qemu_mutex_lock_iothread();
1794 qemu_savevm_state_cleanup();
1795 if (s->state == MIGRATION_STATUS_COMPLETED) {
1796 uint64_t transferred_bytes = qemu_ftell(s->to_dst_file);
1797 s->total_time = end_time - s->total_time;
1798 if (!entered_postcopy) {
1799 s->downtime = end_time - start_time;
1800 }
1801 if (s->total_time) {
1802 s->mbps = (((double) transferred_bytes * 8.0) /
1803 ((double) s->total_time)) / 1000;
1804 }
1805 runstate_set(RUN_STATE_POSTMIGRATE);
1806 } else {
1807 if (old_vm_running && !entered_postcopy) {
1808 vm_start();
1809 }
1810 }
1811 qemu_bh_schedule(s->cleanup_bh);
1812 qemu_mutex_unlock_iothread();
1813
1814 rcu_unregister_thread();
1815 return NULL;
1816 }
1817
1818 void migrate_fd_connect(MigrationState *s)
1819 {
1820 /* This is a best first approximation; convert ns to ms */
1821 s->expected_downtime = max_downtime/1000000;
1822 s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);
1823
1824 qemu_file_set_blocking(s->to_dst_file, true);
1825 qemu_file_set_rate_limit(s->to_dst_file,
1826 s->bandwidth_limit / XFER_LIMIT_RATIO);
1827
1828 /* Notify before starting migration thread */
1829 notifier_list_notify(&migration_state_notifiers, s);
1830
1831 /*
1832 * Open the return path; currently for postcopy but other things might
1833 * also want it.
1834 */
1835 if (migrate_postcopy_ram()) {
1836 if (open_return_path_on_source(s)) {
1837 error_report("Unable to open return-path for postcopy");
1838 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1839 MIGRATION_STATUS_FAILED);
1840 migrate_fd_cleanup(s);
1841 return;
1842 }
1843 }
1844
1845 migrate_compress_threads_create();
1846 qemu_thread_create(&s->thread, "migration", migration_thread, s,
1847 QEMU_THREAD_JOINABLE);
1848 s->migration_thread_running = true;
1849 }
1850
1851 PostcopyState postcopy_state_get(void)
1852 {
1853 return atomic_mb_read(&incoming_postcopy_state);
1854 }
1855
1856 /* Set the state and return the old state */
1857 PostcopyState postcopy_state_set(PostcopyState new_state)
1858 {
1859 return atomic_xchg(&incoming_postcopy_state, new_state);
1860 }
1861