migration/migration.c
/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "sysemu/sysemu.h"
#include "block/block.h"
#include "qapi/qmp/qerror.h"
#include "qapi/util.h"
#include "qemu/sockets.h"
#include "qemu/rcu.h"
#include "migration/block.h"
#include "migration/postcopy-ram.h"
#include "qemu/thread.h"
#include "qmp-commands.h"
#include "trace.h"
#include "qapi-event.h"
#include "qom/cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "io/channel-buffer.h"
#include "io/channel-tls.h"
#include "migration/colo.h"

#define MAX_THROTTLE (32 << 20) /* Migration transfer speed throttling:
                                   32 MiB/s by default */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
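/*
 * Example of how these two interact: qemu_file_set_rate_limit() is passed
 * max_bandwidth / XFER_LIMIT_RATIO, i.e. the byte budget for one 100 ms
 * BUFFER_DELAY chunk; at the default MAX_THROTTLE of 32 MiB/s that works
 * out to roughly 3.2 MiB per chunk.
 */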

/* Time in milliseconds we are allowed to stop the source,
 * for sending the last part */
#define DEFAULT_MIGRATE_SET_DOWNTIME 300

/* Maximum migrate downtime set to 2000 seconds */
#define MAX_MIGRATE_DOWNTIME_SECONDS 2000
#define MAX_MIGRATE_DOWNTIME (MAX_MIGRATE_DOWNTIME_SECONDS * 1000)

/* Default compression thread count */
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
/* Default decompression thread count, usually decompression is at
 * least 4 times as fast as compression. */
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
/* 0: no compression, 1: best speed, ... 9: best compression ratio */
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
/* Define default autoconverge cpu throttle migration parameters */
#define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
#define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)

/* The delay time (in ms) between two COLO checkpoints
 * Note: Please change this default value to 10000 when we support hybrid mode.
 */
#define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY 200

static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

static bool deferred_incoming;

/*
 * Current state of incoming postcopy; note this is not part of
 * MigrationIncomingState since its state is used during cleanup
 * at the end as MIS is being freed.
 */
static PostcopyState incoming_postcopy_state;

/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need to add
   dynamic creation of migration */

/* For outgoing */
MigrationState *migrate_get_current(void)
{
    static bool once;
    static MigrationState current_migration = {
        .state = MIGRATION_STATUS_NONE,
        .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
        .mbps = -1,
        .parameters = {
            .compress_level = DEFAULT_MIGRATE_COMPRESS_LEVEL,
            .compress_threads = DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT,
            .decompress_threads = DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT,
            .cpu_throttle_initial = DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL,
            .cpu_throttle_increment = DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT,
            .max_bandwidth = MAX_THROTTLE,
            .downtime_limit = DEFAULT_MIGRATE_SET_DOWNTIME,
            .x_checkpoint_delay = DEFAULT_MIGRATE_X_CHECKPOINT_DELAY,
        },
    };

    if (!once) {
        current_migration.parameters.tls_creds = g_strdup("");
        current_migration.parameters.tls_hostname = g_strdup("");
        once = true;
    }
    return &current_migration;
}

MigrationIncomingState *migration_incoming_get_current(void)
{
    static bool once;
    static MigrationIncomingState mis_current;

    if (!once) {
        /* Zero the whole struct first, then set the initial state, so the
         * assignment is not clobbered by the memset. */
        memset(&mis_current, 0, sizeof(MigrationIncomingState));
        mis_current.state = MIGRATION_STATUS_NONE;
        QLIST_INIT(&mis_current.loadvm_handlers);
        qemu_mutex_init(&mis_current.rp_mutex);
        qemu_event_init(&mis_current.main_thread_load_event, false);
        once = true;
    }
    return &mis_current;
}

void migration_incoming_state_destroy(void)
{
    struct MigrationIncomingState *mis = migration_incoming_get_current();

    qemu_event_destroy(&mis->main_thread_load_event);
    loadvm_free_handlers(mis);
}


typedef struct {
    bool optional;
    uint32_t size;
    uint8_t runstate[100];
    RunState state;
    bool received;
} GlobalState;
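/*
 * Note: vmstate_globalstate below always sends the whole 100-byte runstate[]
 * buffer on the wire (VMSTATE_BUFFER); the 'size' field, set to strlen + 1
 * in global_state_pre_save(), records how much of it is meaningful.
 */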

static GlobalState global_state;

int global_state_store(void)
{
    if (!runstate_store((char *)global_state.runstate,
                        sizeof(global_state.runstate))) {
        error_report("runstate name too big: %s", global_state.runstate);
        trace_migrate_state_too_big();
        return -EINVAL;
    }
    return 0;
}

void global_state_store_running(void)
{
    const char *state = RunState_lookup[RUN_STATE_RUNNING];
    strncpy((char *)global_state.runstate,
            state, sizeof(global_state.runstate));
}

static bool global_state_received(void)
{
    return global_state.received;
}

static RunState global_state_get_runstate(void)
{
    return global_state.state;
}

void global_state_set_optional(void)
{
    global_state.optional = true;
}

static bool global_state_needed(void *opaque)
{
    GlobalState *s = opaque;
    char *runstate = (char *)s->runstate;

    /* If it is not optional, it is mandatory */

    if (s->optional == false) {
        return true;
    }

    /* If state is running or paused, it is not needed */

    if (strcmp(runstate, "running") == 0 ||
        strcmp(runstate, "paused") == 0) {
        return false;
    }

    /* for any other state it is needed */
    return true;
}

static int global_state_post_load(void *opaque, int version_id)
{
    GlobalState *s = opaque;
    Error *local_err = NULL;
    int r;
    char *runstate = (char *)s->runstate;

    s->received = true;
    trace_migrate_global_state_post_load(runstate);

    r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE__MAX,
                        -1, &local_err);

    if (r == -1) {
        if (local_err) {
            error_report_err(local_err);
        }
        return -EINVAL;
    }
    s->state = r;

    return 0;
}

static void global_state_pre_save(void *opaque)
{
    GlobalState *s = opaque;

    trace_migrate_global_state_pre_save((char *)s->runstate);
    s->size = strlen((char *)s->runstate) + 1;
}

static const VMStateDescription vmstate_globalstate = {
    .name = "globalstate",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = global_state_post_load,
    .pre_save = global_state_pre_save,
    .needed = global_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(size, GlobalState),
        VMSTATE_BUFFER(runstate, GlobalState),
        VMSTATE_END_OF_LIST()
    },
};

void register_global_state(void)
{
    /* We use this state regardless of whether we receive it */
    strcpy((char *)&global_state.runstate, "");
    global_state.received = false;
    vmstate_register(NULL, 0, &vmstate_globalstate, &global_state);
}

static void migrate_generate_event(int new_state)
{
    if (migrate_use_events()) {
        qapi_event_send_migration(new_state, &error_abort);
    }
}

/*
 * Called on -incoming with a defer: uri.
 * The migration can be started later after any parameters have been
 * changed.
 */
static void deferred_incoming_migration(Error **errp)
{
    if (deferred_incoming) {
        error_setg(errp, "Incoming migration already deferred");
    }
    deferred_incoming = true;
}

/* Request a range of pages from the source VM at the given
 * start address.
 * rbname: Name of the RAMBlock to request the page in, if NULL it's the same
 *         as the last request (a name must have been given previously)
 * start: Address offset within the RB
 * len: Length in bytes required - must be a multiple of pagesize
 */
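/*
 * On the wire (all fields big-endian), e.g. for the RAMBlock name "pc.ram"
 * (just an illustrative name here):
 *   bytes  0-7   start address
 *   bytes  8-11  length
 *   byte   12    rbname length (MIG_RP_MSG_REQ_PAGES_ID only)
 *   bytes  13-   rbname, not NUL-terminated on the wire
 */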
void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
                               ram_addr_t start, size_t len)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname len (1),
                                   rbname up to 255 bytes */
    size_t msglen = 12; /* start + len */

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    if (rbname) {
        int rbname_len = strlen(rbname);
        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES_ID, msglen, bufc);
    } else {
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES, msglen, bufc);
    }
}

void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p;

    qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort);
    if (!strcmp(uri, "defer")) {
        deferred_incoming_migration(errp);
    } else if (strstart(uri, "tcp:", &p)) {
        tcp_start_incoming_migration(p, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}

static void process_incoming_migration_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    /* Make sure all file formats flush their mutable metadata */
    bdrv_invalidate_cache_all(&local_err);
    if (local_err) {
        migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_FAILED);
        error_report_err(local_err);
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }

    /* If we get an error here, just don't restart the VM yet. */
    blk_resume_after_migration(&local_err);
    if (local_err) {
        error_free(local_err);
        local_err = NULL;
        autostart = false;
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self();

    /* If global state section was not received or we are in running
       state, we need to obey autostart.  Any other state is set with
       runstate_set. */

    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else {
        runstate_set(global_state_get_runstate());
    }
    migrate_decompress_threads_join();
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COMPLETED);
    qemu_bh_delete(mis->bh);
    migration_incoming_state_destroy();
}

static void process_incoming_migration_co(void *opaque)
{
    QEMUFile *f = opaque;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyState ps;
    int ret;

    mis->from_src_file = f;
    mis->largest_page_size = qemu_ram_pagesize_largest();
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                      MIGRATION_STATUS_ACTIVE);
    ret = qemu_loadvm_state(f);

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    /* We've got the COLO info, so we know whether we are in COLO mode */
    if (!ret && migration_incoming_enable_colo()) {
        mis->migration_incoming_co = qemu_coroutine_self();
        qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
             colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE);
        mis->have_colo_incoming_thread = true;
        qemu_coroutine_yield();

        /* Wait for the COLO incoming thread to exit before freeing resources */
        qemu_thread_join(&mis->colo_incoming_thread);
    }

    if (ret < 0) {
        migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_FAILED);
        error_report("load of migration failed: %s", strerror(-ret));
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }

    qemu_fclose(f);
    free_xbzrle_decoded_buf();

    mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
    qemu_bh_schedule(mis->bh);
}

void migration_fd_process_incoming(QEMUFile *f)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, f);

    migrate_decompress_threads_create();
    qemu_file_set_blocking(f, false);
    qemu_coroutine_enter(co);
}


void migration_channel_process_incoming(MigrationState *s,
                                        QIOChannel *ioc)
{
    trace_migration_set_incoming_channel(
        ioc, object_get_typename(OBJECT(ioc)));

    if (s->parameters.tls_creds &&
        *s->parameters.tls_creds &&
        !object_dynamic_cast(OBJECT(ioc),
                             TYPE_QIO_CHANNEL_TLS)) {
        Error *local_err = NULL;
        migration_tls_channel_process_incoming(s, ioc, &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    } else {
        QEMUFile *f = qemu_fopen_channel_input(ioc);
        migration_fd_process_incoming(f);
    }
}


void migration_channel_connect(MigrationState *s,
                               QIOChannel *ioc,
                               const char *hostname)
{
    trace_migration_set_outgoing_channel(
        ioc, object_get_typename(OBJECT(ioc)), hostname);

    if (s->parameters.tls_creds &&
        *s->parameters.tls_creds &&
        !object_dynamic_cast(OBJECT(ioc),
                             TYPE_QIO_CHANNEL_TLS)) {
        Error *local_err = NULL;
        migration_tls_channel_connect(s, ioc, hostname, &local_err);
        if (local_err) {
            migrate_fd_error(s, local_err);
            error_free(local_err);
        }
    } else {
        QEMUFile *f = qemu_fopen_channel_output(ioc);

        s->to_dst_file = f;

        migrate_fd_connect(s);
    }
}


/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
void migrate_send_rp_message(MigrationIncomingState *mis,
                             enum mig_rp_message_type message_type,
                             uint16_t len, void *data)
{
    trace_migrate_send_rp_message((int)message_type, len);
    qemu_mutex_lock(&mis->rp_mutex);
    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    qemu_fflush(mis->to_src_file);
    qemu_mutex_unlock(&mis->rp_mutex);
}

/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP.  Non-0 value indicates
 * error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList *caps;
    MigrationState *s = migrate_get_current();
    int i;

    caps = NULL; /* silence compiler warning */
    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        if (i == MIGRATION_CAPABILITY_X_COLO && !colo_supported()) {
            continue;
        }
        if (head == NULL) {
            head = g_malloc0(sizeof(*caps));
            caps = head;
        } else {
            caps->next = g_malloc0(sizeof(*caps));
            caps = caps->next;
        }
        caps->value =
            g_malloc(sizeof(*caps->value));
        caps->value->capability = i;
        caps->value->state = s->enabled_capabilities[i];
    }

    return head;
}

MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    params = g_malloc0(sizeof(*params));
    params->has_compress_level = true;
    params->compress_level = s->parameters.compress_level;
    params->has_compress_threads = true;
    params->compress_threads = s->parameters.compress_threads;
    params->has_decompress_threads = true;
    params->decompress_threads = s->parameters.decompress_threads;
    params->has_cpu_throttle_initial = true;
    params->cpu_throttle_initial = s->parameters.cpu_throttle_initial;
    params->has_cpu_throttle_increment = true;
    params->cpu_throttle_increment = s->parameters.cpu_throttle_increment;
    params->has_tls_creds = !!s->parameters.tls_creds;
    params->tls_creds = g_strdup(s->parameters.tls_creds);
    params->has_tls_hostname = !!s->parameters.tls_hostname;
    params->tls_hostname = g_strdup(s->parameters.tls_hostname);
    params->has_max_bandwidth = true;
    params->max_bandwidth = s->parameters.max_bandwidth;
    params->has_downtime_limit = true;
    params->downtime_limit = s->parameters.downtime_limit;
    params->has_x_checkpoint_delay = true;
    params->x_checkpoint_delay = s->parameters.x_checkpoint_delay;

    return params;
}

/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
static bool migration_is_setup_or_active(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_SETUP:
        return true;

    default:
        return false;

    }
}

static void get_xbzrle_cache_stats(MigrationInfo *info)
{
    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
        info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
        info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
        info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
        info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
    }
}

static void populate_ram_info(MigrationInfo *info, MigrationState *s)
{
    info->has_ram = true;
    info->ram = g_malloc0(sizeof(*info->ram));
    info->ram->transferred = ram_bytes_transferred();
    info->ram->total = ram_bytes_total();
    info->ram->duplicate = dup_mig_pages_transferred();
    /* legacy value.  It is not used anymore */
    info->ram->skipped = 0;
    info->ram->normal = norm_mig_pages_transferred();
    info->ram->normal_bytes = norm_mig_pages_transferred() *
        qemu_target_page_size();
    info->ram->mbps = s->mbps;
    info->ram->dirty_sync_count = ram_dirty_sync_count();
    info->ram->postcopy_requests = ram_postcopy_requests();
    info->ram->page_size = qemu_target_page_size();

    if (s->state != MIGRATION_STATUS_COMPLETED) {
        info->ram->remaining = ram_bytes_remaining();
        info->ram->dirty_pages_rate = ram_dirty_pages_rate();
    }
}

MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        break;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        populate_ram_info(info, s);

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        if (cpu_throttle_active()) {
            info->has_cpu_throttle_percentage = true;
            info->cpu_throttle_percentage = cpu_throttle_get_percentage();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
        /* Mostly the same as active; TODO add some postcopy stats */
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        populate_ram_info(info, s);

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        /* TODO: display COLO specific information (checkpoint info etc.) */
        break;
    case MIGRATION_STATUS_COMPLETED:
        get_xbzrle_cache_stats(info);

        info->has_status = true;
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        populate_ram_info(info, s);
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        if (s->error) {
            info->has_error_desc = true;
            info->error_desc = g_strdup(error_get_pretty(s->error));
        }
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    }
    info->status = s->state;

    return info;
}

void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;
    bool old_postcopy_cap = migrate_postcopy_ram();

    if (migration_is_setup_or_active(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    for (cap = params; cap; cap = cap->next) {
        if (cap->value->capability == MIGRATION_CAPABILITY_X_COLO) {
            if (!colo_supported()) {
                error_setg(errp, "COLO is not currently supported, please"
                           " configure with the --enable-colo option in order"
                           " to support the COLO feature");
                continue;
            }
        }
        s->enabled_capabilities[cap->value->capability] = cap->value->state;
    }

    if (migrate_postcopy_ram()) {
        if (migrate_use_compression()) {
            /* The decompression threads asynchronously write into RAM
             * rather than use the atomic copies needed to avoid
             * userfaulting.  It should be possible to fix the decompression
             * threads for compatibility in future.
             */
            error_report("Postcopy is not currently compatible with "
                         "compression");
            s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
                false;
        }
        /* This check is reasonably expensive, so only when it's being
         * set the first time, also it's only the destination that needs
         * special support.
         */
        if (!old_postcopy_cap && runstate_check(RUN_STATE_INMIGRATE) &&
            !postcopy_ram_supported_by_host()) {
            /* postcopy_ram_supported_by_host will have emitted a more
             * detailed message
             */
            error_report("Postcopy is not supported");
            s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
                false;
        }
    }
}

void qmp_migrate_set_parameters(MigrationParameters *params, Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (params->has_compress_level &&
        (params->compress_level < 0 || params->compress_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
                   "is invalid, it should be in the range of 0 to 9");
        return;
    }
    if (params->has_compress_threads &&
        (params->compress_threads < 1 || params->compress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "compress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (params->has_decompress_threads &&
        (params->decompress_threads < 1 || params->decompress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "decompress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (params->has_cpu_throttle_initial &&
        (params->cpu_throttle_initial < 1 ||
         params->cpu_throttle_initial > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_initial",
                   "an integer in the range of 1 to 99");
        return;
    }
    if (params->has_cpu_throttle_increment &&
        (params->cpu_throttle_increment < 1 ||
         params->cpu_throttle_increment > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_increment",
                   "an integer in the range of 1 to 99");
        return;
    }
    if (params->has_max_bandwidth &&
        (params->max_bandwidth < 0 || params->max_bandwidth > SIZE_MAX)) {
        error_setg(errp, "Parameter 'max_bandwidth' expects an integer in the"
                         " range of 0 to %zu bytes/second", SIZE_MAX);
        return;
    }
    if (params->has_downtime_limit &&
        (params->downtime_limit < 0 ||
         params->downtime_limit > MAX_MIGRATE_DOWNTIME)) {
        error_setg(errp, "Parameter 'downtime_limit' expects an integer in "
                         "the range of 0 to %d milliseconds",
                         MAX_MIGRATE_DOWNTIME);
        return;
    }
    if (params->has_x_checkpoint_delay && (params->x_checkpoint_delay < 0)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "x_checkpoint_delay",
                   "is invalid, it should be positive");
        /* Return like the other validation failures above, so an invalid
         * value does not fall through and get applied anyway. */
        return;
    }

    if (params->has_compress_level) {
        s->parameters.compress_level = params->compress_level;
    }
    if (params->has_compress_threads) {
        s->parameters.compress_threads = params->compress_threads;
    }
    if (params->has_decompress_threads) {
        s->parameters.decompress_threads = params->decompress_threads;
    }
    if (params->has_cpu_throttle_initial) {
        s->parameters.cpu_throttle_initial = params->cpu_throttle_initial;
    }
    if (params->has_cpu_throttle_increment) {
        s->parameters.cpu_throttle_increment = params->cpu_throttle_increment;
    }
    if (params->has_tls_creds) {
        g_free(s->parameters.tls_creds);
        s->parameters.tls_creds = g_strdup(params->tls_creds);
    }
    if (params->has_tls_hostname) {
        g_free(s->parameters.tls_hostname);
        s->parameters.tls_hostname = g_strdup(params->tls_hostname);
    }
    if (params->has_max_bandwidth) {
        s->parameters.max_bandwidth = params->max_bandwidth;
        if (s->to_dst_file) {
            qemu_file_set_rate_limit(s->to_dst_file,
                                s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
        }
    }
    if (params->has_downtime_limit) {
        s->parameters.downtime_limit = params->downtime_limit;
    }

    if (params->has_x_checkpoint_delay) {
        s->parameters.x_checkpoint_delay = params->x_checkpoint_delay;
        if (migration_in_colo_state()) {
            colo_checkpoint_notify(s);
        }
    }
}


void qmp_migrate_start_postcopy(Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (!migrate_postcopy_ram()) {
        error_setg(errp, "Enable postcopy with migrate_set_capability before"
                         " the start of migration");
        return;
    }

    if (s->state == MIGRATION_STATUS_NONE) {
        error_setg(errp, "Postcopy must be started after migration has been"
                         " started");
        return;
    }
    /*
     * we don't error if migration has finished since that would be racy
     * with issuing this command.
     */
    atomic_set(&s->start_postcopy, true);
}

/* shared migration helpers */

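/*
 * Transition '*state' from old_state to new_state atomically.  The cmpxchg
 * means the transition only happens if no other thread has changed the
 * state in the meantime (e.g. a concurrent cancel); otherwise this call is
 * a no-op and no event is emitted.
 */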
void migrate_set_state(int *state, int old_state, int new_state)
{
    if (atomic_cmpxchg(state, old_state, new_state) == old_state) {
        trace_migrate_set_state(new_state);
        migrate_generate_event(new_state);
    }
}

static void migrate_fd_cleanup(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    migration_page_queue_free();

    if (s->to_dst_file) {
        trace_migrate_fd_cleanup();
        qemu_mutex_unlock_iothread();
        if (s->migration_thread_running) {
            qemu_thread_join(&s->thread);
            s->migration_thread_running = false;
        }
        qemu_mutex_lock_iothread();

        migrate_compress_threads_join();
        qemu_fclose(s->to_dst_file);
        s->to_dst_file = NULL;
    }

    assert((s->state != MIGRATION_STATUS_ACTIVE) &&
           (s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE));

    if (s->state == MIGRATION_STATUS_CANCELLING) {
        migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
                          MIGRATION_STATUS_CANCELLED);
    }

    notifier_list_notify(&migration_state_notifiers, s);
}

void migrate_fd_error(MigrationState *s, const Error *error)
{
    trace_migrate_fd_error(error_get_pretty(error));
    assert(s->to_dst_file == NULL);
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_FAILED);
    if (!s->error) {
        s->error = error_copy(error);
    }
    notifier_list_notify(&migration_state_notifiers, s);
}

static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;
    QEMUFile *f = migrate_get_current()->to_dst_file;
    trace_migrate_fd_cancel();

    if (s->rp_state.from_dst_file) {
        /* shut down the rp socket, causing the rp thread to exit */
        qemu_file_shutdown(s->rp_state.from_dst_file);
    }

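    /*
     * The state can change concurrently (e.g. the migration thread moving
     * SETUP -> ACTIVE), so retry the cmpxchg in migrate_set_state() until
     * either the state is no longer setup/active or we reach CANCELLING.
     */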
    do {
        old_state = s->state;
        if (!migration_is_setup_or_active(old_state)) {
            break;
        }
        migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
        qemu_file_shutdown(f);
    }
    if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
        Error *local_err = NULL;

        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        } else {
            s->block_inactive = false;
        }
    }
}

void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}

void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}

bool migration_in_setup(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_SETUP;
}

bool migration_has_finished(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_COMPLETED;
}

bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
}

bool migration_in_postcopy(void)
{
    MigrationState *s = migrate_get_current();

    return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}

bool migration_in_postcopy_after_devices(MigrationState *s)
{
    return migration_in_postcopy() && s->postcopy_after_devices;
}

bool migration_is_idle(void)
{
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_COMPLETED:
    case MIGRATION_STATUS_FAILED:
        return true;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_COLO:
        return false;
    case MIGRATION_STATUS__MAX:
        g_assert_not_reached();
    }

    return false;
}

MigrationState *migrate_init(const MigrationParams *params)
{
    MigrationState *s = migrate_get_current();

    /*
     * Reinitialise all migration state, except
     * parameters/capabilities that the user set, and
     * locks.
     */
    s->bytes_xfer = 0;
    s->xfer_limit = 0;
    s->cleanup_bh = 0;
    s->to_dst_file = NULL;
    s->state = MIGRATION_STATUS_NONE;
    s->params = *params;
    s->rp_state.from_dst_file = NULL;
    s->rp_state.error = false;
    s->mbps = 0.0;
    s->downtime = 0;
    s->expected_downtime = 0;
    s->setup_time = 0;
    s->start_postcopy = false;
    s->postcopy_after_devices = false;
    s->migration_thread_running = false;
    error_free(s->error);
    s->error = NULL;

    migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    return s;
}

static GSList *migration_blockers;

int migrate_add_blocker(Error *reason, Error **errp)
{
    if (only_migratable) {
        error_propagate(errp, error_copy(reason));
        error_prepend(errp, "disallowing migration blocker "
                            "(--only-migratable) for: ");
        return -EACCES;
    }

    if (migration_is_idle()) {
        migration_blockers = g_slist_prepend(migration_blockers, reason);
        return 0;
    }

    error_propagate(errp, error_copy(reason));
    error_prepend(errp, "disallowing migration blocker (migration in "
                        "progress) for: ");
    return -EBUSY;
}

void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}

int check_migratable(Object *obj, Error **err)
{
    DeviceClass *dc = DEVICE_GET_CLASS(obj);
    if (only_migratable && dc->vmsd) {
        if (dc->vmsd->unmigratable) {
            error_setg(err, "Device %s is not migratable, but "
                       "--only-migratable was specified",
                       object_get_typename(obj));
            return -1;
        }
    }

    return 0;
}

void qmp_migrate_incoming(const char *uri, Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;

    if (!deferred_incoming) {
        error_setg(errp, "For use with '-incoming defer'");
        return;
    }
    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
        return;
    }

    qemu_start_incoming_migration(uri, &local_err);

    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    once = false;
}

bool migration_is_blocked(Error **errp)
{
    if (qemu_savevm_state_blocked(errp)) {
        return true;
    }

    if (migration_blockers) {
        *errp = error_copy(migration_blockers->data);
        return true;
    }

    return false;
}

void qmp_migrate(const char *uri, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
                 Error **errp)
{
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    MigrationParams params;
    const char *p;

    params.blk = has_blk && blk;
    params.shared = has_inc && inc;

    if (migration_is_setup_or_active(s->state) ||
        s->state == MIGRATION_STATUS_CANCELLING ||
        s->state == MIGRATION_STATUS_COLO) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return;
    }

    if (migration_is_blocked(errp)) {
        return;
    }

    s = migrate_init(&params);

    if (strstart(uri, "tcp:", &p)) {
        tcp_start_outgoing_migration(s, p, &local_err);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_outgoing_migration(s, p, &local_err);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
    } else {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                          MIGRATION_STATUS_FAILED);
        return;
    }

    if (local_err) {
        migrate_fd_error(s, local_err);
        error_propagate(errp, local_err);
        return;
    }
}

void qmp_migrate_cancel(Error **errp)
{
    migrate_fd_cancel(migrate_get_current());
}

void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
    MigrationState *s = migrate_get_current();
    int64_t new_size;

    /* Check for truncation */
    if (value != (size_t)value) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return;
    }

    /* Cache should not be larger than guest ram size */
    if (value > ram_bytes_total()) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeds guest ram size");
        return;
    }

    new_size = xbzrle_cache_resize(value);
    if (new_size < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "is smaller than page size");
        return;
    }

    s->xbzrle_cache_size = new_size;
}

int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}

void qmp_migrate_set_speed(int64_t value, Error **errp)
{
    MigrationParameters p = {
        .has_max_bandwidth = true,
        .max_bandwidth = value,
    };

    qmp_migrate_set_parameters(&p, errp);
}

void qmp_migrate_set_downtime(double value, Error **errp)
{
    if (value < 0 || value > MAX_MIGRATE_DOWNTIME_SECONDS) {
        error_setg(errp, "Parameter 'downtime_limit' expects an integer in "
                         "the range of 0 to %d seconds",
                         MAX_MIGRATE_DOWNTIME_SECONDS);
        return;
    }

    value *= 1000; /* Convert to milliseconds */
    value = MAX(0, MIN(INT64_MAX, value));

    MigrationParameters p = {
        .has_downtime_limit = true,
        .downtime_limit = value,
    };

    qmp_migrate_set_parameters(&p, errp);
}

bool migrate_release_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_RELEASE_RAM];
}

bool migrate_postcopy_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
}

bool migrate_auto_converge(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
}

bool migrate_zero_blocks(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
}

bool migrate_use_compression(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
}

int migrate_compress_level(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.compress_level;
}

int migrate_compress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.compress_threads;
}

int migrate_decompress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.decompress_threads;
}

bool migrate_use_events(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
}

int migrate_use_xbzrle(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
}

int64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->xbzrle_cache_size;
}

/* migration thread support */
/*
 * Something bad happened to the RP stream, mark an error.
 * The caller shall print or trace something to indicate why.
 */
static void mark_source_rp_bad(MigrationState *s)
{
    s->rp_state.error = true;
}

static struct rp_cmd_args {
    ssize_t     len; /* -1 = variable */
    const char *name;
} rp_cmd_args[] = {
    [MIG_RP_MSG_INVALID]        = { .len = -1, .name = "INVALID" },
    [MIG_RP_MSG_SHUT]           = { .len =  4, .name = "SHUT" },
    [MIG_RP_MSG_PONG]           = { .len =  4, .name = "PONG" },
    [MIG_RP_MSG_REQ_PAGES]      = { .len = 12, .name = "REQ_PAGES" },
    [MIG_RP_MSG_REQ_PAGES_ID]   = { .len = -1, .name = "REQ_PAGES_ID" },
    [MIG_RP_MSG_MAX]            = { .len = -1, .name = "MAX" },
};
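/*
 * Return-path framing (see migrate_send_rp_message()): a be16 message type,
 * a be16 payload length, then the payload itself.  The lengths recorded in
 * rp_cmd_args[] are used below to validate fixed-size payloads; -1 marks
 * variable-length messages.
 */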

/*
 * Process a request for pages received on the return path.
 * We're allowed to send more than requested (e.g. to round to our page size)
 * and we don't need to send pages that have already been sent.
 */
static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
                                        ram_addr_t start, size_t len)
{
    long our_host_ps = getpagesize();

    trace_migrate_handle_rp_req_pages(rbname, start, len);

    /*
     * Since we currently insist on matching page sizes, just sanity check
     * we're being asked for whole host pages.
     */
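    /* e.g. with 4 KiB host pages the mask is 0xfff; any low bits set in
     * start or len indicate a misaligned request */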
    if (start & (our_host_ps - 1) ||
        (len & (our_host_ps - 1))) {
        error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
                     " len: %zd", __func__, start, len);
        mark_source_rp_bad(ms);
        return;
    }

    if (ram_save_queue_pages(rbname, start, len)) {
        mark_source_rp_bad(ms);
    }
}

/*
 * Handles messages sent on the return path towards the source VM
 *
 */
static void *source_return_path_thread(void *opaque)
{
    MigrationState *ms = opaque;
    QEMUFile *rp = ms->rp_state.from_dst_file;
    uint16_t header_len, header_type;
    uint8_t buf[512];
    uint32_t tmp32, sibling_error;
    ram_addr_t start = 0; /* =0 to silence warning */
    size_t len = 0, expected_len;
    int res;

    trace_source_return_path_thread_entry();
    while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
           migration_is_setup_or_active(ms->state)) {
        trace_source_return_path_thread_loop_top();
        header_type = qemu_get_be16(rp);
        header_len = qemu_get_be16(rp);

        if (header_type >= MIG_RP_MSG_MAX ||
            header_type == MIG_RP_MSG_INVALID) {
            error_report("RP: Received invalid message 0x%04x length 0x%04x",
                         header_type, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        if ((rp_cmd_args[header_type].len != -1 &&
             header_len != rp_cmd_args[header_type].len) ||
            header_len > sizeof(buf)) {
            error_report("RP: Received '%s' message (0x%04x) with"
                         " incorrect length %d expecting %zu",
                         rp_cmd_args[header_type].name, header_type, header_len,
                         (size_t)rp_cmd_args[header_type].len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* We know we've got a valid header by this point */
        res = qemu_get_buffer(rp, buf, header_len);
        if (res != header_len) {
            error_report("RP: Failed reading data for message 0x%04x"
                         " read %d expected %d",
                         header_type, res, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* OK, we have the message and the data */
        switch (header_type) {
        case MIG_RP_MSG_SHUT:
            sibling_error = ldl_be_p(buf);
            trace_source_return_path_thread_shut(sibling_error);
            if (sibling_error) {
                error_report("RP: Sibling indicated error %d", sibling_error);
                mark_source_rp_bad(ms);
            }
            /*
             * We'll let the main thread deal with closing the RP;
             * we could do a shutdown(2) on it, but we're the only user
             * anyway, so there's nothing gained.
             */
            goto out;

        case MIG_RP_MSG_PONG:
            tmp32 = ldl_be_p(buf);
            trace_source_return_path_thread_pong(tmp32);
            break;

        case MIG_RP_MSG_REQ_PAGES:
            start = ldq_be_p(buf);
            len = ldl_be_p(buf + 8);
            migrate_handle_rp_req_pages(ms, NULL, start, len);
            break;

        case MIG_RP_MSG_REQ_PAGES_ID:
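            /*
             * Variable-length payload, mirroring migrate_send_rp_req_pages():
             * start (8) + len (4) + idstr length (1) + idstr (up to 255).
             */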
            expected_len = 12 + 1; /* header + termination */

            if (header_len >= expected_len) {
                start = ldq_be_p(buf);
                len = ldl_be_p(buf + 8);
                /* Now we expect an idstr */
                tmp32 = buf[12]; /* Length of the following idstr */
                buf[13 + tmp32] = '\0';
                expected_len += tmp32;
            }
            if (header_len != expected_len) {
                error_report("RP: Req_Page_id with length %d expecting %zd",
                             header_len, expected_len);
                mark_source_rp_bad(ms);
                goto out;
            }
            migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
            break;

        default:
            break;
        }
    }
    if (qemu_file_get_error(rp)) {
        trace_source_return_path_thread_bad_end();
        mark_source_rp_bad(ms);
    }

    trace_source_return_path_thread_end();
out:
    ms->rp_state.from_dst_file = NULL;
    qemu_fclose(rp);
    return NULL;
}

static int open_return_path_on_source(MigrationState *ms)
{

    ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
    if (!ms->rp_state.from_dst_file) {
        return -1;
    }

    trace_open_return_path_on_source();
    qemu_thread_create(&ms->rp_state.rp_thread, "return path",
                       source_return_path_thread, ms, QEMU_THREAD_JOINABLE);

    trace_open_return_path_on_source_continue();

    return 0;
}

/* Returns 0 if the RP was ok, otherwise there was an error on the RP */
static int await_return_path_close_on_source(MigrationState *ms)
{
    /*
     * If this is a normal exit then the destination will send a SHUT and the
     * rp_thread will exit, however if there's an error we need to cause
     * it to exit.
     */
    if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
        /*
         * shutdown(2), if we have it, will cause it to unblock if it's stuck
         * waiting for the destination.
         */
        qemu_file_shutdown(ms->rp_state.from_dst_file);
        mark_source_rp_bad(ms);
    }
    trace_await_return_path_close_on_source_joining();
    qemu_thread_join(&ms->rp_state.rp_thread);
    trace_await_return_path_close_on_source_close();
    return ms->rp_state.error;
}

/*
 * Switch from normal iteration to postcopy
 * Returns non-0 on error
 */
static int postcopy_start(MigrationState *ms, bool *old_vm_running)
{
    int ret;
    QIOChannelBuffer *bioc;
    QEMUFile *fb;
    int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    bool restart_block = false;
    migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);

    trace_postcopy_start();
    qemu_mutex_lock_iothread();
    trace_postcopy_start_set_run();

    qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
    *old_vm_running = runstate_is_running();
    global_state_store();
    ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_inactivate_all();
    if (ret < 0) {
        goto fail;
    }
    restart_block = true;

    /*
     * Cause any non-postcopiable, but iterative devices to
     * send out their final data.
     */
    qemu_savevm_state_complete_precopy(ms->to_dst_file, true);

    /*
     * In FINISH_MIGRATE state, with the io-lock held, everything should
     * be quiet, but we've potentially still got dirty pages and we
     * need to tell the destination to throw any pages it's already received
     * that are dirty
     */
    if (ram_postcopy_send_discard_bitmap(ms)) {
        error_report("postcopy send discard bitmap failed");
        goto fail;
    }

    /*
     * send rest of state - note things that are doing postcopy
     * will notice we're in POSTCOPY_ACTIVE and not actually
     * wrap their state up here
     */
    qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
    /* Ping just for debugging, helps line traces up */
    qemu_savevm_send_ping(ms->to_dst_file, 2);

    /*
     * While loading the device state we may trigger page transfer
     * requests and the fd must be free to process those, and thus
     * the destination must read the whole device state off the fd before
     * it starts processing it.  Unfortunately the ad-hoc migration format
     * doesn't allow the destination to know the size to read without fully
     * parsing it through each device's load-state code (especially the open
     * coded devices that use get/put).
     * So we wrap the device state up in a package with a length at the start;
     * to do this we use a qemu_buf to hold the whole of the device state.
     */
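    /*
     * The package built below contains, in order: a postcopy-listen command,
     * the final precopy device state, a ping (value 3) and a postcopy-run
     * command.
     */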
    bioc = qio_channel_buffer_new(4096);
    qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer");
    fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc));
    object_unref(OBJECT(bioc));

    /*
     * Make sure the receiver can get incoming pages before we send the rest
     * of the state
     */
    qemu_savevm_send_postcopy_listen(fb);

    qemu_savevm_state_complete_precopy(fb, false);
    qemu_savevm_send_ping(fb, 3);

    qemu_savevm_send_postcopy_run(fb);

    /* <><> end of stuff going into the package */

    /* Last point of recovery; as soon as we send the package the destination
     * can open devices and potentially start running.
     * Let's just check again that we've not got any errors.
     */
    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_report("postcopy_start: Migration stream errored (pre package)");
        goto fail_closefb;
    }

    restart_block = false;

    /* Now send that blob */
    if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
        goto fail_closefb;
    }
    qemu_fclose(fb);

    /* Send a notify to give a chance for anything that needs to happen
     * at the transition to postcopy and after the device state; in particular
     * spice needs to trigger a transition now
     */
    ms->postcopy_after_devices = true;
    notifier_list_notify(&migration_state_notifiers, ms);

    ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;

    qemu_mutex_unlock_iothread();

    /*
     * Although this ping is just for debug, it could potentially be
     * used for getting a better measurement of downtime at the source.
     */
    qemu_savevm_send_ping(ms->to_dst_file, 4);

    if (migrate_release_ram()) {
        ram_postcopy_migrated_memory_release(ms);
    }

    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_report("postcopy_start: Migration stream errored");
        migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                          MIGRATION_STATUS_FAILED);
    }

    return ret;

fail_closefb:
    qemu_fclose(fb);
fail:
    migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                      MIGRATION_STATUS_FAILED);
    if (restart_block) {
        /* A failure happened early enough that we know the destination hasn't
         * accessed block devices, so we're safe to recover.
         */
        Error *local_err = NULL;

        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
    qemu_mutex_unlock_iothread();
    return -1;
}

/**
 * migration_completion: Used by migration_thread when there's not much left.
 *   The caller 'breaks' the loop when this returns.
 *
 * @s: Current migration state
 * @current_active_state: The migration state we expect to be in
 * @*old_vm_running: Pointer to old_vm_running flag
 * @*start_time: Pointer to time to update
 */
static void migration_completion(MigrationState *s, int current_active_state,
                                 bool *old_vm_running,
                                 int64_t *start_time)
{
    int ret;

    if (s->state == MIGRATION_STATUS_ACTIVE) {
        qemu_mutex_lock_iothread();
        *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
        *old_vm_running = runstate_is_running();
        ret = global_state_store();

        if (!ret) {
            ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
            /*
             * Don't mark the image with BDRV_O_INACTIVE flag if
             * we will go into COLO stage later.
             */
            if (ret >= 0 && !migrate_colo_enabled()) {
                ret = bdrv_inactivate_all();
            }
            if (ret >= 0) {
                qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
                qemu_savevm_state_complete_precopy(s->to_dst_file, false);
                s->block_inactive = true;
            }
        }
        qemu_mutex_unlock_iothread();

        if (ret < 0) {
            goto fail;
        }
    } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        trace_migration_completion_postcopy_end();

        qemu_savevm_state_complete_postcopy(s->to_dst_file);
        trace_migration_completion_postcopy_end_after_complete();
    }

    /*
     * If rp was opened we must clean up the thread before
     * cleaning everything else up (since if there are no failures
     * it will wait for the destination to send its status in
     * a SHUT command).
     * Postcopy opens rp if enabled (even if it's not activated)
     */
    if (migrate_postcopy_ram()) {
        int rp_error;
        trace_migration_completion_postcopy_end_before_rp();
        rp_error = await_return_path_close_on_source(s);
        trace_migration_completion_postcopy_end_after_rp(rp_error);
        if (rp_error) {
            goto fail_invalidate;
        }
    }

    if (qemu_file_get_error(s->to_dst_file)) {
        trace_migration_completion_file_err();
        goto fail_invalidate;
    }

    if (!migrate_colo_enabled()) {
        migrate_set_state(&s->state, current_active_state,
                          MIGRATION_STATUS_COMPLETED);
    }

    return;

fail_invalidate:
    /* If not doing postcopy, vm_start() will be called: let's regain
     * control on images.
     */
    if (s->state == MIGRATION_STATUS_ACTIVE) {
        Error *local_err = NULL;

        qemu_mutex_lock_iothread();
        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        } else {
            s->block_inactive = false;
        }
        qemu_mutex_unlock_iothread();
    }

fail:
    migrate_set_state(&s->state, current_active_state,
                      MIGRATION_STATUS_FAILED);
}

bool migrate_colo_enabled(void)
{
    MigrationState *s = migrate_get_current();
    return s->enabled_capabilities[MIGRATION_CAPABILITY_X_COLO];
}

/*
 * Master migration thread on the source VM.
 * It drives the migration and pumps the data down the outgoing channel.
 */
static void *migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    /* Used by the bandwidth calcs, updated later */
    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    int64_t initial_bytes = 0;
    /*
     * The final stage happens when the remaining data is smaller than
     * this threshold; it's calculated from the requested downtime and
     * measured bandwidth
     */
1918 int64_t threshold_size = 0;
1919 int64_t start_time = initial_time;
1920 int64_t end_time;
1921 bool old_vm_running = false;
1922 bool entered_postcopy = false;
1923 /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */
1924 enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE;
1925 bool enable_colo = migrate_colo_enabled();
1926
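    /* This thread walks RCU-protected data (e.g. the RAMBlock list), so
     * it must make itself known to RCU first */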
1927 rcu_register_thread();
1928
1929 qemu_savevm_state_header(s->to_dst_file);
1930
1931 if (migrate_postcopy_ram()) {
1932 /* Now tell the dest that it should open its end so it can reply */
1933 qemu_savevm_send_open_return_path(s->to_dst_file);
1934
1935 /* And do a ping that will make stuff easier to debug */
1936 qemu_savevm_send_ping(s->to_dst_file, 1);
1937
1938 /*
1939 * Tell the destination that we *might* want to do postcopy later;
1940 * if the other end can't do postcopy it should fail now, nice and
1941 * early.
1942 */
1943 qemu_savevm_send_postcopy_advise(s->to_dst_file);
1944 }
1945
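    /* First pass: give each savevm handler the chance to set up and send
     * its initial chunk of state */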
1946 qemu_savevm_state_begin(s->to_dst_file, &s->params);
1947
1948 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
1949 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1950 MIGRATION_STATUS_ACTIVE);
1951
1952 trace_migration_thread_setup_complete();
1953
1954 while (s->state == MIGRATION_STATUS_ACTIVE ||
1955 s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
1956 int64_t current_time;
1957 uint64_t pending_size;
1958
1959 if (!qemu_file_rate_limit(s->to_dst_file)) {
1960 uint64_t pend_post, pend_nonpost;
1961
1962 qemu_savevm_state_pending(s->to_dst_file, threshold_size,
1963 &pend_nonpost, &pend_post);
1964 pending_size = pend_nonpost + pend_post;
1965 trace_migrate_pending(pending_size, threshold_size,
1966 pend_post, pend_nonpost);
1967 if (pending_size && pending_size >= threshold_size) {
1968 /* Still a significant amount to transfer */
1969
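                /* Switch to postcopy once only postcopiable state (RAM)
                 * keeps us above the threshold and the user has asked for
                 * it via migrate-start-postcopy */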
1970 if (migrate_postcopy_ram() &&
1971 s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
1972 pend_nonpost <= threshold_size &&
1973 atomic_read(&s->start_postcopy)) {
1974
1975 if (!postcopy_start(s, &old_vm_running)) {
1976 current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE;
1977 entered_postcopy = true;
1978 }
1979
1980 continue;
1981 }
1982 /* Just another iteration step */
1983 qemu_savevm_state_iterate(s->to_dst_file, entered_postcopy);
1984 } else {
1985 trace_migration_thread_low_pending(pending_size);
1986 migration_completion(s, current_active_state,
1987 &old_vm_running, &start_time);
1988 break;
1989 }
1990 }
1991
1992 if (qemu_file_get_error(s->to_dst_file)) {
1993 migrate_set_state(&s->state, current_active_state,
1994 MIGRATION_STATUS_FAILED);
1995 trace_migration_thread_file_err();
1996 break;
1997 }
1998 current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1999 if (current_time >= initial_time + BUFFER_DELAY) {
2000 uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) -
2001 initial_bytes;
2002 uint64_t time_spent = current_time - initial_time;
2003 double bandwidth = (double)transferred_bytes / time_spent;
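            /* bandwidth is in bytes/ms and downtime_limit in ms, so the
             * threshold is the amount of data we expect to fit into the
             * allowed downtime: e.g. at ~1 GB/s (~1 MB/ms) with a 300 ms
             * limit we enter the final stage once ~300 MB remain */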
2004 threshold_size = bandwidth * s->parameters.downtime_limit;
2005
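            /* bytes over milliseconds -> bits per second -> Mbit/s */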
2006 s->mbps = (((double) transferred_bytes * 8.0) /
2007 ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
2008
2009 trace_migrate_transferred(transferred_bytes, time_spent,
2010 bandwidth, threshold_size);
2011             /* If we haven't sent anything, we don't want to recalculate;
2012                10000 is a small enough number for our purposes */
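            /* Rough estimate only: how long resending the pages the guest
             * is currently dirtying would take at the measured bandwidth */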
2013 if (ram_dirty_pages_rate() && transferred_bytes > 10000) {
2014 s->expected_downtime = ram_dirty_pages_rate() *
2015 qemu_target_page_size() / bandwidth;
2016 }
2017
2018 qemu_file_reset_rate_limit(s->to_dst_file);
2019 initial_time = current_time;
2020 initial_bytes = qemu_ftell(s->to_dst_file);
2021 }
2022 if (qemu_file_rate_limit(s->to_dst_file)) {
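            /* This window's rate-limit budget is used up; sleep away the
             * remainder of the BUFFER_DELAY window */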
2023 /* usleep expects microseconds */
2024 g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
2025 }
2026 }
2027
2028 trace_migration_thread_after_loop();
2029 /* If we enabled cpu throttling for auto-converge, turn it off. */
2030 cpu_throttle_stop();
2031 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2032
2033 qemu_mutex_lock_iothread();
2034 /*
2035      * The resources allocated by migration will be reused in the COLO
2036      * process, so don't release them here.
2037 */
2038 if (!enable_colo) {
2039 qemu_savevm_state_cleanup();
2040 }
2041 if (s->state == MIGRATION_STATUS_COMPLETED) {
2042 uint64_t transferred_bytes = qemu_ftell(s->to_dst_file);
2043 s->total_time = end_time - s->total_time;
2044 if (!entered_postcopy) {
2045 s->downtime = end_time - start_time;
2046 }
2047 if (s->total_time) {
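            /* total_time is in ms: bytes * 8 / ms / 1000 == Mbit/s */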
2048 s->mbps = (((double) transferred_bytes * 8.0) /
2049 ((double) s->total_time)) / 1000;
2050 }
2051 runstate_set(RUN_STATE_POSTMIGRATE);
2052 } else {
2053 if (s->state == MIGRATION_STATUS_ACTIVE && enable_colo) {
2054 migrate_start_colo_process(s);
2055 qemu_savevm_state_cleanup();
2056 /*
2057              * Fixme: we will run the VM in COLO no matter what its old
2058              * running state was; after exiting COLO, it keeps running.
2059 */
2060 old_vm_running = true;
2061 }
2062 if (old_vm_running && !entered_postcopy) {
2063 vm_start();
2064 } else {
2065 if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
2066 runstate_set(RUN_STATE_POSTMIGRATE);
2067 }
2068 }
2069 }
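    /* The remaining cleanup (closing to_dst_file, joining this thread)
     * must happen in the main loop, so defer it to a bottom half */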
2070 qemu_bh_schedule(s->cleanup_bh);
2071 qemu_mutex_unlock_iothread();
2072
2073 rcu_unregister_thread();
2074 return NULL;
2075 }
2076
2077 void migrate_fd_connect(MigrationState *s)
2078 {
2079 s->expected_downtime = s->parameters.downtime_limit;
2080 s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);
2081
2082 qemu_file_set_blocking(s->to_dst_file, true);
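    /* max_bandwidth is in bytes/s; XFER_LIMIT_RATIO (1000 / BUFFER_DELAY,
     * i.e. 10) turns it into the byte budget of one 100 ms buffering
     * window */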
2083 qemu_file_set_rate_limit(s->to_dst_file,
2084 s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
2085
2086 /* Notify before starting migration thread */
2087 notifier_list_notify(&migration_state_notifiers, s);
2088
2089 /*
2090 * Open the return path; currently for postcopy but other things might
2091 * also want it.
2092 */
2093 if (migrate_postcopy_ram()) {
2094 if (open_return_path_on_source(s)) {
2095 error_report("Unable to open return-path for postcopy");
2096 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
2097 MIGRATION_STATUS_FAILED);
2098 migrate_fd_cleanup(s);
2099 return;
2100 }
2101 }
2102
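    /* A no-op unless the compress capability has been enabled */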
2103 migrate_compress_threads_create();
2104 qemu_thread_create(&s->thread, "live_migration", migration_thread, s,
2105 QEMU_THREAD_JOINABLE);
2106 s->migration_thread_running = true;
2107 }
2108
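/* Read with a full memory barrier; this is presumably paired with the
 * atomic_xchg() in postcopy_state_set() below */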
2109 PostcopyState postcopy_state_get(void)
2110 {
2111 return atomic_mb_read(&incoming_postcopy_state);
2112 }
2113
2114 /* Set the state and return the old state */
2115 PostcopyState postcopy_state_set(PostcopyState new_state)
2116 {
2117 return atomic_xchg(&incoming_postcopy_state, new_state);
2118 }
2119