migration: convert unix socket protocol to use QIOChannel
[qemu.git] / migration / migration.c
1 /*
2 * QEMU live migration
3 *
4 * Copyright IBM, Corp. 2008
5 *
6 * Authors:
7 * Anthony Liguori <aliguori@us.ibm.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 * Contributions after 2012-01-13 are licensed under the terms of the
13 * GNU GPL, version 2 or (at your option) any later version.
14 */
15
16 #include "qemu/osdep.h"
17 #include "qemu/cutils.h"
18 #include "qemu/error-report.h"
19 #include "qemu/main-loop.h"
20 #include "migration/migration.h"
21 #include "migration/qemu-file.h"
22 #include "sysemu/sysemu.h"
23 #include "block/block.h"
24 #include "qapi/qmp/qerror.h"
25 #include "qapi/util.h"
26 #include "qemu/sockets.h"
27 #include "qemu/rcu.h"
28 #include "migration/block.h"
29 #include "migration/postcopy-ram.h"
30 #include "qemu/thread.h"
31 #include "qmp-commands.h"
32 #include "trace.h"
33 #include "qapi-event.h"
34 #include "qom/cpu.h"
35 #include "exec/memory.h"
36 #include "exec/address-spaces.h"
37 #include "io/channel-buffer.h"
38
39 #define MAX_THROTTLE (32 << 20) /* Migration transfer speed throttling */
40
41 /* Amount of time to allocate to each "chunk" of bandwidth-throttled
42 * data. */
43 #define BUFFER_DELAY 100
44 #define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
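/*
 * Worked example of the throttling arithmetic: XFER_LIMIT_RATIO is
 * 1000 / 100 = 10, so a bandwidth limit is divided by 10 to give the number
 * of bytes allowed per BUFFER_DELAY (100ms) period.  With the default
 * MAX_THROTTLE of 32 MiB/s that is roughly 3.2 MiB per period (see the
 * qemu_file_set_rate_limit() calls below).
 */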
45
46 /* Default compression thread count */
47 #define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
48 /* Default decompression thread count, usually decompression is at
49 * least 4 times as fast as compression.*/
50 #define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
51 /* 0: no compression, 1: best speed, ..., 9: best compression ratio */
52 #define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
53 /* Define default autoconverge cpu throttle migration parameters */
54 #define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
55 #define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10
56
57 /* Migration XBZRLE default cache size */
58 #define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)
59
60 static NotifierList migration_state_notifiers =
61 NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);
62
63 static bool deferred_incoming;
64
65 /*
66 * Current state of incoming postcopy; note this is not part of
67  * MigrationIncomingState since its state is used during cleanup
68 * at the end as MIS is being freed.
69 */
70 static PostcopyState incoming_postcopy_state;
71
72 /* When we add fault tolerance, we could have several
73    migrations at once.  For now we don't need dynamic
74    creation of migration state. */
75
76 /* For outgoing */
77 MigrationState *migrate_get_current(void)
78 {
79 static bool once;
80 static MigrationState current_migration = {
81 .state = MIGRATION_STATUS_NONE,
82 .bandwidth_limit = MAX_THROTTLE,
83 .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
84 .mbps = -1,
85 .parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] =
86 DEFAULT_MIGRATE_COMPRESS_LEVEL,
87 .parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
88 DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT,
89 .parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
90 DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT,
91 .parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INITIAL] =
92 DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL,
93 .parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INCREMENT] =
94 DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT,
95 };
96
97 if (!once) {
98 qemu_mutex_init(&current_migration.src_page_req_mutex);
99 once = true;
100 }
101 return &current_migration;
102 }
103
104 /* For incoming */
105 static MigrationIncomingState *mis_current;
106
107 MigrationIncomingState *migration_incoming_get_current(void)
108 {
109 return mis_current;
110 }
111
112 MigrationIncomingState *migration_incoming_state_new(QEMUFile* f)
113 {
114 mis_current = g_new0(MigrationIncomingState, 1);
115 mis_current->from_src_file = f;
116 mis_current->state = MIGRATION_STATUS_NONE;
117 QLIST_INIT(&mis_current->loadvm_handlers);
118 qemu_mutex_init(&mis_current->rp_mutex);
119 qemu_event_init(&mis_current->main_thread_load_event, false);
120
121 return mis_current;
122 }
123
124 void migration_incoming_state_destroy(void)
125 {
126 qemu_event_destroy(&mis_current->main_thread_load_event);
127 loadvm_free_handlers(mis_current);
128 g_free(mis_current);
129 mis_current = NULL;
130 }
131
132
133 typedef struct {
134 bool optional;
135 uint32_t size;
136 uint8_t runstate[100];
137 RunState state;
138 bool received;
139 } GlobalState;
140
141 static GlobalState global_state;
142
143 int global_state_store(void)
144 {
145 if (!runstate_store((char *)global_state.runstate,
146 sizeof(global_state.runstate))) {
147 error_report("runstate name too big: %s", global_state.runstate);
148 trace_migrate_state_too_big();
149 return -EINVAL;
150 }
151 return 0;
152 }
153
154 void global_state_store_running(void)
155 {
156 const char *state = RunState_lookup[RUN_STATE_RUNNING];
157 strncpy((char *)global_state.runstate,
158 state, sizeof(global_state.runstate));
159 }
160
161 static bool global_state_received(void)
162 {
163 return global_state.received;
164 }
165
166 static RunState global_state_get_runstate(void)
167 {
168 return global_state.state;
169 }
170
171 void global_state_set_optional(void)
172 {
173 global_state.optional = true;
174 }
175
176 static bool global_state_needed(void *opaque)
177 {
178 GlobalState *s = opaque;
179 char *runstate = (char *)s->runstate;
180
181 /* If it is not optional, it is mandatory */
182
183 if (s->optional == false) {
184 return true;
185 }
186
187 /* If state is running or paused, it is not needed */
188
189 if (strcmp(runstate, "running") == 0 ||
190 strcmp(runstate, "paused") == 0) {
191 return false;
192 }
193
194 /* for any other state it is needed */
195 return true;
196 }
197
198 static int global_state_post_load(void *opaque, int version_id)
199 {
200 GlobalState *s = opaque;
201 Error *local_err = NULL;
202 int r;
203 char *runstate = (char *)s->runstate;
204
205 s->received = true;
206 trace_migrate_global_state_post_load(runstate);
207
208 r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE__MAX,
209 -1, &local_err);
210
211 if (r == -1) {
212 if (local_err) {
213 error_report_err(local_err);
214 }
215 return -EINVAL;
216 }
217 s->state = r;
218
219 return 0;
220 }
221
222 static void global_state_pre_save(void *opaque)
223 {
224 GlobalState *s = opaque;
225
226 trace_migrate_global_state_pre_save((char *)s->runstate);
227 s->size = strlen((char *)s->runstate) + 1;
228 }
229
230 static const VMStateDescription vmstate_globalstate = {
231 .name = "globalstate",
232 .version_id = 1,
233 .minimum_version_id = 1,
234 .post_load = global_state_post_load,
235 .pre_save = global_state_pre_save,
236 .needed = global_state_needed,
237 .fields = (VMStateField[]) {
238 VMSTATE_UINT32(size, GlobalState),
239 VMSTATE_BUFFER(runstate, GlobalState),
240 VMSTATE_END_OF_LIST()
241 },
242 };
243
244 void register_global_state(void)
245 {
246     /* We use it regardless of whether we receive it */
247 strcpy((char *)&global_state.runstate, "");
248 global_state.received = false;
249 vmstate_register(NULL, 0, &vmstate_globalstate, &global_state);
250 }
251
252 static void migrate_generate_event(int new_state)
253 {
254 if (migrate_use_events()) {
255 qapi_event_send_migration(new_state, &error_abort);
256 }
257 }
258
259 /*
260 * Called on -incoming with a defer: uri.
261 * The migration can be started later after any parameters have been
262 * changed.
263 */
264 static void deferred_incoming_migration(Error **errp)
265 {
266 if (deferred_incoming) {
267 error_setg(errp, "Incoming migration already deferred");
268 }
269 deferred_incoming = true;
270 }
271
272 /* Request a range of pages from the source VM at the given
273 * start address.
274 * rbname: Name of the RAMBlock to request the page in, if NULL it's the same
275 * as the last request (a name must have been given previously)
276 * Start: Address offset within the RB
277 * Len: Length in bytes required - must be a multiple of pagesize
278 */
279 void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
280 ram_addr_t start, size_t len)
281 {
282 uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
283 size_t msglen = 12; /* start + len */
284
285 *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
286 *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);
287
288 if (rbname) {
289 int rbname_len = strlen(rbname);
290 assert(rbname_len < 256);
291
292 bufc[msglen++] = rbname_len;
293 memcpy(bufc + msglen, rbname, rbname_len);
294 msglen += rbname_len;
295 migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES_ID, msglen, bufc);
296 } else {
297 migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES, msglen, bufc);
298 }
299 }
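/*
 * Illustrative encoding of the request built above (values chosen purely as
 * an example): asking for 4 KiB at offset 0x2000 of a RAMBlock named
 * "pc.ram" gives a MIG_RP_MSG_REQ_PAGES_ID payload of 12 + 1 + 6 = 19 bytes:
 *
 *   00 00 00 00 00 00 20 00   start, 64-bit big-endian
 *   00 00 10 00               len, 32-bit big-endian
 *   06                        length of the RAMBlock name
 *   70 63 2e 72 61 6d         "pc.ram", not NUL-terminated on the wire
 */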
300
301 void qemu_start_incoming_migration(const char *uri, Error **errp)
302 {
303 const char *p;
304
305 qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort);
306 if (!strcmp(uri, "defer")) {
307 deferred_incoming_migration(errp);
308 } else if (strstart(uri, "tcp:", &p)) {
309 tcp_start_incoming_migration(p, errp);
310 #ifdef CONFIG_RDMA
311 } else if (strstart(uri, "rdma:", &p)) {
312 rdma_start_incoming_migration(p, errp);
313 #endif
314 #if !defined(WIN32)
315 } else if (strstart(uri, "exec:", &p)) {
316 exec_start_incoming_migration(p, errp);
317 #endif
318 } else if (strstart(uri, "unix:", &p)) {
319 unix_start_incoming_migration(p, errp);
320 #if !defined(WIN32)
321 } else if (strstart(uri, "fd:", &p)) {
322 fd_start_incoming_migration(p, errp);
323 #endif
324 } else {
325 error_setg(errp, "unknown migration protocol: %s", uri);
326 }
327 }
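/*
 * For reference, the URI forms dispatched above (matching the strstart()
 * checks; paths and ports are purely illustrative):
 *
 *   -incoming defer                  start later via migrate-incoming
 *   -incoming tcp:<host>:<port>
 *   -incoming rdma:<host>:<port>     only with CONFIG_RDMA
 *   -incoming exec:<command>         not on WIN32
 *   -incoming unix:<path>            e.g. unix:/tmp/migrate.sock
 *   -incoming fd:<fd>                not on WIN32
 */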
328
329 static void process_incoming_migration_bh(void *opaque)
330 {
331 Error *local_err = NULL;
332 MigrationIncomingState *mis = opaque;
333
334 /* Make sure all file formats flush their mutable metadata */
335 bdrv_invalidate_cache_all(&local_err);
336 if (local_err) {
337 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
338 MIGRATION_STATUS_FAILED);
339 error_report_err(local_err);
340 migrate_decompress_threads_join();
341 exit(EXIT_FAILURE);
342 }
343
344 /*
345 * This must happen after all error conditions are dealt with and
346 * we're sure the VM is going to be running on this host.
347 */
348 qemu_announce_self();
349
350 /* If global state section was not received or we are in running
351 state, we need to obey autostart. Any other state is set with
352 runstate_set. */
353
354 if (!global_state_received() ||
355 global_state_get_runstate() == RUN_STATE_RUNNING) {
356 if (autostart) {
357 vm_start();
358 } else {
359 runstate_set(RUN_STATE_PAUSED);
360 }
361 } else {
362 runstate_set(global_state_get_runstate());
363 }
364 migrate_decompress_threads_join();
365 /*
366 * This must happen after any state changes since as soon as an external
367 * observer sees this event they might start to prod at the VM assuming
368 * it's ready to use.
369 */
370 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
371 MIGRATION_STATUS_COMPLETED);
372 qemu_bh_delete(mis->bh);
373 migration_incoming_state_destroy();
374 }
375
376 static void process_incoming_migration_co(void *opaque)
377 {
378 QEMUFile *f = opaque;
379 MigrationIncomingState *mis;
380 PostcopyState ps;
381 int ret;
382
383 mis = migration_incoming_state_new(f);
384 postcopy_state_set(POSTCOPY_INCOMING_NONE);
385 migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
386 MIGRATION_STATUS_ACTIVE);
387 ret = qemu_loadvm_state(f);
388
389 ps = postcopy_state_get();
390 trace_process_incoming_migration_co_end(ret, ps);
391 if (ps != POSTCOPY_INCOMING_NONE) {
392 if (ps == POSTCOPY_INCOMING_ADVISE) {
393 /*
394 * Where a migration had postcopy enabled (and thus went to advise)
395 * but managed to complete within the precopy period, we can use
396 * the normal exit.
397 */
398 postcopy_ram_incoming_cleanup(mis);
399 } else if (ret >= 0) {
400 /*
401 * Postcopy was started, cleanup should happen at the end of the
402 * postcopy thread.
403 */
404 trace_process_incoming_migration_co_postcopy_end_main();
405 return;
406 }
407 /* Else if something went wrong then just fall out of the normal exit */
408 }
409
410 qemu_fclose(f);
411 free_xbzrle_decoded_buf();
412
413 if (ret < 0) {
414 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
415 MIGRATION_STATUS_FAILED);
416 error_report("load of migration failed: %s", strerror(-ret));
417 migrate_decompress_threads_join();
418 exit(EXIT_FAILURE);
419 }
420
421 mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
422 qemu_bh_schedule(mis->bh);
423 }
424
425 void process_incoming_migration(QEMUFile *f)
426 {
427 Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
428
429 migrate_decompress_threads_create();
430 qemu_file_set_blocking(f, false);
431 qemu_coroutine_enter(co, f);
432 }
433
434
435 void migration_set_incoming_channel(MigrationState *s,
436 QIOChannel *ioc)
437 {
438 QEMUFile *f = qemu_fopen_channel_input(ioc);
439
440 process_incoming_migration(f);
441 }
442
443
444 void migration_set_outgoing_channel(MigrationState *s,
445 QIOChannel *ioc)
446 {
447 QEMUFile *f = qemu_fopen_channel_output(ioc);
448
449 s->to_dst_file = f;
450
451 migrate_fd_connect(s);
452 }
453
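/*
 * Minimal sketch (not part of this file) of how a transport backend can hand
 * a connected channel to the helpers above; a blocking connect is assumed
 * purely for brevity, the real socket backends connect asynchronously:
 *
 *     Error *err = NULL;
 *     QIOChannelSocket *sioc = qio_channel_socket_new();
 *     SocketAddress *saddr = socket_parse("unix:/tmp/migrate.sock", &err);
 *
 *     if (saddr && qio_channel_socket_connect_sync(sioc, saddr, &err) == 0) {
 *         migration_set_outgoing_channel(migrate_get_current(),
 *                                        QIO_CHANNEL(sioc));
 *     }
 *     object_unref(OBJECT(sioc));
 */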
454
455 /*
456 * Send a message on the return channel back to the source
457 * of the migration.
458 */
459 void migrate_send_rp_message(MigrationIncomingState *mis,
460 enum mig_rp_message_type message_type,
461 uint16_t len, void *data)
462 {
463 trace_migrate_send_rp_message((int)message_type, len);
464 qemu_mutex_lock(&mis->rp_mutex);
465 qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
466 qemu_put_be16(mis->to_src_file, len);
467 qemu_put_buffer(mis->to_src_file, data, len);
468 qemu_fflush(mis->to_src_file);
469 qemu_mutex_unlock(&mis->rp_mutex);
470 }
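/*
 * Each return-path message is thus framed as a 16-bit big-endian type, a
 * 16-bit big-endian payload length, then the payload itself.  For example a
 * SHUT carrying the value 0 occupies 2 + 2 + 4 = 8 bytes on the wire, the
 * last four of which are zero.
 */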
471
472 /*
473 * Send a 'SHUT' message on the return channel with the given value
474 * to indicate that we've finished with the RP. Non-0 value indicates
475 * error.
476 */
477 void migrate_send_rp_shut(MigrationIncomingState *mis,
478 uint32_t value)
479 {
480 uint32_t buf;
481
482 buf = cpu_to_be32(value);
483 migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
484 }
485
486 /*
487 * Send a 'PONG' message on the return channel with the given value
488 * (normally in response to a 'PING')
489 */
490 void migrate_send_rp_pong(MigrationIncomingState *mis,
491 uint32_t value)
492 {
493 uint32_t buf;
494
495 buf = cpu_to_be32(value);
496 migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
497 }
498
499 /* Maximum guest downtime, in nanoseconds, that we are willing to tolerate
500  * during migration.  Nanoseconds are used because that is the maximum
501  * resolution get_clock() can achieve.  This is an internal measure; all
502  * user-visible units must be in seconds. */
503 static uint64_t max_downtime = 300000000;
504
505 uint64_t migrate_max_downtime(void)
506 {
507 return max_downtime;
508 }
509
510 MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
511 {
512 MigrationCapabilityStatusList *head = NULL;
513 MigrationCapabilityStatusList *caps;
514 MigrationState *s = migrate_get_current();
515 int i;
516
517 caps = NULL; /* silence compiler warning */
518 for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
519 if (head == NULL) {
520 head = g_malloc0(sizeof(*caps));
521 caps = head;
522 } else {
523 caps->next = g_malloc0(sizeof(*caps));
524 caps = caps->next;
525 }
526 caps->value =
527 g_malloc(sizeof(*caps->value));
528 caps->value->capability = i;
529 caps->value->state = s->enabled_capabilities[i];
530 }
531
532 return head;
533 }
534
535 MigrationParameters *qmp_query_migrate_parameters(Error **errp)
536 {
537 MigrationParameters *params;
538 MigrationState *s = migrate_get_current();
539
540 params = g_malloc0(sizeof(*params));
541 params->compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
542 params->compress_threads =
543 s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
544 params->decompress_threads =
545 s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
546 params->cpu_throttle_initial =
547 s->parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INITIAL];
548 params->cpu_throttle_increment =
549 s->parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INCREMENT];
550
551 return params;
552 }
553
554 /*
555 * Return true if we're already in the middle of a migration
556 * (i.e. any of the active or setup states)
557 */
558 static bool migration_is_setup_or_active(int state)
559 {
560 switch (state) {
561 case MIGRATION_STATUS_ACTIVE:
562 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
563 case MIGRATION_STATUS_SETUP:
564 return true;
565
566 default:
567 return false;
568
569 }
570 }
571
572 static void get_xbzrle_cache_stats(MigrationInfo *info)
573 {
574 if (migrate_use_xbzrle()) {
575 info->has_xbzrle_cache = true;
576 info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
577 info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
578 info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
579 info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
580 info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
581 info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
582 info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
583 }
584 }
585
586 MigrationInfo *qmp_query_migrate(Error **errp)
587 {
588 MigrationInfo *info = g_malloc0(sizeof(*info));
589 MigrationState *s = migrate_get_current();
590
591 switch (s->state) {
592 case MIGRATION_STATUS_NONE:
593 /* no migration has happened ever */
594 break;
595 case MIGRATION_STATUS_SETUP:
596 info->has_status = true;
597 info->has_total_time = false;
598 break;
599 case MIGRATION_STATUS_ACTIVE:
600 case MIGRATION_STATUS_CANCELLING:
601 info->has_status = true;
602 info->has_total_time = true;
603 info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
604 - s->total_time;
605 info->has_expected_downtime = true;
606 info->expected_downtime = s->expected_downtime;
607 info->has_setup_time = true;
608 info->setup_time = s->setup_time;
609
610 info->has_ram = true;
611 info->ram = g_malloc0(sizeof(*info->ram));
612 info->ram->transferred = ram_bytes_transferred();
613 info->ram->remaining = ram_bytes_remaining();
614 info->ram->total = ram_bytes_total();
615 info->ram->duplicate = dup_mig_pages_transferred();
616 info->ram->skipped = skipped_mig_pages_transferred();
617 info->ram->normal = norm_mig_pages_transferred();
618 info->ram->normal_bytes = norm_mig_bytes_transferred();
619 info->ram->dirty_pages_rate = s->dirty_pages_rate;
620 info->ram->mbps = s->mbps;
621 info->ram->dirty_sync_count = s->dirty_sync_count;
622
623 if (blk_mig_active()) {
624 info->has_disk = true;
625 info->disk = g_malloc0(sizeof(*info->disk));
626 info->disk->transferred = blk_mig_bytes_transferred();
627 info->disk->remaining = blk_mig_bytes_remaining();
628 info->disk->total = blk_mig_bytes_total();
629 }
630
631 if (cpu_throttle_active()) {
632 info->has_cpu_throttle_percentage = true;
633 info->cpu_throttle_percentage = cpu_throttle_get_percentage();
634 }
635
636 get_xbzrle_cache_stats(info);
637 break;
638 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
639 /* Mostly the same as active; TODO add some postcopy stats */
640 info->has_status = true;
641 info->has_total_time = true;
642 info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
643 - s->total_time;
644 info->has_expected_downtime = true;
645 info->expected_downtime = s->expected_downtime;
646 info->has_setup_time = true;
647 info->setup_time = s->setup_time;
648
649 info->has_ram = true;
650 info->ram = g_malloc0(sizeof(*info->ram));
651 info->ram->transferred = ram_bytes_transferred();
652 info->ram->remaining = ram_bytes_remaining();
653 info->ram->total = ram_bytes_total();
654 info->ram->duplicate = dup_mig_pages_transferred();
655 info->ram->skipped = skipped_mig_pages_transferred();
656 info->ram->normal = norm_mig_pages_transferred();
657 info->ram->normal_bytes = norm_mig_bytes_transferred();
658 info->ram->dirty_pages_rate = s->dirty_pages_rate;
659 info->ram->mbps = s->mbps;
660 info->ram->dirty_sync_count = s->dirty_sync_count;
661
662 if (blk_mig_active()) {
663 info->has_disk = true;
664 info->disk = g_malloc0(sizeof(*info->disk));
665 info->disk->transferred = blk_mig_bytes_transferred();
666 info->disk->remaining = blk_mig_bytes_remaining();
667 info->disk->total = blk_mig_bytes_total();
668 }
669
670 get_xbzrle_cache_stats(info);
671 break;
672 case MIGRATION_STATUS_COMPLETED:
673 get_xbzrle_cache_stats(info);
674
675 info->has_status = true;
676 info->has_total_time = true;
677 info->total_time = s->total_time;
678 info->has_downtime = true;
679 info->downtime = s->downtime;
680 info->has_setup_time = true;
681 info->setup_time = s->setup_time;
682
683 info->has_ram = true;
684 info->ram = g_malloc0(sizeof(*info->ram));
685 info->ram->transferred = ram_bytes_transferred();
686 info->ram->remaining = 0;
687 info->ram->total = ram_bytes_total();
688 info->ram->duplicate = dup_mig_pages_transferred();
689 info->ram->skipped = skipped_mig_pages_transferred();
690 info->ram->normal = norm_mig_pages_transferred();
691 info->ram->normal_bytes = norm_mig_bytes_transferred();
692 info->ram->mbps = s->mbps;
693 info->ram->dirty_sync_count = s->dirty_sync_count;
694 break;
695 case MIGRATION_STATUS_FAILED:
696 info->has_status = true;
697 if (s->error) {
698 info->has_error_desc = true;
699 info->error_desc = g_strdup(error_get_pretty(s->error));
700 }
701 break;
702 case MIGRATION_STATUS_CANCELLED:
703 info->has_status = true;
704 break;
705 }
706 info->status = s->state;
707
708 return info;
709 }
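/*
 * Example of what this looks like over QMP while a migration is active
 * (field values are illustrative only):
 *
 *   -> { "execute": "query-migrate" }
 *   <- { "return": { "status": "active",
 *                    "total-time": 12345, "setup-time": 12,
 *                    "expected-downtime": 300,
 *                    "ram": { "transferred": 123456, "remaining": 654321,
 *                             "total": 1073741824, "mbps": 933.2, ... } } }
 */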
710
711 void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
712 Error **errp)
713 {
714 MigrationState *s = migrate_get_current();
715 MigrationCapabilityStatusList *cap;
716
717 if (migration_is_setup_or_active(s->state)) {
718 error_setg(errp, QERR_MIGRATION_ACTIVE);
719 return;
720 }
721
722 for (cap = params; cap; cap = cap->next) {
723 s->enabled_capabilities[cap->value->capability] = cap->value->state;
724 }
725
726 if (migrate_postcopy_ram()) {
727 if (migrate_use_compression()) {
728 /* The decompression threads asynchronously write into RAM
729 * rather than use the atomic copies needed to avoid
730 * userfaulting. It should be possible to fix the decompression
731 * threads for compatibility in future.
732 */
733 error_report("Postcopy is not currently compatible with "
734 "compression");
735 s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
736 false;
737 }
738 }
739 }
740
741 void qmp_migrate_set_parameters(bool has_compress_level,
742 int64_t compress_level,
743 bool has_compress_threads,
744 int64_t compress_threads,
745 bool has_decompress_threads,
746 int64_t decompress_threads,
747 bool has_cpu_throttle_initial,
748 int64_t cpu_throttle_initial,
749 bool has_cpu_throttle_increment,
750 int64_t cpu_throttle_increment, Error **errp)
751 {
752 MigrationState *s = migrate_get_current();
753
754 if (has_compress_level && (compress_level < 0 || compress_level > 9)) {
755 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
756 "is invalid, it should be in the range of 0 to 9");
757 return;
758 }
759 if (has_compress_threads &&
760 (compress_threads < 1 || compress_threads > 255)) {
761 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
762 "compress_threads",
763 "is invalid, it should be in the range of 1 to 255");
764 return;
765 }
766 if (has_decompress_threads &&
767 (decompress_threads < 1 || decompress_threads > 255)) {
768 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
769 "decompress_threads",
770 "is invalid, it should be in the range of 1 to 255");
771 return;
772 }
773 if (has_cpu_throttle_initial &&
774 (cpu_throttle_initial < 1 || cpu_throttle_initial > 99)) {
775 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
776 "cpu_throttle_initial",
777 "an integer in the range of 1 to 99");
778 }
779 if (has_cpu_throttle_increment &&
780 (cpu_throttle_increment < 1 || cpu_throttle_increment > 99)) {
781 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
782 "cpu_throttle_increment",
783 "an integer in the range of 1 to 99");
784 }
785
786 if (has_compress_level) {
787 s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
788 }
789 if (has_compress_threads) {
790 s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] = compress_threads;
791 }
792 if (has_decompress_threads) {
793 s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
794 decompress_threads;
795 }
796 if (has_cpu_throttle_initial) {
797 s->parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INITIAL] =
798 cpu_throttle_initial;
799 }
800
801 if (has_cpu_throttle_increment) {
802 s->parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INCREMENT] =
803 cpu_throttle_increment;
804 }
805 }
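/*
 * These correspond to the QMP migrate-set-parameters command, e.g.
 * (illustrative values, all within the ranges checked above):
 *
 *   -> { "execute": "migrate-set-parameters",
 *        "arguments": { "compress-level": 1,
 *                       "compress-threads": 8,
 *                       "cpu-throttle-initial": 20 } }
 *   <- { "return": {} }
 */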
806
807 void qmp_migrate_start_postcopy(Error **errp)
808 {
809 MigrationState *s = migrate_get_current();
810
811 if (!migrate_postcopy_ram()) {
812 error_setg(errp, "Enable postcopy with migrate_set_capability before"
813 " the start of migration");
814 return;
815 }
816
817 if (s->state == MIGRATION_STATUS_NONE) {
818 error_setg(errp, "Postcopy must be started after migration has been"
819 " started");
820 return;
821 }
822 /*
823 * we don't error if migration has finished since that would be racy
824 * with issuing this command.
825 */
826 atomic_set(&s->start_postcopy, true);
827 }
828
829 /* shared migration helpers */
830
831 void migrate_set_state(int *state, int old_state, int new_state)
832 {
833 if (atomic_cmpxchg(state, old_state, new_state) == old_state) {
834 trace_migrate_set_state(new_state);
835 migrate_generate_event(new_state);
836 }
837 }
838
839 static void migrate_fd_cleanup(void *opaque)
840 {
841 MigrationState *s = opaque;
842
843 qemu_bh_delete(s->cleanup_bh);
844 s->cleanup_bh = NULL;
845
846 flush_page_queue(s);
847
848 if (s->to_dst_file) {
849 trace_migrate_fd_cleanup();
850 qemu_mutex_unlock_iothread();
851 if (s->migration_thread_running) {
852 qemu_thread_join(&s->thread);
853 s->migration_thread_running = false;
854 }
855 qemu_mutex_lock_iothread();
856
857 migrate_compress_threads_join();
858 qemu_fclose(s->to_dst_file);
859 s->to_dst_file = NULL;
860 }
861
862 assert((s->state != MIGRATION_STATUS_ACTIVE) &&
863 (s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE));
864
865 if (s->state == MIGRATION_STATUS_CANCELLING) {
866 migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
867 MIGRATION_STATUS_CANCELLED);
868 }
869
870 notifier_list_notify(&migration_state_notifiers, s);
871 }
872
873 void migrate_fd_error(MigrationState *s, const Error *error)
874 {
875 trace_migrate_fd_error(error ? error_get_pretty(error) : "");
876 assert(s->to_dst_file == NULL);
877 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
878 MIGRATION_STATUS_FAILED);
879 if (!s->error) {
880 s->error = error_copy(error);
881 }
882 notifier_list_notify(&migration_state_notifiers, s);
883 }
884
885 static void migrate_fd_cancel(MigrationState *s)
886 {
887 int old_state ;
888 QEMUFile *f = migrate_get_current()->to_dst_file;
889 trace_migrate_fd_cancel();
890
891 if (s->rp_state.from_dst_file) {
892         /* shut down the rp socket, thus causing the rp thread to exit */
893 qemu_file_shutdown(s->rp_state.from_dst_file);
894 }
895
896 do {
897 old_state = s->state;
898 if (!migration_is_setup_or_active(old_state)) {
899 break;
900 }
901 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
902 } while (s->state != MIGRATION_STATUS_CANCELLING);
903
904 /*
905 * If we're unlucky the migration code might be stuck somewhere in a
906 * send/write while the network has failed and is waiting to timeout;
907 * if we've got shutdown(2) available then we can force it to quit.
908 * The outgoing qemu file gets closed in migrate_fd_cleanup that is
909 * called in a bh, so there is no race against this cancel.
910 */
911 if (s->state == MIGRATION_STATUS_CANCELLING && f) {
912 qemu_file_shutdown(f);
913 }
914 }
915
916 void add_migration_state_change_notifier(Notifier *notify)
917 {
918 notifier_list_add(&migration_state_notifiers, notify);
919 }
920
921 void remove_migration_state_change_notifier(Notifier *notify)
922 {
923 notifier_remove(notify);
924 }
925
926 bool migration_in_setup(MigrationState *s)
927 {
928 return s->state == MIGRATION_STATUS_SETUP;
929 }
930
931 bool migration_has_finished(MigrationState *s)
932 {
933 return s->state == MIGRATION_STATUS_COMPLETED;
934 }
935
936 bool migration_has_failed(MigrationState *s)
937 {
938 return (s->state == MIGRATION_STATUS_CANCELLED ||
939 s->state == MIGRATION_STATUS_FAILED);
940 }
941
942 bool migration_in_postcopy(MigrationState *s)
943 {
944 return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
945 }
946
947 bool migration_in_postcopy_after_devices(MigrationState *s)
948 {
949 return migration_in_postcopy(s) && s->postcopy_after_devices;
950 }
951
952 MigrationState *migrate_init(const MigrationParams *params)
953 {
954 MigrationState *s = migrate_get_current();
955
956 /*
957 * Reinitialise all migration state, except
958 * parameters/capabilities that the user set, and
959 * locks.
960 */
961 s->bytes_xfer = 0;
962 s->xfer_limit = 0;
963 s->cleanup_bh = 0;
964 s->to_dst_file = NULL;
965 s->state = MIGRATION_STATUS_NONE;
966 s->params = *params;
967 s->rp_state.from_dst_file = NULL;
968 s->rp_state.error = false;
969 s->mbps = 0.0;
970 s->downtime = 0;
971 s->expected_downtime = 0;
972 s->dirty_pages_rate = 0;
973 s->dirty_bytes_rate = 0;
974 s->setup_time = 0;
975 s->dirty_sync_count = 0;
976 s->start_postcopy = false;
977 s->postcopy_after_devices = false;
978 s->migration_thread_running = false;
979 s->last_req_rb = NULL;
980 error_free(s->error);
981 s->error = NULL;
982
983 migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
984
985 QSIMPLEQ_INIT(&s->src_page_requests);
986
987 s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
988 return s;
989 }
990
991 static GSList *migration_blockers;
992
993 void migrate_add_blocker(Error *reason)
994 {
995 migration_blockers = g_slist_prepend(migration_blockers, reason);
996 }
997
998 void migrate_del_blocker(Error *reason)
999 {
1000 migration_blockers = g_slist_remove(migration_blockers, reason);
1001 }
1002
1003 void qmp_migrate_incoming(const char *uri, Error **errp)
1004 {
1005 Error *local_err = NULL;
1006 static bool once = true;
1007
1008 if (!deferred_incoming) {
1009 error_setg(errp, "For use with '-incoming defer'");
1010 return;
1011 }
1012 if (!once) {
1013         error_setg(errp, "The incoming migration has already been started");
             return;
1014     }
1015
1016 qemu_start_incoming_migration(uri, &local_err);
1017
1018 if (local_err) {
1019 error_propagate(errp, local_err);
1020 return;
1021 }
1022
1023 once = false;
1024 }
1025
1026 bool migration_is_blocked(Error **errp)
1027 {
1028 if (qemu_savevm_state_blocked(errp)) {
1029 return true;
1030 }
1031
1032 if (migration_blockers) {
1033 *errp = error_copy(migration_blockers->data);
1034 return true;
1035 }
1036
1037 return false;
1038 }
1039
1040 void qmp_migrate(const char *uri, bool has_blk, bool blk,
1041 bool has_inc, bool inc, bool has_detach, bool detach,
1042 Error **errp)
1043 {
1044 Error *local_err = NULL;
1045 MigrationState *s = migrate_get_current();
1046 MigrationParams params;
1047 const char *p;
1048
1049 params.blk = has_blk && blk;
1050 params.shared = has_inc && inc;
1051
1052 if (migration_is_setup_or_active(s->state) ||
1053 s->state == MIGRATION_STATUS_CANCELLING) {
1054 error_setg(errp, QERR_MIGRATION_ACTIVE);
1055 return;
1056 }
1057 if (runstate_check(RUN_STATE_INMIGRATE)) {
1058 error_setg(errp, "Guest is waiting for an incoming migration");
1059 return;
1060 }
1061
1062 if (migration_is_blocked(errp)) {
1063 return;
1064 }
1065
1066 s = migrate_init(&params);
1067
1068 if (strstart(uri, "tcp:", &p)) {
1069 tcp_start_outgoing_migration(s, p, &local_err);
1070 #ifdef CONFIG_RDMA
1071 } else if (strstart(uri, "rdma:", &p)) {
1072 rdma_start_outgoing_migration(s, p, &local_err);
1073 #endif
1074 #if !defined(WIN32)
1075 } else if (strstart(uri, "exec:", &p)) {
1076 exec_start_outgoing_migration(s, p, &local_err);
1077 #endif
1078 } else if (strstart(uri, "unix:", &p)) {
1079 unix_start_outgoing_migration(s, p, &local_err);
1080 #if !defined(WIN32)
1081 } else if (strstart(uri, "fd:", &p)) {
1082 fd_start_outgoing_migration(s, p, &local_err);
1083 #endif
1084 } else {
1085 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
1086 "a valid migration protocol");
1087 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1088 MIGRATION_STATUS_FAILED);
1089 return;
1090 }
1091
1092 if (local_err) {
1093 migrate_fd_error(s, local_err);
1094 error_propagate(errp, local_err);
1095 return;
1096 }
1097 }
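/*
 * Example invocation over QMP, using the unix transport this series converts
 * to QIOChannel (the socket path is illustrative):
 *
 *   -> { "execute": "migrate",
 *        "arguments": { "uri": "unix:/tmp/migrate.sock" } }
 *   <- { "return": {} }
 */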
1098
1099 void qmp_migrate_cancel(Error **errp)
1100 {
1101 migrate_fd_cancel(migrate_get_current());
1102 }
1103
1104 void qmp_migrate_set_cache_size(int64_t value, Error **errp)
1105 {
1106 MigrationState *s = migrate_get_current();
1107 int64_t new_size;
1108
1109 /* Check for truncation */
1110 if (value != (size_t)value) {
1111 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
1112 "exceeding address space");
1113 return;
1114 }
1115
1116 /* Cache should not be larger than guest ram size */
1117 if (value > ram_bytes_total()) {
1118 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
1119 "exceeds guest ram size ");
1120 return;
1121 }
1122
1123 new_size = xbzrle_cache_resize(value);
1124 if (new_size < 0) {
1125 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
1126 "is smaller than page size");
1127 return;
1128 }
1129
1130 s->xbzrle_cache_size = new_size;
1131 }
1132
1133 int64_t qmp_query_migrate_cache_size(Error **errp)
1134 {
1135 return migrate_xbzrle_cache_size();
1136 }
1137
1138 void qmp_migrate_set_speed(int64_t value, Error **errp)
1139 {
1140 MigrationState *s;
1141
1142 if (value < 0) {
1143 value = 0;
1144 }
1145 if (value > SIZE_MAX) {
1146 value = SIZE_MAX;
1147 }
1148
1149 s = migrate_get_current();
1150 s->bandwidth_limit = value;
1151 if (s->to_dst_file) {
1152 qemu_file_set_rate_limit(s->to_dst_file,
1153 s->bandwidth_limit / XFER_LIMIT_RATIO);
1154 }
1155 }
1156
1157 void qmp_migrate_set_downtime(double value, Error **errp)
1158 {
1159 value *= 1e9;
1160 value = MAX(0, MIN(UINT64_MAX, value));
1161 max_downtime = (uint64_t)value;
1162 }
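/*
 * Worked example: "migrate_set_downtime 0.3" stores 0.3 * 1e9 = 300000000ns,
 * matching the default max_downtime above.
 */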
1163
1164 bool migrate_postcopy_ram(void)
1165 {
1166 MigrationState *s;
1167
1168 s = migrate_get_current();
1169
1170 return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
1171 }
1172
1173 bool migrate_auto_converge(void)
1174 {
1175 MigrationState *s;
1176
1177 s = migrate_get_current();
1178
1179 return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
1180 }
1181
1182 bool migrate_zero_blocks(void)
1183 {
1184 MigrationState *s;
1185
1186 s = migrate_get_current();
1187
1188 return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
1189 }
1190
1191 bool migrate_use_compression(void)
1192 {
1193 MigrationState *s;
1194
1195 s = migrate_get_current();
1196
1197 return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
1198 }
1199
1200 int migrate_compress_level(void)
1201 {
1202 MigrationState *s;
1203
1204 s = migrate_get_current();
1205
1206 return s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
1207 }
1208
1209 int migrate_compress_threads(void)
1210 {
1211 MigrationState *s;
1212
1213 s = migrate_get_current();
1214
1215 return s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
1216 }
1217
1218 int migrate_decompress_threads(void)
1219 {
1220 MigrationState *s;
1221
1222 s = migrate_get_current();
1223
1224 return s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
1225 }
1226
1227 bool migrate_use_events(void)
1228 {
1229 MigrationState *s;
1230
1231 s = migrate_get_current();
1232
1233 return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
1234 }
1235
1236 int migrate_use_xbzrle(void)
1237 {
1238 MigrationState *s;
1239
1240 s = migrate_get_current();
1241
1242 return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
1243 }
1244
1245 int64_t migrate_xbzrle_cache_size(void)
1246 {
1247 MigrationState *s;
1248
1249 s = migrate_get_current();
1250
1251 return s->xbzrle_cache_size;
1252 }
1253
1254 /* migration thread support */
1255 /*
1256 * Something bad happened to the RP stream, mark an error
1257 * The caller shall print or trace something to indicate why
1258 */
1259 static void mark_source_rp_bad(MigrationState *s)
1260 {
1261 s->rp_state.error = true;
1262 }
1263
1264 static struct rp_cmd_args {
1265 ssize_t len; /* -1 = variable */
1266 const char *name;
1267 } rp_cmd_args[] = {
1268 [MIG_RP_MSG_INVALID] = { .len = -1, .name = "INVALID" },
1269 [MIG_RP_MSG_SHUT] = { .len = 4, .name = "SHUT" },
1270 [MIG_RP_MSG_PONG] = { .len = 4, .name = "PONG" },
1271 [MIG_RP_MSG_REQ_PAGES] = { .len = 12, .name = "REQ_PAGES" },
1272 [MIG_RP_MSG_REQ_PAGES_ID] = { .len = -1, .name = "REQ_PAGES_ID" },
1273 [MIG_RP_MSG_MAX] = { .len = -1, .name = "MAX" },
1274 };
1275
1276 /*
1277 * Process a request for pages received on the return path,
1278 * We're allowed to send more than requested (e.g. to round to our page size)
1279 * and we don't need to send pages that have already been sent.
1280 */
1281 static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
1282 ram_addr_t start, size_t len)
1283 {
1284 long our_host_ps = getpagesize();
1285
1286 trace_migrate_handle_rp_req_pages(rbname, start, len);
1287
1288 /*
1289 * Since we currently insist on matching page sizes, just sanity check
1290 * we're being asked for whole host pages.
1291 */
1292 if (start & (our_host_ps-1) ||
1293 (len & (our_host_ps-1))) {
1294 error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
1295 " len: %zd", __func__, start, len);
1296 mark_source_rp_bad(ms);
1297 return;
1298 }
1299
1300 if (ram_save_queue_pages(ms, rbname, start, len)) {
1301 mark_source_rp_bad(ms);
1302 }
1303 }
1304
1305 /*
1306 * Handles messages sent on the return path towards the source VM
1307 *
1308 */
1309 static void *source_return_path_thread(void *opaque)
1310 {
1311 MigrationState *ms = opaque;
1312 QEMUFile *rp = ms->rp_state.from_dst_file;
1313 uint16_t header_len, header_type;
1314 uint8_t buf[512];
1315 uint32_t tmp32, sibling_error;
1316 ram_addr_t start = 0; /* =0 to silence warning */
1317 size_t len = 0, expected_len;
1318 int res;
1319
1320 trace_source_return_path_thread_entry();
1321 while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
1322 migration_is_setup_or_active(ms->state)) {
1323 trace_source_return_path_thread_loop_top();
1324 header_type = qemu_get_be16(rp);
1325 header_len = qemu_get_be16(rp);
1326
1327 if (header_type >= MIG_RP_MSG_MAX ||
1328 header_type == MIG_RP_MSG_INVALID) {
1329 error_report("RP: Received invalid message 0x%04x length 0x%04x",
1330 header_type, header_len);
1331 mark_source_rp_bad(ms);
1332 goto out;
1333 }
1334
1335 if ((rp_cmd_args[header_type].len != -1 &&
1336 header_len != rp_cmd_args[header_type].len) ||
1337 header_len > sizeof(buf)) {
1338 error_report("RP: Received '%s' message (0x%04x) with"
1339 "incorrect length %d expecting %zu",
1340 rp_cmd_args[header_type].name, header_type, header_len,
1341 (size_t)rp_cmd_args[header_type].len);
1342 mark_source_rp_bad(ms);
1343 goto out;
1344 }
1345
1346 /* We know we've got a valid header by this point */
1347 res = qemu_get_buffer(rp, buf, header_len);
1348 if (res != header_len) {
1349 error_report("RP: Failed reading data for message 0x%04x"
1350 " read %d expected %d",
1351 header_type, res, header_len);
1352 mark_source_rp_bad(ms);
1353 goto out;
1354 }
1355
1356 /* OK, we have the message and the data */
1357 switch (header_type) {
1358 case MIG_RP_MSG_SHUT:
1359 sibling_error = be32_to_cpup((uint32_t *)buf);
1360 trace_source_return_path_thread_shut(sibling_error);
1361 if (sibling_error) {
1362 error_report("RP: Sibling indicated error %d", sibling_error);
1363 mark_source_rp_bad(ms);
1364 }
1365 /*
1366 * We'll let the main thread deal with closing the RP
1367 * we could do a shutdown(2) on it, but we're the only user
1368 * anyway, so there's nothing gained.
1369 */
1370 goto out;
1371
1372 case MIG_RP_MSG_PONG:
1373 tmp32 = be32_to_cpup((uint32_t *)buf);
1374 trace_source_return_path_thread_pong(tmp32);
1375 break;
1376
1377 case MIG_RP_MSG_REQ_PAGES:
1378 start = be64_to_cpup((uint64_t *)buf);
1379 len = be32_to_cpup((uint32_t *)(buf + 8));
1380 migrate_handle_rp_req_pages(ms, NULL, start, len);
1381 break;
1382
1383 case MIG_RP_MSG_REQ_PAGES_ID:
1384 expected_len = 12 + 1; /* header + termination */
1385
1386 if (header_len >= expected_len) {
1387 start = be64_to_cpup((uint64_t *)buf);
1388 len = be32_to_cpup((uint32_t *)(buf + 8));
1389 /* Now we expect an idstr */
1390 tmp32 = buf[12]; /* Length of the following idstr */
1391 buf[13 + tmp32] = '\0';
1392 expected_len += tmp32;
1393 }
1394 if (header_len != expected_len) {
1395 error_report("RP: Req_Page_id with length %d expecting %zd",
1396 header_len, expected_len);
1397 mark_source_rp_bad(ms);
1398 goto out;
1399 }
1400 migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
1401 break;
1402
1403 default:
1404 break;
1405 }
1406 }
1407 if (qemu_file_get_error(rp)) {
1408 trace_source_return_path_thread_bad_end();
1409 mark_source_rp_bad(ms);
1410 }
1411
1412 trace_source_return_path_thread_end();
1413 out:
1414 ms->rp_state.from_dst_file = NULL;
1415 qemu_fclose(rp);
1416 return NULL;
1417 }
1418
1419 static int open_return_path_on_source(MigrationState *ms)
1420 {
1421
1422 ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
1423 if (!ms->rp_state.from_dst_file) {
1424 return -1;
1425 }
1426
1427 trace_open_return_path_on_source();
1428 qemu_thread_create(&ms->rp_state.rp_thread, "return path",
1429 source_return_path_thread, ms, QEMU_THREAD_JOINABLE);
1430
1431 trace_open_return_path_on_source_continue();
1432
1433 return 0;
1434 }
1435
1436 /* Returns 0 if the RP was ok, otherwise there was an error on the RP */
1437 static int await_return_path_close_on_source(MigrationState *ms)
1438 {
1439 /*
1440 * If this is a normal exit then the destination will send a SHUT and the
1441 * rp_thread will exit, however if there's an error we need to cause
1442 * it to exit.
1443 */
1444 if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
1445 /*
1446 * shutdown(2), if we have it, will cause it to unblock if it's stuck
1447 * waiting for the destination.
1448 */
1449 qemu_file_shutdown(ms->rp_state.from_dst_file);
1450 mark_source_rp_bad(ms);
1451 }
1452 trace_await_return_path_close_on_source_joining();
1453 qemu_thread_join(&ms->rp_state.rp_thread);
1454 trace_await_return_path_close_on_source_close();
1455 return ms->rp_state.error;
1456 }
1457
1458 /*
1459 * Switch from normal iteration to postcopy
1460 * Returns non-0 on error
1461 */
1462 static int postcopy_start(MigrationState *ms, bool *old_vm_running)
1463 {
1464 int ret;
1465 QIOChannelBuffer *bioc;
1466 QEMUFile *fb;
1467 int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1468 migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
1469 MIGRATION_STATUS_POSTCOPY_ACTIVE);
1470
1471 trace_postcopy_start();
1472 qemu_mutex_lock_iothread();
1473 trace_postcopy_start_set_run();
1474
1475 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
1476 *old_vm_running = runstate_is_running();
1477 global_state_store();
1478 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
1479 if (ret < 0) {
1480 goto fail;
1481 }
1482
1483 ret = bdrv_inactivate_all();
1484 if (ret < 0) {
1485 goto fail;
1486 }
1487
1488 /*
1489 * Cause any non-postcopiable, but iterative devices to
1490 * send out their final data.
1491 */
1492 qemu_savevm_state_complete_precopy(ms->to_dst_file, true);
1493
1494 /*
1495      * In the 'finish migrate' state, with the io-lock held, everything
1496      * should be quiet; but we've potentially still got dirty pages and we
1497      * need to tell the destination to discard any pages it has already
1498      * received that are now dirty.
1499 */
1500 if (ram_postcopy_send_discard_bitmap(ms)) {
1501 error_report("postcopy send discard bitmap failed");
1502 goto fail;
1503 }
1504
1505 /*
1506 * send rest of state - note things that are doing postcopy
1507 * will notice we're in POSTCOPY_ACTIVE and not actually
1508 * wrap their state up here
1509 */
1510 qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
1511 /* Ping just for debugging, helps line traces up */
1512 qemu_savevm_send_ping(ms->to_dst_file, 2);
1513
1514 /*
1515 * While loading the device state we may trigger page transfer
1516 * requests and the fd must be free to process those, and thus
1517 * the destination must read the whole device state off the fd before
1518 * it starts processing it. Unfortunately the ad-hoc migration format
1519 * doesn't allow the destination to know the size to read without fully
1520      * parsing it through each device's load-state code (especially the open
1521 * coded devices that use get/put).
1522 * So we wrap the device state up in a package with a length at the start;
1523      * to do this we use a buffer channel to hold the whole of the device state.
1524 */
1525 bioc = qio_channel_buffer_new(4096);
1526 fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc));
1527 object_unref(OBJECT(bioc));
1528
1529 /*
1530 * Make sure the receiver can get incoming pages before we send the rest
1531 * of the state
1532 */
1533 qemu_savevm_send_postcopy_listen(fb);
1534
1535 qemu_savevm_state_complete_precopy(fb, false);
1536 qemu_savevm_send_ping(fb, 3);
1537
1538 qemu_savevm_send_postcopy_run(fb);
1539
1540 /* <><> end of stuff going into the package */
1541
1542 /* Now send that blob */
1543 if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
1544 goto fail_closefb;
1545 }
1546 qemu_fclose(fb);
1547
1548 /* Send a notify to give a chance for anything that needs to happen
1549 * at the transition to postcopy and after the device state; in particular
1550 * spice needs to trigger a transition now
1551 */
1552 ms->postcopy_after_devices = true;
1553 notifier_list_notify(&migration_state_notifiers, ms);
1554
1555 ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;
1556
1557 qemu_mutex_unlock_iothread();
1558
1559 /*
1560 * Although this ping is just for debug, it could potentially be
1561 * used for getting a better measurement of downtime at the source.
1562 */
1563 qemu_savevm_send_ping(ms->to_dst_file, 4);
1564
1565 ret = qemu_file_get_error(ms->to_dst_file);
1566 if (ret) {
1567 error_report("postcopy_start: Migration stream errored");
1568 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
1569 MIGRATION_STATUS_FAILED);
1570 }
1571
1572 return ret;
1573
1574 fail_closefb:
1575 qemu_fclose(fb);
1576 fail:
1577 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
1578 MIGRATION_STATUS_FAILED);
1579 qemu_mutex_unlock_iothread();
1580 return -1;
1581 }
1582
1583 /**
1584 * migration_completion: Used by migration_thread when there's not much left.
1585 * The caller 'breaks' the loop when this returns.
1586 *
1587 * @s: Current migration state
1588 * @current_active_state: The migration state we expect to be in
1589 * @*old_vm_running: Pointer to old_vm_running flag
1590 * @*start_time: Pointer to time to update
1591 */
1592 static void migration_completion(MigrationState *s, int current_active_state,
1593 bool *old_vm_running,
1594 int64_t *start_time)
1595 {
1596 int ret;
1597
1598 if (s->state == MIGRATION_STATUS_ACTIVE) {
1599 qemu_mutex_lock_iothread();
1600 *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1601 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
1602 *old_vm_running = runstate_is_running();
1603 ret = global_state_store();
1604
1605 if (!ret) {
1606 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
1607 if (ret >= 0) {
1608 ret = bdrv_inactivate_all();
1609 }
1610 if (ret >= 0) {
1611 qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
1612 qemu_savevm_state_complete_precopy(s->to_dst_file, false);
1613 }
1614 }
1615 qemu_mutex_unlock_iothread();
1616
1617 if (ret < 0) {
1618 goto fail;
1619 }
1620 } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
1621 trace_migration_completion_postcopy_end();
1622
1623 qemu_savevm_state_complete_postcopy(s->to_dst_file);
1624 trace_migration_completion_postcopy_end_after_complete();
1625 }
1626
1627 /*
1628 * If rp was opened we must clean up the thread before
1629 * cleaning everything else up (since if there are no failures
1630      * it will wait for the destination to send its status in
1631 * a SHUT command).
1632      * Postcopy opens rp if enabled (even if it's not activated)
1633 */
1634 if (migrate_postcopy_ram()) {
1635 int rp_error;
1636 trace_migration_completion_postcopy_end_before_rp();
1637 rp_error = await_return_path_close_on_source(s);
1638 trace_migration_completion_postcopy_end_after_rp(rp_error);
1639 if (rp_error) {
1640 goto fail_invalidate;
1641 }
1642 }
1643
1644 if (qemu_file_get_error(s->to_dst_file)) {
1645 trace_migration_completion_file_err();
1646 goto fail_invalidate;
1647 }
1648
1649 migrate_set_state(&s->state, current_active_state,
1650 MIGRATION_STATUS_COMPLETED);
1651 return;
1652
1653 fail_invalidate:
1654 /* If not doing postcopy, vm_start() will be called: let's regain
1655      * control over the disk images.
1656 */
1657 if (s->state == MIGRATION_STATUS_ACTIVE) {
1658 Error *local_err = NULL;
1659
1660 bdrv_invalidate_cache_all(&local_err);
1661 if (local_err) {
1662 error_report_err(local_err);
1663 }
1664 }
1665
1666 fail:
1667 migrate_set_state(&s->state, current_active_state,
1668 MIGRATION_STATUS_FAILED);
1669 }
1670
1671 /*
1672 * Master migration thread on the source VM.
1673 * It drives the migration and pumps the data down the outgoing channel.
1674 */
1675 static void *migration_thread(void *opaque)
1676 {
1677 MigrationState *s = opaque;
1678 /* Used by the bandwidth calcs, updated later */
1679 int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1680 int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
1681 int64_t initial_bytes = 0;
1682 int64_t max_size = 0;
1683 int64_t start_time = initial_time;
1684 int64_t end_time;
1685 bool old_vm_running = false;
1686 bool entered_postcopy = false;
1687 /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */
1688 enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE;
1689
1690 rcu_register_thread();
1691
1692 qemu_savevm_state_header(s->to_dst_file);
1693
1694 if (migrate_postcopy_ram()) {
1695 /* Now tell the dest that it should open its end so it can reply */
1696 qemu_savevm_send_open_return_path(s->to_dst_file);
1697
1698 /* And do a ping that will make stuff easier to debug */
1699 qemu_savevm_send_ping(s->to_dst_file, 1);
1700
1701 /*
1702 * Tell the destination that we *might* want to do postcopy later;
1703 * if the other end can't do postcopy it should fail now, nice and
1704 * early.
1705 */
1706 qemu_savevm_send_postcopy_advise(s->to_dst_file);
1707 }
1708
1709 qemu_savevm_state_begin(s->to_dst_file, &s->params);
1710
1711 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
1712 current_active_state = MIGRATION_STATUS_ACTIVE;
1713 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1714 MIGRATION_STATUS_ACTIVE);
1715
1716 trace_migration_thread_setup_complete();
1717
1718 while (s->state == MIGRATION_STATUS_ACTIVE ||
1719 s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
1720 int64_t current_time;
1721 uint64_t pending_size;
1722
1723 if (!qemu_file_rate_limit(s->to_dst_file)) {
1724 uint64_t pend_post, pend_nonpost;
1725
1726 qemu_savevm_state_pending(s->to_dst_file, max_size, &pend_nonpost,
1727 &pend_post);
1728 pending_size = pend_nonpost + pend_post;
1729 trace_migrate_pending(pending_size, max_size,
1730 pend_post, pend_nonpost);
1731 if (pending_size && pending_size >= max_size) {
1732 /* Still a significant amount to transfer */
1733
1734 if (migrate_postcopy_ram() &&
1735 s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
1736 pend_nonpost <= max_size &&
1737 atomic_read(&s->start_postcopy)) {
1738
1739 if (!postcopy_start(s, &old_vm_running)) {
1740 current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE;
1741 entered_postcopy = true;
1742 }
1743
1744 continue;
1745 }
1746 /* Just another iteration step */
1747 qemu_savevm_state_iterate(s->to_dst_file, entered_postcopy);
1748 } else {
1749 trace_migration_thread_low_pending(pending_size);
1750 migration_completion(s, current_active_state,
1751 &old_vm_running, &start_time);
1752 break;
1753 }
1754 }
1755
1756 if (qemu_file_get_error(s->to_dst_file)) {
1757 migrate_set_state(&s->state, current_active_state,
1758 MIGRATION_STATUS_FAILED);
1759 trace_migration_thread_file_err();
1760 break;
1761 }
1762 current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1763 if (current_time >= initial_time + BUFFER_DELAY) {
1764 uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) -
1765 initial_bytes;
1766 uint64_t time_spent = current_time - initial_time;
1767 double bandwidth = (double)transferred_bytes / time_spent;
1768 max_size = bandwidth * migrate_max_downtime() / 1000000;
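            /*
             * Worked example: at a measured 100000 bytes/ms (~100 MB/s) and
             * the default 300ms max_downtime, max_size comes out at ~30 MB;
             * the migration only completes once the remaining dirty data
             * fits within that budget (see the pending_size check above).
             */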
1769
1770 s->mbps = (((double) transferred_bytes * 8.0) /
1771 ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
1772
1773 trace_migrate_transferred(transferred_bytes, time_spent,
1774 bandwidth, max_size);
1775             /* If we haven't sent anything, we don't want to recalculate;
1776                10000 is a small enough number for our purposes */
1777 if (s->dirty_bytes_rate && transferred_bytes > 10000) {
1778 s->expected_downtime = s->dirty_bytes_rate / bandwidth;
1779 }
1780
1781 qemu_file_reset_rate_limit(s->to_dst_file);
1782 initial_time = current_time;
1783 initial_bytes = qemu_ftell(s->to_dst_file);
1784 }
1785 if (qemu_file_rate_limit(s->to_dst_file)) {
1786 /* usleep expects microseconds */
1787 g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
1788 }
1789 }
1790
1791 trace_migration_thread_after_loop();
1792 /* If we enabled cpu throttling for auto-converge, turn it off. */
1793 cpu_throttle_stop();
1794 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1795
1796 qemu_mutex_lock_iothread();
1797 qemu_savevm_state_cleanup();
1798 if (s->state == MIGRATION_STATUS_COMPLETED) {
1799 uint64_t transferred_bytes = qemu_ftell(s->to_dst_file);
1800 s->total_time = end_time - s->total_time;
1801 if (!entered_postcopy) {
1802 s->downtime = end_time - start_time;
1803 }
1804 if (s->total_time) {
1805 s->mbps = (((double) transferred_bytes * 8.0) /
1806 ((double) s->total_time)) / 1000;
1807 }
1808 runstate_set(RUN_STATE_POSTMIGRATE);
1809 } else {
1810 if (old_vm_running && !entered_postcopy) {
1811 vm_start();
1812 }
1813 }
1814 qemu_bh_schedule(s->cleanup_bh);
1815 qemu_mutex_unlock_iothread();
1816
1817 rcu_unregister_thread();
1818 return NULL;
1819 }
1820
1821 void migrate_fd_connect(MigrationState *s)
1822 {
1823     /* This is a best first approximation; convert ns to ms */
1824 s->expected_downtime = max_downtime/1000000;
1825 s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);
1826
1827 qemu_file_set_blocking(s->to_dst_file, true);
1828 qemu_file_set_rate_limit(s->to_dst_file,
1829 s->bandwidth_limit / XFER_LIMIT_RATIO);
1830
1831 /* Notify before starting migration thread */
1832 notifier_list_notify(&migration_state_notifiers, s);
1833
1834 /*
1835 * Open the return path; currently for postcopy but other things might
1836 * also want it.
1837 */
1838 if (migrate_postcopy_ram()) {
1839 if (open_return_path_on_source(s)) {
1840 error_report("Unable to open return-path for postcopy");
1841 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1842 MIGRATION_STATUS_FAILED);
1843 migrate_fd_cleanup(s);
1844 return;
1845 }
1846 }
1847
1848 migrate_compress_threads_create();
1849 qemu_thread_create(&s->thread, "migration", migration_thread, s,
1850 QEMU_THREAD_JOINABLE);
1851 s->migration_thread_running = true;
1852 }
1853
1854 PostcopyState postcopy_state_get(void)
1855 {
1856 return atomic_mb_read(&incoming_postcopy_state);
1857 }
1858
1859 /* Set the state and return the old state */
1860 PostcopyState postcopy_state_set(PostcopyState new_state)
1861 {
1862 return atomic_xchg(&incoming_postcopy_state, new_state);
1863 }
1864