migration/block.c
/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/hw.h"
#include "qemu/cutils.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
#include "migration/block.h"
#include "migration/migration.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"

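/* Block migration moves data in whole chunks of BLOCK_SIZE (1 MiB); a chunk
 * therefore spans BDRV_SECTORS_PER_DIRTY_CHUNK sectors of BDRV_SECTOR_SIZE
 * bytes each.
 */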
#define BLOCK_SIZE                       (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)

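/* Flags travel in the low bits of the 64-bit header word that precedes each
 * record on the wire; the sector number is shifted left by BDRV_SECTOR_BITS,
 * which leaves the low bits free (see blk_send() and block_load()).
 */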
#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08

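/* Upper bound on the number of sectors probed per bdrv_is_allocated() call
 * when skipping over unallocated regions of a shared-base migration.
 */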
#define MAX_IS_ALLOCATED_SEARCH 65536

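/* Cap on the number of chunks that may be buffered at once, i.e. reads that
 * have been submitted plus reads that completed but have not yet been
 * flushed to the stream (see block_save_iterate()).
 */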
#define MAX_INFLIGHT_IO 512

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    /* Written during setup phase. Can be read without a lock. */
    BlockBackend *blk;
    char *blk_name;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    Error *blocker;

    /* Only used by migration thread. Does not need a lock. */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Data in the aio_bitmap is protected by block migration lock.
     * Allocation and free happen during setup and cleanup respectively.
     */
    unsigned long *aio_bitmap;

    /* Protected by block migration lock. */
    int64_t completed_sectors;

    /* During migration this is protected by iothread lock / AioContext.
     * Allocation and free happen during setup and cleanup respectively.
     */
    BdrvDirtyBitmap *dirty_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread. */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAIOCB *aiocb;

    /* Protected by block migration lock. */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    /* Written during setup phase. Can be read without a lock. */
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock. */
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread. Does not need a lock. */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock and any AioContexts. */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}

/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

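/* Send one block to the stream: a 64-bit header (sector number shifted left
 * by BDRV_SECTOR_BITS, OR'ed with flags), the device name length and name,
 * and then the BLOCK_SIZE payload unless the block was all zeroes.
 */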
static void blk_send(QEMUFile *f, BlkMigBlock * blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(blk->bmds->blk_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *) blk->bmds->blk_name, len);

    /* If a block is all zeroes, flush the stream here: the network bandwidth
     * is now a lot higher than the storage device bandwidth, so queueing up
     * zero blocks would only slow the migration down. */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}


/* Called with migration lock held. */

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (sector < blk_nb_sectors(bmds->blk)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
                  (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held. */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

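/* Allocate one bit per dirty chunk, rounded up to whole bytes; the bitmap
 * tracks which chunks currently have an AIO read in flight.
 */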
static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockBackend *bb = bmds->blk;
    int64_t bitmap_size;

    bitmap_size = blk_nb_sectors(bb) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

/* Never hold migration lock when yielding to the main loop! */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

/* Called with no lock taken. */

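/* Read the next bulk-phase chunk for this device and queue it for sending.
 * Returns 1 once the bulk phase for the device is complete, 0 otherwise.
 */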
static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockBackend *bb = bmds->blk;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        aio_context_acquire(blk_get_aio_context(bb));
        /* Skip unallocated sectors; intentionally treats failure as
         * an allocated sector */
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(blk_bs(bb), cur_sector,
                                  MAX_IS_ALLOCATED_SEARCH, &nr_sectors)) {
            cur_sector += nr_sectors;
        }
        aio_context_release(blk_get_aio_context(bb));
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_new(BlkMigBlock, 1);
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    /* We do not know if bs is under the main thread (and thus does
     * not acquire the AioContext when doing AIO) or rather under
     * dataplane. Thus acquire both the iothread mutex and the
     * AioContext.
     *
     * This is ugly and will disappear when we make bdrv_* thread-safe,
     * without the need to acquire the AioContext.
     */
    qemu_mutex_lock_iothread();
    aio_context_acquire(blk_get_aio_context(bmds->blk));
    blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
                                0, blk_mig_read_cb, blk);

    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector, nr_sectors);
    aio_context_release(blk_get_aio_context(bmds->blk));
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}

/* Called with iothread lock taken. */

static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(blk_bs(bmds->blk),
                                                      BLOCK_SIZE, NULL, NULL);
        aio_context_release(blk_get_aio_context(bmds->blk));
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            aio_context_acquire(blk_get_aio_context(bmds->blk));
            bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
            aio_context_release(blk_get_aio_context(bmds->blk));
        }
    }
    return ret;
}

/* Called with iothread lock taken. */

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
        aio_context_release(blk_get_aio_context(bmds->blk));
    }
}

static int init_blk_migration(QEMUFile *f)
{
    BlockDriverState *bs;
    BlkMigDevState *bmds;
    int64_t sectors;
    BdrvNextIterator it;
    int i, num_bs = 0;
    struct {
        BlkMigDevState *bmds;
        BlockDriverState *bs;
    } *bmds_bs;
    Error *local_err = NULL;
    int ret;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        num_bs++;
    }
    bmds_bs = g_malloc0(num_bs * sizeof(*bmds_bs));

    for (i = 0, bs = bdrv_first(&it); bs; bs = bdrv_next(&it), i++) {
        if (bdrv_is_read_only(bs)) {
            continue;
        }

        sectors = bdrv_nb_sectors(bs);
        if (sectors <= 0) {
            ret = sectors;
            goto out;
        }

        bmds = g_new0(BlkMigDevState, 1);
        bmds->blk = blk_new(BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
        bmds->blk_name = g_strdup(bdrv_get_device_name(bs));
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = block_mig_state.shared_base;

        assert(i < num_bs);
        bmds_bs[i].bmds = bmds;
        bmds_bs[i].bs = bs;

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bdrv_get_device_name(bs));
        } else {
            DPRINTF("Start full migration for %s\n", bdrv_get_device_name(bs));
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }

    /* Can only insert new BDSes now because doing so while iterating block
     * devices may end up in a deadlock (iterating the new BDSes, too). */
    for (i = 0; i < num_bs; i++) {
        BlkMigDevState *bmds = bmds_bs[i].bmds;
        BlockDriverState *bs = bmds_bs[i].bs;

        if (bmds) {
            ret = blk_insert_bs(bmds->blk, bs, &local_err);
            if (ret < 0) {
                error_report_err(local_err);
                goto out;
            }

            alloc_aio_bitmap(bmds);
            error_setg(&bmds->blocker, "block device is in use by migration");
            bdrv_op_block_all(bs, bmds->blocker);
        }
    }

    ret = 0;
out:
    g_free(bmds_bs);
    return ret;
}

/* Called with no lock taken. */

static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Called with iothread lock and AioContext taken. */

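/* Scan this device's dirty bitmap from cur_dirty and send the first dirty
 * chunk found (asynchronously if is_async is set, synchronously otherwise).
 * Returns 1 when the whole device has been scanned, 0 if more chunks remain,
 * and a negative value on a read error.
 */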
static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    BlockDriverState *bs = blk_bs(bmds->blk);
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            blk_drain(bmds->blk);
        } else {
            blk_mig_unlock();
        }
        if (bdrv_get_dirty(bs, bmds->dirty_bitmap, sector)) {

            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = g_new(BlkMigBlock, 1);
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                blk->aiocb = blk_aio_preadv(bmds->blk,
                                            sector * BDRV_SECTOR_SIZE,
                                            &blk->qiov, 0, blk_mig_read_cb,
                                            blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = blk_pread(bmds->blk, sector * BDRV_SECTOR_SIZE, blk->buf,
                                nr_sectors * BDRV_SECTOR_SIZE);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, sector, nr_sectors);
            break;
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: little enough data for max_downtime
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        ret = mig_save_device_dirty(f, bmds, is_async);
        aio_context_release(blk_get_aio_context(bmds->blk));
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

/* Called with no locks taken. */

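/* Drain completed asynchronous reads from blk_list onto the wire, stopping
 * early if the file's rate limit is hit or a read reported an error.
 */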
static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __func__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __func__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken. */

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
        aio_context_release(blk_get_aio_context(bmds->blk));
    }

    return dirty << BDRV_SECTOR_BITS;
}

/* Called with iothread lock taken. */

static void block_migration_cleanup(void *opaque)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;
    AioContext *ctx;

    bdrv_drain_all();

    unset_dirty_tracking();

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_op_unblock_all(blk_bs(bmds->blk), bmds->blocker);
        error_free(bmds->blocker);

        /* Save ctx, because bmds->blk can disappear during blk_unref. */
        ctx = blk_get_aio_context(bmds->blk);
        aio_context_acquire(ctx);
        blk_unref(bmds->blk);
        aio_context_release(ctx);

        g_free(bmds->blk_name);
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    ret = init_blk_migration(f);
    if (ret < 0) {
        qemu_mutex_unlock_iothread();
        return ret;
    }

    /* start tracking dirty blocks */
    ret = set_dirty_tracking();

    qemu_mutex_unlock_iothread();

    if (ret) {
        return ret;
    }

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);
    int64_t delta_ftell;

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f) &&
           (block_mig_state.submitted +
            block_mig_state.read_done) <
           MAX_INFLIGHT_IO) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    delta_ftell = qemu_ftell(f) - last_ftell;
    if (delta_ftell > 0) {
        return 1;
    } else if (delta_ftell < 0) {
        return -1;
    } else {
        return 0;
    }
}

/* Called with iothread lock taken. */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* We know for sure that the bulk save has completed and that all
     * asynchronous reads have finished. */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return 0;
}

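/* Estimate the number of bytes still to be sent: the dirty bytes tracked by
 * the per-device bitmaps plus any chunks that are in flight or buffered.
 * During the bulk phase at least one extra block is reported so that the
 * migration is not treated as having converged prematurely.
 */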
static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                               uint64_t *non_postcopiable_pending,
                               uint64_t *postcopiable_pending)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    pending = get_remaining_dirty();
    qemu_mutex_unlock_iothread();

    blk_mig_lock();
    pending += block_mig_state.submitted * BLOCK_SIZE +
               block_mig_state.read_done * BLOCK_SIZE;
    blk_mig_unlock();

    /* Report at least one block pending during bulk phase */
    if (pending <= max_size && !block_mig_state.bulk_completed) {
        pending = max_size + BLOCK_SIZE;
    }

    DPRINTF("Enter save live pending %" PRIu64 "\n", pending);
    /* We don't do postcopy */
    *non_postcopiable_pending += pending;
}

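/* Incoming side: parse the stream written by the save handlers above. Each
 * record starts with a 64-bit word whose low bits carry the flags; device
 * block records are followed by the device name and a BLOCK_SIZE payload
 * (omitted for zero blocks), progress records carry a percentage, and an
 * EOS flag ends the stream.
 */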
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockBackend *blk, *blk_prev = NULL;
    Error *local_err = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            blk = blk_by_name(device_name);
            if (!blk) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (blk != blk_prev) {
                blk_prev = blk;
                total_sectors = blk_nb_sectors(blk);
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }

                blk_invalidate_cache(blk, &local_err);
                if (local_err) {
                    error_report_err(local_err);
                    return -EINVAL;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = blk_pwrite_zeroes(blk, addr * BDRV_SECTOR_SIZE,
                                        nr_sectors * BDRV_SECTOR_SIZE,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                buf = g_malloc(BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLOCK_SIZE);
                ret = blk_pwrite(blk, addr * BDRV_SECTOR_SIZE, buf,
                                 nr_sectors * BDRV_SECTOR_SIZE, 0);
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static void block_set_params(const MigrationParams *params, void *opaque)
{
    block_mig_state.blk_enable = params->blk;
    block_mig_state.shared_base = params->shared;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= params->shared;
}

static bool block_is_active(void *opaque)
{
    return block_mig_state.blk_enable == 1;
}

static SaveVMHandlers savevm_block_handlers = {
    .set_params = block_set_params,
    .save_live_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete_precopy = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .cleanup = block_migration_cleanup,
    .is_active = block_is_active,
};

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}