/* block/mirror.c */
/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16
#define MAX_IO_SECTORS ((1 << 20) >> BDRV_SECTOR_BITS) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE \
    (MAX_IN_FLIGHT * MAX_IO_SECTORS * BDRV_SECTOR_SIZE)
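/* With 512-byte sectors, MAX_IO_SECTORS is (1 << 20) >> 9 = 2048 sectors,
 * so the default buffer size works out to 16 * 2048 * 512 bytes = 16 MiB. */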

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *source;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t sectors_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_sectors;
    int max_iov;
    bool initial_zeroing_ongoing;
} MirrorBlockJob;

typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

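/* Complete one in-flight operation: return its buffer chunks to the free
 * list, clear its bits in the in-flight bitmap, account progress on success,
 * and re-enter the job coroutine through s->common.co if it is waiting for
 * I/O. */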
static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    s->sectors_in_flight -= op->nb_sectors;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = DIV_ROUND_UP(op->nb_sectors, sectors_per_chunk);
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
        }
    }
    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.co);
    }
}

static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
    aio_context_release(blk_get_aio_context(s->common.blk));
}

static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
    } else {
        blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov,
                        0, mirror_write_complete, op);
    }
    aio_context_release(blk_get_aio_context(s->common.blk));
}

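/* Clip *nb_sectors so that the request does not run past the end of the
 * device. */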
static inline void mirror_clip_sectors(MirrorBlockJob *s,
                                       int64_t sector_num,
                                       int *nb_sectors)
{
    *nb_sectors = MIN(*nb_sectors,
                      s->bdev_length / BDRV_SECTOR_SIZE - sector_num);
}

/* Round sector_num and/or nb_sectors to the target cluster if COW is needed,
 * and return the offset of the adjusted tail sector relative to the
 * original. */
static int mirror_cow_align(MirrorBlockJob *s,
                            int64_t *sector_num,
                            int *nb_sectors)
{
    bool need_cow;
    int ret = 0;
    int chunk_sectors = s->granularity >> BDRV_SECTOR_BITS;
    int64_t align_sector_num = *sector_num;
    int align_nb_sectors = *nb_sectors;
    int max_sectors = chunk_sectors * s->max_iov;

    need_cow = !test_bit(*sector_num / chunk_sectors, s->cow_bitmap);
    need_cow |= !test_bit((*sector_num + *nb_sectors - 1) / chunk_sectors,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_sectors_to_clusters(blk_bs(s->target), *sector_num,
                                       *nb_sectors, &align_sector_num,
                                       &align_nb_sectors);
    }

    if (align_nb_sectors > max_sectors) {
        align_nb_sectors = max_sectors;
        if (need_cow) {
            align_nb_sectors = QEMU_ALIGN_DOWN(align_nb_sectors,
                                               s->target_cluster_sectors);
        }
    }
    /* Clipping may leave align_nb_sectors unaligned to the chunk boundary,
     * but that doesn't matter because it is already the end of the source
     * image. */
    mirror_clip_sectors(s, align_sector_num, &align_nb_sectors);

    ret = align_sector_num + align_nb_sectors - (*sector_num + *nb_sectors);
    *sector_num = align_sector_num;
    *nb_sectors = align_nb_sectors;
    assert(ret >= 0);
    return ret;
}

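/* Yield until some in-flight operation completes; mirror_iteration_done()
 * re-enters the coroutine while the waiting_for_io flag is set. */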
static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}

/* Submit an async read while handling COW.
 * Returns: The number of sectors copied after and including sector_num,
 *          excluding any sectors copied prior to sector_num due to alignment.
 *          This will be nb_sectors if no alignment is necessary, or
 *          (new_end - sector_num) if the tail is rounded up or down due to
 *          alignment or the buffer limit.
 */
static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num,
                          int nb_sectors)
{
    BlockBackend *source = s->common.blk;
    int sectors_per_chunk, nb_chunks;
    int ret;
    MirrorOp *op;
    int max_sectors;

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    max_sectors = sectors_per_chunk * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors);
    nb_sectors = MIN(max_sectors, nb_sectors);
    assert(nb_sectors);
    ret = nb_sectors;

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &sector_num, &nb_sectors);
    }
    assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size);
    /* The sector range must be aligned to the granularity because:
     * 1) the caller passes in aligned values;
     * 2) mirror_cow_align is used only when the target cluster is larger. */
    assert(!(sector_num % sectors_per_chunk));
    nb_chunks = DIV_ROUND_UP(nb_sectors, sectors_per_chunk);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Allocate a MirrorOp that is used as an AIO callback. */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = nb_sectors * BDRV_SECTOR_SIZE - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster. */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);

    blk_aio_preadv(source, sector_num * BDRV_SECTOR_SIZE, &op->qiov, 0,
                   mirror_read_complete, op);
    return ret;
}

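/* Submit an async write-zeroes or discard to the target; completion goes
 * through mirror_write_complete() just like an ordinary write. */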
static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t sector_num,
                                      int nb_sectors,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
     * so the freeing in mirror_iteration_done is a nop. */
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    if (is_discard) {
        blk_aio_pdiscard(s->target, sector_num << BDRV_SECTOR_BITS,
                         op->nb_sectors << BDRV_SECTOR_BITS,
                         mirror_write_complete, op);
    } else {
        blk_aio_pwrite_zeroes(s->target, sector_num * BDRV_SECTOR_SIZE,
                              op->nb_sectors * BDRV_SECTOR_SIZE,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}

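/* Copy one batch of dirty chunks starting at the dirty-bitmap iterator
 * position: pick the mirroring method (copy, write-zeroes or discard) from
 * the block status, and return the delay in ns that the rate limiter asks
 * the caller to sleep. */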
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->source;
    int64_t sector_num, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    int64_t end = s->bdev_length / BDRV_SECTOR_SIZE;
    int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_sectors = MAX((s->buf_size >> BDRV_SECTOR_BITS) / MAX_IN_FLIGHT,
                             MAX_IO_SECTORS);

    sector_num = bdrv_dirty_iter_next(s->dbi);
    if (sector_num < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        sector_num = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(sector_num >= 0);
    }

    first_chunk = sector_num / sectors_per_chunk;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        mirror_wait_for_io(s);
    }

    block_job_pause_point(&s->common);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in-flight requests in them. */
    while (nb_chunks * sectors_per_chunk < (s->buf_size >> BDRV_SECTOR_BITS)) {
        int64_t next_dirty;
        int64_t next_sector = sector_num + nb_chunks * sectors_per_chunk;
        int64_t next_chunk = next_sector / sectors_per_chunk;
        if (next_sector >= end ||
            !bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_sector || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_sector);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_sector);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_get_block_status_above could yield; if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num,
                            nb_chunks * sectors_per_chunk);
    bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks);
    while (nb_chunks > 0 && sector_num < end) {
        int64_t ret;
        int io_sectors, io_sectors_acct;
        BlockDriverState *file;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(sector_num % sectors_per_chunk));
        ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                          nb_chunks * sectors_per_chunk,
                                          &io_sectors, &file);
        if (ret < 0) {
            io_sectors = MIN(nb_chunks * sectors_per_chunk, max_io_sectors);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_sectors = MIN(io_sectors, max_io_sectors);
        }

        io_sectors -= io_sectors % sectors_per_chunk;
        if (io_sectors < sectors_per_chunk) {
            io_sectors = sectors_per_chunk;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_sector_num;
            int target_nb_sectors;
            bdrv_round_sectors_to_clusters(blk_bs(s->target), sector_num,
                                           io_sectors, &target_sector_num,
                                           &target_nb_sectors);
            if (target_sector_num == sector_num &&
                target_nb_sectors == io_sectors) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
            mirror_wait_for_io(s);
        }

        if (s->ret < 0) {
            return 0;
        }

        mirror_clip_sectors(s, sector_num, &io_sectors);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_sectors = mirror_do_read(s, sector_num, io_sectors);
            io_sectors_acct = io_sectors;
            break;
        case MIRROR_METHOD_ZERO:
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, sector_num, io_sectors,
                                      mirror_method == MIRROR_METHOD_DISCARD);
            if (write_zeroes_ok) {
                io_sectors_acct = 0;
            } else {
                io_sectors_acct = io_sectors;
            }
            break;
        default:
            abort();
        }
        assert(io_sectors);
        sector_num += io_sectors;
        nb_chunks -= DIV_ROUND_UP(io_sectors, sectors_per_chunk);
        if (s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, io_sectors_acct);
        }
    }
    return delay_ns;
}

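/* Carve s->buf into granularity-sized chunks and put them all on the free
 * list. */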
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}

typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    BlockDriverState *mirror_top_bs = s->mirror_top_bs;
    Error *local_err = NULL;

    /* Make sure that the source BDS doesn't go away until we have called
     * block_job_completed(). */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions. */
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. */
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                data->ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            data->ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /* Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid. Also give up permissions on mirror_top_bs->backing, which might
     * block the removal. */
    block_job_remove_all_bdrv(job);
    bdrv_child_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(job->blk);
    blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(job->blk, mirror_top_bs, &error_abort);

    block_job_completed(&s->common, data->ret);

    g_free(data);
    bdrv_drained_end(src);
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);
}

static void mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > SLICE_TIME) {
        s->last_pause_ns = now;
        block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
    } else {
        block_job_pause_point(&s->common);
    }
}

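/* Prime the dirty bitmap for the first pass: if the target may not read as
 * zeroes, either mark the whole device dirty and return, or zero it out up
 * front; otherwise mark only the areas allocated above the base as dirty. */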
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t sector_num, end;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret, n;

    end = s->bdev_length / BDRV_SECTOR_SIZE;

    if (base == NULL && !bdrv_has_zero_init(target_bs)) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, end);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (sector_num = 0; sector_num < end; ) {
            int nb_sectors = MIN(end - sector_num,
                QEMU_ALIGN_DOWN(INT_MAX, s->granularity) >> BDRV_SECTOR_BITS);

            mirror_throttle(s);

            if (block_job_is_cancelled(&s->common)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, -1);
                mirror_wait_for_io(s);
                continue;
            }

            mirror_do_zero_or_discard(s, sector_num, nb_sectors, false);
            sector_num += nb_sectors;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap. */
    for (sector_num = 0; sector_num < end; ) {
        /* Just to make sure we are not exceeding the int limit. */
        int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
                             end - sector_num);

        mirror_throttle(s);

        if (block_job_is_cancelled(&s->common)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);
        if (ret < 0) {
            return ret;
        }

        assert(n > 0);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
        }
        sector_num += n;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

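/* Main job coroutine: size the target, prime the dirty bitmap, then loop
 * copying dirty chunks until source and target converge; once they do,
 * report BLOCK_JOB_READY and keep mirroring until completion or cancel. */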
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking whether the string is empty */
    int ret = 0;
    int target_cluster_size = BDRV_SECTOR_SIZE;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW. Instead, we copy sectors around the
     * dirty data if needed. We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        target_cluster_size = bdi.cluster_size;
    }
    if (backing_filename[0] && !target_bs->backing
        && s->granularity < target_cluster_size) {
        s->buf_size = MAX(s->buf_size, target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->target_cluster_sectors = target_cluster_size >> BDRV_SECTOR_BITS;
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || block_job_is_cancelled(&s->common)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap, 0);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        block_job_pause_point(&s->common);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->sectors_in_flight is the number of sectors currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset +
                        (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                mirror_wait_for_io(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret. */
                    continue;
                }
                /* We're out of the streaming phase. From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion. This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                block_job_event_ready(&s->common);
                s->synced = true;
            }

            should_complete = s->should_complete ||
                block_job_is_cancelled(&s->common);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while the mirror
             * job runs, so pause it now. Before deciding whether to switch
             * to the target, check one last time whether I/O has arrived in
             * the meantime, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                continue;
            }

            /* The two disks are in sync. Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong. Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);

    data = g_malloc(sizeof(*data));
    data->ret = ret;

    if (need_drain) {
        bdrv_drained_begin(bs);
    }
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}

static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

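/* Handler for block-job-complete: optionally reopen the target's backing
 * chain, take a blocker on the node to be replaced, then tell the job
 * coroutine to finish the switch. */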
static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't address
         * them. May need redefinition of GRAPH_MOD. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}

static void mirror_pause(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    mirror_wait_for_all_io(s);
}

static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    blk_set_aio_context(s->target, new_context);
}

static void mirror_drain(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of mirror_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}

static const BlockJobDriver mirror_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_MIRROR,
    .set_speed              = mirror_set_speed,
    .start                  = mirror_run,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_COMMIT,
    .set_speed              = mirror_set_speed,
    .start                  = mirror_run,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

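/* The callbacks below implement the mirror_top passthrough filter: each
 * request is forwarded unmodified to bs->backing, i.e. to the real image. */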
static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    return bdrv_co_flush(bs->backing->bs);
}

static int64_t coroutine_fn bdrv_mirror_top_get_block_status(
    BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum,
    BlockDriverState **file)
{
    *pnum = nb_sectors;
    *file = bs->backing->bs;
    return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID | BDRV_BLOCK_DATA |
           (sector_num << BDRV_SECTOR_BITS);
}

static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int count, BdrvRequestFlags flags)
{
    return bdrv_co_pwrite_zeroes(bs->backing, offset, count, flags);
}

static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int count)
{
    return bdrv_co_pdiscard(bs->backing->bs, offset, count);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs, QDict *opts)
{
    bdrv_refresh_filename(bs->backing->bs);
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_close(BlockDriverState *bs)
{
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       const BdrvChildRole *role,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    /* Must be able to forward guest writes to the real image */
    *nperm = 0;
    if (perm & BLK_PERM_WRITE) {
        *nperm |= BLK_PERM_WRITE;
    }

    *nshared = BLK_PERM_ALL;
}

/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_co_get_block_status   = bdrv_mirror_top_get_block_status,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_close                 = bdrv_mirror_top_close,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,
};

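/* Common setup shared by mirror_start() and commit_active_start(): insert
 * the mirror_top filter above bs, create the block job, take permissions on
 * the target, create the dirty bitmap, and start the job coroutine. */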
static void mirror_start_job(const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name)
{
    MirrorBlockJob *s;
    BlockDriverState *mirror_top_bs;
    bool target_graph_mod;
    bool target_is_backing;
    Error *local_err = NULL;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert((granularity & (granularity - 1)) == 0);

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    /* In the case of active commit, add a dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return;
    }
    mirror_top_bs->total_sectors = bs->total_sectors;

    /* bdrv_append takes ownership of the mirror_top_bs reference, so we need
     * to keep it alive until block_job_create() even if bs has no parent. */
    bdrv_ref(mirror_top_bs);
    bdrv_drained_begin(bs);
    bdrv_append(mirror_top_bs, bs, &local_err);
    bdrv_drained_end(bs);

    if (local_err) {
        bdrv_unref(mirror_top_bs);
        error_propagate(errp, local_err);
        return;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
                         creation_flags, cb, opaque, errp);
    bdrv_unref(mirror_top_bs);
    if (!s) {
        goto fail;
    }
    s->source = bs;
    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */
    target_is_backing = bdrv_chain_contains(bs, target);
    target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
    s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
                        (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
                        BLK_PERM_WRITE_UNCHANGED |
                        (target_is_backing ? BLK_PERM_CONSISTENT_READ |
                                             BLK_PERM_WRITE |
                                             BLK_PERM_GRAPH_MOD : 0));
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter;
        for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
            /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
             * ourselves at s->base (if writes are blocked for a node, they are
             * also blocked for its backing file). The other options would be a
             * second filter driver above s->base (== target). */
            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                     errp);
            if (ret < 0) {
                goto fail;
            }
        }
    }

    trace_mirror_start(bs, s, opaque);
    block_job_start(&s->common);
    return;

fail:
    if (s) {
        g_free(s->replaces);
        blk_unref(s->target);
        block_job_unref(&s->common);
    }

    bdrv_child_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);
}

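/* Entry point for the mirror block job (used by the drive-mirror and
 * blockdev-mirror QMP commands): map the sync mode onto is_none_mode/base
 * and start the job with mirror_job_driver. */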
void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(job_id, bs, BLOCK_JOB_DEFAULT, target, replaces,
                     speed, granularity, buf_size, backing_mode,
                     on_source_error, on_target_error, unmap, NULL, NULL, errp,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name);
}

void commit_active_start(const char *job_id, BlockDriverState *bs,
                         BlockDriverState *base, int creation_flags,
                         int64_t speed, BlockdevOnError on_error,
                         const char *filter_node_name,
                         BlockCompletionFunc *cb, void *opaque, Error **errp,
                         bool auto_complete)
{
    int orig_base_flags;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN,
                     on_error, on_error, true, cb, opaque, &local_err,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
}