block: Remove second bdrv_open() recursion
[qemu.git] / block.c
/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"
#include "qemu-common.h"
#include "trace.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qemu/module.h"
#include "qapi/qmp/qjson.h"
#include "sysemu/sysemu.h"
#include "qemu/notify.h"
#include "block/coroutine.h"
#include "block/qapi.h"
#include "qmp-commands.h"
#include "qemu/timer.h"
#include "qapi-event.h"

#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/queue.h>
#ifndef __DragonFly__
#include <sys/disk.h>
#endif
#endif

#ifdef _WIN32
#include <windows.h>
#endif

struct BdrvDirtyBitmap {
    HBitmap *bitmap;
    QLIST_ENTRY(BdrvDirtyBitmap) list;
};

#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */

static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load);
static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque);
static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors,
                                         QEMUIOVector *iov);
static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
                                               int64_t sector_num,
                                               QEMUIOVector *qiov,
                                               int nb_sectors,
                                               BdrvRequestFlags flags,
                                               BlockDriverCompletionFunc *cb,
                                               void *opaque,
                                               bool is_write);
static void coroutine_fn bdrv_co_do_rw(void *opaque);
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);

static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
    QTAILQ_HEAD_INITIALIZER(bdrv_states);

static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states =
    QTAILQ_HEAD_INITIALIZER(graph_bdrv_states);

static QLIST_HEAD(, BlockDriver) bdrv_drivers =
    QLIST_HEAD_INITIALIZER(bdrv_drivers);

/* If non-zero, use only whitelisted block drivers */
static int use_bdrv_whitelist;

#ifdef _WIN32
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}

int is_windows_drive(const char *filename)
{
    if (is_windows_drive_prefix(filename) &&
        filename[2] == '\0')
        return 1;
    if (strstart(filename, "\\\\.\\", NULL) ||
        strstart(filename, "//./", NULL))
        return 1;
    return 0;
}
#endif

/* throttling disk I/O limits */
void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg)
{
    int i;

    throttle_config(&bs->throttle_state, cfg);

    for (i = 0; i < 2; i++) {
        qemu_co_enter_next(&bs->throttled_reqs[i]);
    }
}

/* this function drains all the throttled I/Os */
static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
{
    bool drained = false;
    bool enabled = bs->io_limits_enabled;
    int i;

    bs->io_limits_enabled = false;

    for (i = 0; i < 2; i++) {
        while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
            drained = true;
        }
    }

    bs->io_limits_enabled = enabled;

    return drained;
}

void bdrv_io_limits_disable(BlockDriverState *bs)
{
    bs->io_limits_enabled = false;

    bdrv_start_throttled_reqs(bs);

    throttle_destroy(&bs->throttle_state);
}

static void bdrv_throttle_read_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[0]);
}

static void bdrv_throttle_write_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    qemu_co_enter_next(&bs->throttled_reqs[1]);
}

/* should be called before bdrv_set_io_limits if a limit is set */
void bdrv_io_limits_enable(BlockDriverState *bs)
{
    assert(!bs->io_limits_enabled);
    throttle_init(&bs->throttle_state,
                  bdrv_get_aio_context(bs),
                  QEMU_CLOCK_VIRTUAL,
                  bdrv_throttle_read_timer_cb,
                  bdrv_throttle_write_timer_cb,
                  bs);
    bs->io_limits_enabled = true;
}
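
/*
 * Illustrative usage sketch (editor's addition, not part of the driver):
 * enable throttling on a BDS and cap it at roughly 1 MB/s of total
 * bandwidth. The ThrottleConfig layout (buckets[THROTTLE_BPS_TOTAL].avg)
 * is assumed from include/qemu/throttle.h of this period; verify against
 * the header before copying.
 *
 *     ThrottleConfig cfg;
 *     memset(&cfg, 0, sizeof(cfg));
 *     cfg.buckets[THROTTLE_BPS_TOTAL].avg = 1024 * 1024;
 *
 *     bdrv_io_limits_enable(bs);
 *     bdrv_set_io_limits(bs, &cfg);
 */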

/* This function makes an I/O request wait if needed
 *
 * @bytes:    the number of bytes of the I/O
 * @is_write: is the I/O a write
 */
static void bdrv_io_limits_intercept(BlockDriverState *bs,
                                     unsigned int bytes,
                                     bool is_write)
{
    /* does this I/O have to wait? */
    bool must_wait = throttle_schedule_timer(&bs->throttle_state, is_write);

    /* if it must wait, or any request of this type is already throttled,
     * queue the I/O */
    if (must_wait ||
        !qemu_co_queue_empty(&bs->throttled_reqs[is_write])) {
        qemu_co_queue_wait(&bs->throttled_reqs[is_write]);
    }

    /* the I/O will be executed, do the accounting */
    throttle_account(&bs->throttle_state, is_write, bytes);


    /* if the next request must wait -> do nothing */
    if (throttle_schedule_timer(&bs->throttle_state, is_write)) {
        return;
    }

    /* else queue next request for execution */
    qemu_co_queue_next(&bs->throttled_reqs[is_write]);
}

size_t bdrv_opt_mem_align(BlockDriverState *bs)
{
    if (!bs || !bs->drv) {
        /* 4k should be on the safe side */
        return 4096;
    }

    return bs->bl.opt_mem_alignment;
}
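
/*
 * Illustrative sketch (editor's addition): allocating an I/O buffer that
 * honours the device's memory alignment requirement, e.g. for O_DIRECT
 * style access. qemu_memalign()/qemu_vfree() are QEMU's usual aligned
 * allocator pair.
 *
 *     size_t align = bdrv_opt_mem_align(bs);
 *     void *buf = qemu_memalign(align, len);
 *     ...
 *     qemu_vfree(buf);
 */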

/* check if the path starts with "<protocol>:" */
static int path_has_protocol(const char *path)
{
    const char *p;

#ifdef _WIN32
    if (is_windows_drive(path) ||
        is_windows_drive_prefix(path)) {
        return 0;
    }
    p = path + strcspn(path, ":/\\");
#else
    p = path + strcspn(path, ":/");
#endif

    return *p == ':';
}

int path_is_absolute(const char *path)
{
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (is_windows_drive(path) || is_windows_drive_prefix(path)) {
        return 1;
    }
    return (*path == '/' || *path == '\\');
#else
    return (*path == '/');
#endif
}

/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it as relative to base_path. URLs are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
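
/*
 * Illustrative sketch (editor's addition): resolving a relative backing
 * file name against the location of its overlay, which is exactly what
 * bdrv_get_full_backing_filename() below does. With base
 * "/images/overlay.qcow2" and filename "base.raw", dest becomes
 * "/images/base.raw"; an absolute filename would be copied verbatim.
 *
 *     char dest[PATH_MAX];
 *     path_combine(dest, sizeof(dest), "/images/overlay.qcow2", "base.raw");
 */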

void bdrv_get_full_backing_filename(BlockDriverState *bs, char *dest, size_t sz)
{
    if (bs->backing_file[0] == '\0' || path_has_protocol(bs->backing_file)) {
        pstrcpy(dest, sz, bs->backing_file);
    } else {
        path_combine(dest, sz, bs->filename, bs->backing_file);
    }
}

void bdrv_register(BlockDriver *bdrv)
{
    /* Block drivers without coroutine functions need emulation */
    if (!bdrv->bdrv_co_readv) {
        bdrv->bdrv_co_readv = bdrv_co_readv_em;
        bdrv->bdrv_co_writev = bdrv_co_writev_em;

        /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
         * the block driver lacks aio we need to emulate that too.
         */
        if (!bdrv->bdrv_aio_readv) {
            /* add AIO emulation layer */
            bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
            bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
        }
    }

    QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
}

/* create a new block device (by default it is empty) */
BlockDriverState *bdrv_new(const char *device_name, Error **errp)
{
    BlockDriverState *bs;
    int i;

    if (bdrv_find(device_name)) {
        error_setg(errp, "Device with id '%s' already exists",
                   device_name);
        return NULL;
    }
    if (bdrv_find_node(device_name)) {
        error_setg(errp, "Device with node-name '%s' already exists",
                   device_name);
        return NULL;
    }

    bs = g_malloc0(sizeof(BlockDriverState));
    QLIST_INIT(&bs->dirty_bitmaps);
    pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
    if (device_name[0] != '\0') {
        QTAILQ_INSERT_TAIL(&bdrv_states, bs, device_list);
    }
    for (i = 0; i < BLOCK_OP_TYPE_MAX; i++) {
        QLIST_INIT(&bs->op_blockers[i]);
    }
    bdrv_iostatus_disable(bs);
    notifier_list_init(&bs->close_notifiers);
    notifier_with_return_list_init(&bs->before_write_notifiers);
    qemu_co_queue_init(&bs->throttled_reqs[0]);
    qemu_co_queue_init(&bs->throttled_reqs[1]);
    bs->refcnt = 1;
    bs->aio_context = qemu_get_aio_context();

    return bs;
}
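
/*
 * Illustrative sketch (editor's addition): creating an empty, named BDS.
 * The name must not clash with an existing device id or node-name,
 * otherwise NULL is returned and errp is set.
 *
 *     Error *local_err = NULL;
 *     BlockDriverState *bs = bdrv_new("disk0", &local_err);
 *     if (!bs) {
 *         error_report("%s", error_get_pretty(local_err));
 *         error_free(local_err);
 *     }
 */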

void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
    notifier_list_add(&bs->close_notifiers, notify);
}

BlockDriver *bdrv_find_format(const char *format_name)
{
    BlockDriver *drv1;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (!strcmp(drv1->format_name, format_name)) {
            return drv1;
        }
    }
    return NULL;
}

static int bdrv_is_whitelisted(BlockDriver *drv, bool read_only)
{
    static const char *whitelist_rw[] = {
        CONFIG_BDRV_RW_WHITELIST
    };
    static const char *whitelist_ro[] = {
        CONFIG_BDRV_RO_WHITELIST
    };
    const char **p;

    if (!whitelist_rw[0] && !whitelist_ro[0]) {
        return 1; /* no whitelist, anything goes */
    }

    for (p = whitelist_rw; *p; p++) {
        if (!strcmp(drv->format_name, *p)) {
            return 1;
        }
    }
    if (read_only) {
        for (p = whitelist_ro; *p; p++) {
            if (!strcmp(drv->format_name, *p)) {
                return 1;
            }
        }
    }
    return 0;
}

BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
                                          bool read_only)
{
    BlockDriver *drv = bdrv_find_format(format_name);
    return drv && bdrv_is_whitelisted(drv, read_only) ? drv : NULL;
}

typedef struct CreateCo {
    BlockDriver *drv;
    char *filename;
    QemuOpts *opts;
    int ret;
    Error *err;
} CreateCo;

static void coroutine_fn bdrv_create_co_entry(void *opaque)
{
    Error *local_err = NULL;
    int ret;

    CreateCo *cco = opaque;
    assert(cco->drv);

    ret = cco->drv->bdrv_create(cco->filename, cco->opts, &local_err);
    if (local_err) {
        error_propagate(&cco->err, local_err);
    }
    cco->ret = ret;
}

int bdrv_create(BlockDriver *drv, const char* filename,
                QemuOpts *opts, Error **errp)
{
    int ret;

    Coroutine *co;
    CreateCo cco = {
        .drv = drv,
        .filename = g_strdup(filename),
        .opts = opts,
        .ret = NOT_DONE,
        .err = NULL,
    };

    if (!drv->bdrv_create) {
        error_setg(errp, "Driver '%s' does not support image creation",
                   drv->format_name);
        ret = -ENOTSUP;
        goto out;
    }

    if (qemu_in_coroutine()) {
        /* Fast-path if already in coroutine context */
        bdrv_create_co_entry(&cco);
    } else {
        co = qemu_coroutine_create(bdrv_create_co_entry);
        qemu_coroutine_enter(co, &cco);
        while (cco.ret == NOT_DONE) {
            qemu_aio_wait();
        }
    }

    ret = cco.ret;
    if (ret < 0) {
        if (cco.err) {
            error_propagate(errp, cco.err);
        } else {
            error_setg_errno(errp, -ret, "Could not create image");
        }
    }

out:
    g_free(cco.filename);
    return ret;
}
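
/*
 * Illustrative sketch (editor's addition): creating a 1 GiB qcow2 image.
 * The create_opts/QemuOpts pattern and BLOCK_OPT_SIZE are the same ones
 * bdrv_append_temp_snapshot() uses further down in this file; the image
 * path is hypothetical.
 *
 *     Error *local_err = NULL;
 *     BlockDriver *drv = bdrv_find_format("qcow2");
 *     QemuOpts *opts = qemu_opts_create(drv->create_opts, NULL, 0,
 *                                       &error_abort);
 *     qemu_opt_set_number(opts, BLOCK_OPT_SIZE, 1024 * 1024 * 1024);
 *     int ret = bdrv_create(drv, "/tmp/test.qcow2", opts, &local_err);
 *     qemu_opts_del(opts);
 */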

int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp)
{
    BlockDriver *drv;
    Error *local_err = NULL;
    int ret;

    drv = bdrv_find_protocol(filename, true);
    if (drv == NULL) {
        error_setg(errp, "Could not find protocol for file '%s'", filename);
        return -ENOENT;
    }

    ret = bdrv_create(drv, filename, opts, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}

int bdrv_refresh_limits(BlockDriverState *bs)
{
    BlockDriver *drv = bs->drv;

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return 0;
    }

    /* Take some limits from the children as a default */
    if (bs->file) {
        bdrv_refresh_limits(bs->file);
        bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
        bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
    } else {
        bs->bl.opt_mem_alignment = 512;
    }

    if (bs->backing_hd) {
        bdrv_refresh_limits(bs->backing_hd);
        bs->bl.opt_transfer_length =
            MAX(bs->bl.opt_transfer_length,
                bs->backing_hd->bl.opt_transfer_length);
        bs->bl.opt_mem_alignment =
            MAX(bs->bl.opt_mem_alignment,
                bs->backing_hd->bl.opt_mem_alignment);
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        return drv->bdrv_refresh_limits(bs);
    }

    return 0;
}

/*
 * Create a uniquely-named empty temporary file.
 * Return 0 upon success, otherwise a negative errno value.
 */
int get_tmp_filename(char *filename, int size)
{
#ifdef _WIN32
    char temp_dir[MAX_PATH];
    /* GetTempFileName requires that its output buffer (4th param)
       have length MAX_PATH or greater. */
    assert(size >= MAX_PATH);
    return (GetTempPath(MAX_PATH, temp_dir)
            && GetTempFileName(temp_dir, "qem", 0, filename)
            ? 0 : -GetLastError());
#else
    int fd;
    const char *tmpdir;
    tmpdir = getenv("TMPDIR");
    if (!tmpdir) {
        tmpdir = "/var/tmp";
    }
    if (snprintf(filename, size, "%s/vl.XXXXXX", tmpdir) >= size) {
        return -EOVERFLOW;
    }
    fd = mkstemp(filename);
    if (fd < 0) {
        return -errno;
    }
    if (close(fd) != 0) {
        unlink(filename);
        return -errno;
    }
    return 0;
#endif
}
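
/*
 * Illustrative sketch (editor's addition): obtaining a scratch file, as
 * bdrv_append_temp_snapshot() does for snapshot=on overlays. A negative
 * return is an -errno value, e.g. -EOVERFLOW when the buffer is too small.
 *
 *     char tmp[PATH_MAX + 1];
 *     int ret = get_tmp_filename(tmp, sizeof(tmp));
 */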

/*
 * Detect host devices. By convention, /dev/cdrom[N] is always
 * recognized as a host CDROM.
 */
static BlockDriver *find_hdev_driver(const char *filename)
{
    int score_max = 0, score;
    BlockDriver *drv = NULL, *d;

    QLIST_FOREACH(d, &bdrv_drivers, list) {
        if (d->bdrv_probe_device) {
            score = d->bdrv_probe_device(filename);
            if (score > score_max) {
                score_max = score;
                drv = d;
            }
        }
    }

    return drv;
}

BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix)
{
    BlockDriver *drv1;
    char protocol[128];
    int len;
    const char *p;

    /* TODO Drivers without bdrv_file_open must be specified explicitly */

    /*
     * XXX(hch): we really should not let host device detection
     * override an explicit protocol specification, but moving this
     * later breaks access to device names with colons in them.
     * Thanks to the brain-dead persistent naming schemes on udev-
     * based Linux systems those actually are quite common.
     */
    drv1 = find_hdev_driver(filename);
    if (drv1) {
        return drv1;
    }

    if (!path_has_protocol(filename) || !allow_protocol_prefix) {
        return bdrv_find_format("file");
    }

    p = strchr(filename, ':');
    assert(p != NULL);
    len = p - filename;
    if (len > sizeof(protocol) - 1)
        len = sizeof(protocol) - 1;
    memcpy(protocol, filename, len);
    protocol[len] = '\0';
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->protocol_name &&
            !strcmp(drv1->protocol_name, protocol)) {
            return drv1;
        }
    }
    return NULL;
}

static int find_image_format(BlockDriverState *bs, const char *filename,
                             BlockDriver **pdrv, Error **errp)
{
    int score, score_max;
    BlockDriver *drv1, *drv;
    uint8_t buf[2048];
    int ret = 0;

    /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
    if (bs->sg || !bdrv_is_inserted(bs) || bdrv_getlength(bs) == 0) {
        drv = bdrv_find_format("raw");
        if (!drv) {
            error_setg(errp, "Could not find raw image format");
            ret = -ENOENT;
        }
        *pdrv = drv;
        return ret;
    }

    ret = bdrv_pread(bs, 0, buf, sizeof(buf));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read image for determining its "
                         "format");
        *pdrv = NULL;
        return ret;
    }

    score_max = 0;
    drv = NULL;
    QLIST_FOREACH(drv1, &bdrv_drivers, list) {
        if (drv1->bdrv_probe) {
            score = drv1->bdrv_probe(buf, ret, filename);
            if (score > score_max) {
                score_max = score;
                drv = drv1;
            }
        }
    }
    if (!drv) {
        error_setg(errp, "Could not determine image format: No compatible "
                   "driver found");
        ret = -ENOENT;
    }
    *pdrv = drv;
    return ret;
}

/**
 * Set the current 'total_sectors' value
 */
static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
{
    BlockDriver *drv = bs->drv;

    /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
    if (bs->sg)
        return 0;

    /* query actual device if possible, otherwise just trust the hint */
    if (drv->bdrv_getlength) {
        int64_t length = drv->bdrv_getlength(bs);
        if (length < 0) {
            return length;
        }
        hint = DIV_ROUND_UP(length, BDRV_SECTOR_SIZE);
    }

    bs->total_sectors = hint;
    return 0;
}

/**
 * Set open flags for a given discard mode
 *
 * Return 0 on success, -1 if the discard mode was invalid.
 */
int bdrv_parse_discard_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_UNMAP;

    if (!strcmp(mode, "off") || !strcmp(mode, "ignore")) {
        /* do nothing */
    } else if (!strcmp(mode, "on") || !strcmp(mode, "unmap")) {
        *flags |= BDRV_O_UNMAP;
    } else {
        return -1;
    }

    return 0;
}

/**
 * Set open flags for a given cache mode
 *
 * Return 0 on success, -1 if the cache mode was invalid.
 */
int bdrv_parse_cache_flags(const char *mode, int *flags)
{
    *flags &= ~BDRV_O_CACHE_MASK;

    if (!strcmp(mode, "off") || !strcmp(mode, "none")) {
        *flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "directsync")) {
        *flags |= BDRV_O_NOCACHE;
    } else if (!strcmp(mode, "writeback")) {
        *flags |= BDRV_O_CACHE_WB;
    } else if (!strcmp(mode, "unsafe")) {
        *flags |= BDRV_O_CACHE_WB;
        *flags |= BDRV_O_NO_FLUSH;
    } else if (!strcmp(mode, "writethrough")) {
        /* this is the default */
    } else {
        return -1;
    }

    return 0;
}
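
/*
 * Illustrative sketch (editor's addition): translating -drive style cache=
 * and discard= values into BDRV_O_* open flags before an open. After the
 * two calls below, flags has BDRV_O_CACHE_WB and BDRV_O_UNMAP set.
 *
 *     int flags = 0;
 *     if (bdrv_parse_cache_flags("writeback", &flags) != 0 ||
 *         bdrv_parse_discard_flags("unmap", &flags) != 0) {
 *         ... report the invalid mode string ...
 *     }
 */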

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    bs->copy_on_read++;
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    assert(bs->copy_on_read > 0);
    bs->copy_on_read--;
}

/*
 * Returns the flags that a temporary snapshot should get, based on the
 * originally requested flags (the originally requested image will have flags
 * like a backing file)
 */
static int bdrv_temp_snapshot_flags(int flags)
{
    return (flags & ~BDRV_O_SNAPSHOT) | BDRV_O_TEMPORARY;
}

/*
 * Returns the flags that bs->file should get, based on the given flags for
 * the parent BDS
 */
static int bdrv_inherited_flags(int flags)
{
    /* Enable protocol handling, disable format probing for bs->file */
    flags |= BDRV_O_PROTOCOL;

    /* Our block drivers take care to send flushes and respect unmap policy,
     * so we can enable both unconditionally on lower layers. */
    flags |= BDRV_O_CACHE_WB | BDRV_O_UNMAP;

    /* Clear flags that only apply to the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_COPY_ON_READ);

    return flags;
}

/*
 * Returns the flags that bs->backing_hd should get, based on the given flags
 * for the parent BDS
 */
static int bdrv_backing_flags(int flags)
{
    /* backing files are always opened read-only */
    flags &= ~(BDRV_O_RDWR | BDRV_O_COPY_ON_READ);

    /* snapshot=on is handled on the top layer */
    flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_TEMPORARY);

    return flags;
}

static int bdrv_open_flags(BlockDriverState *bs, int flags)
{
    int open_flags = flags | BDRV_O_CACHE_WB;

    /*
     * Clear flags that are internal to the block layer before opening the
     * image.
     */
    open_flags &= ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);

    /*
     * Snapshots should be writable.
     */
    if (flags & BDRV_O_TEMPORARY) {
        open_flags |= BDRV_O_RDWR;
    }

    return open_flags;
}

static void bdrv_assign_node_name(BlockDriverState *bs,
                                  const char *node_name,
                                  Error **errp)
{
    if (!node_name) {
        return;
    }

    /* empty string node name is invalid */
    if (node_name[0] == '\0') {
        error_setg(errp, "Empty node name");
        return;
    }

    /* takes care of avoiding namespace collisions */
    if (bdrv_find(node_name)) {
        error_setg(errp, "node-name=%s is conflicting with a device id",
                   node_name);
        return;
    }

    /* takes care of avoiding duplicate node names */
    if (bdrv_find_node(node_name)) {
        error_setg(errp, "Duplicate node name");
        return;
    }

    /* copy node name into the bs and insert it into the graph list */
    pstrcpy(bs->node_name, sizeof(bs->node_name), node_name);
    QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs, node_list);
}

/*
 * Common part for opening disk images and files
 *
 * Removes all processed options from *options.
 */
static int bdrv_open_common(BlockDriverState *bs, BlockDriverState *file,
                            QDict *options, int flags, BlockDriver *drv,
                            Error **errp)
{
    int ret, open_flags;
    const char *filename;
    const char *node_name = NULL;
    Error *local_err = NULL;

    assert(drv != NULL);
    assert(bs->file == NULL);
    assert(options != NULL && bs->options != options);

    if (file != NULL) {
        filename = file->filename;
    } else {
        filename = qdict_get_try_str(options, "filename");
    }

    if (drv->bdrv_needs_filename && !filename) {
        error_setg(errp, "The '%s' block driver requires a file name",
                   drv->format_name);
        return -EINVAL;
    }

    trace_bdrv_open_common(bs, filename ?: "", flags, drv->format_name);

    node_name = qdict_get_try_str(options, "node-name");
    bdrv_assign_node_name(bs, node_name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return -EINVAL;
    }
    qdict_del(options, "node-name");

    /* bdrv_open() was called with a protocol driver directly as drv. That
     * layer is already opened, so assign it to bs (while file becomes a
     * closed BlockDriverState) and return immediately. */
    if (file != NULL && drv->bdrv_file_open) {
        bdrv_swap(file, bs);
        return 0;
    }

    bs->open_flags = flags;
    bs->guest_block_size = 512;
    bs->request_alignment = 512;
    bs->zero_beyond_eof = true;
    open_flags = bdrv_open_flags(bs, flags);
    bs->read_only = !(open_flags & BDRV_O_RDWR);

    if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
        error_setg(errp,
                   !bs->read_only && bdrv_is_whitelisted(drv, true)
                   ? "Driver '%s' can only be used for read-only devices"
                   : "Driver '%s' is not whitelisted",
                   drv->format_name);
        return -ENOTSUP;
    }

    assert(bs->copy_on_read == 0); /* bdrv_new() and bdrv_close() make it so */
    if (flags & BDRV_O_COPY_ON_READ) {
        if (!bs->read_only) {
            bdrv_enable_copy_on_read(bs);
        } else {
            error_setg(errp, "Can't use copy-on-read on read-only device");
            return -EINVAL;
        }
    }

    if (filename != NULL) {
        pstrcpy(bs->filename, sizeof(bs->filename), filename);
    } else {
        bs->filename[0] = '\0';
    }

    bs->drv = drv;
    bs->opaque = g_malloc0(drv->instance_size);

    bs->enable_write_cache = !!(flags & BDRV_O_CACHE_WB);

    /* Open the image, either directly or using a protocol */
    if (drv->bdrv_file_open) {
        assert(file == NULL);
        assert(!drv->bdrv_needs_filename || filename != NULL);
        ret = drv->bdrv_file_open(bs, options, open_flags, &local_err);
    } else {
        if (file == NULL) {
            error_setg(errp, "Can't use '%s' as a block driver for the "
                       "protocol level", drv->format_name);
            ret = -EINVAL;
            goto free_and_fail;
        }
        bs->file = file;
        ret = drv->bdrv_open(bs, options, open_flags, &local_err);
    }

    if (ret < 0) {
        if (local_err) {
            error_propagate(errp, local_err);
        } else if (bs->filename[0]) {
            error_setg_errno(errp, -ret, "Could not open '%s'", bs->filename);
        } else {
            error_setg_errno(errp, -ret, "Could not open image");
        }
        goto free_and_fail;
    }

    ret = refresh_total_sectors(bs, bs->total_sectors);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
        goto free_and_fail;
    }

    bdrv_refresh_limits(bs);
    assert(bdrv_opt_mem_align(bs) != 0);
    assert((bs->request_alignment != 0) || bs->sg);
    return 0;

free_and_fail:
    bs->file = NULL;
    g_free(bs->opaque);
    bs->opaque = NULL;
    bs->drv = NULL;
    return ret;
}

static QDict *parse_json_filename(const char *filename, Error **errp)
{
    QObject *options_obj;
    QDict *options;
    int ret;

    ret = strstart(filename, "json:", &filename);
    assert(ret);

    options_obj = qobject_from_json(filename);
    if (!options_obj) {
        error_setg(errp, "Could not parse the JSON options");
        return NULL;
    }

    if (qobject_type(options_obj) != QTYPE_QDICT) {
        qobject_decref(options_obj);
        error_setg(errp, "Invalid JSON object given");
        return NULL;
    }

    options = qobject_to_qdict(options_obj);
    qdict_flatten(options);

    return options;
}
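
/*
 * Illustrative sketch (editor's addition): the json: pseudo-protocol
 * handled here lets a whole options dict travel inside the filename
 * string, and qdict_flatten() turns the nested dict into dotted keys.
 * A hypothetical example:
 *
 *     json:{"driver": "qcow2", "file": {"driver": "file",
 *                                       "filename": "test.qcow2"}}
 *
 * flattens to driver=qcow2, file.driver=file, file.filename=test.qcow2.
 */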

/*
 * Fills in default options for opening images and converts the legacy
 * filename/flags pair to option QDict entries.
 */
static int bdrv_fill_options(QDict **options, const char **pfilename, int flags,
                             BlockDriver *drv, Error **errp)
{
    const char *filename = *pfilename;
    const char *drvname;
    bool protocol = flags & BDRV_O_PROTOCOL;
    bool parse_filename = false;
    Error *local_err = NULL;

    /* Parse json: pseudo-protocol */
    if (filename && g_str_has_prefix(filename, "json:")) {
        QDict *json_options = parse_json_filename(filename, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return -EINVAL;
        }

        /* Options given in the filename have lower priority than options
         * specified directly */
        qdict_join(*options, json_options, false);
        QDECREF(json_options);
        *pfilename = filename = NULL;
    }

    /* Fetch the file name from the options QDict if necessary */
    if (protocol && filename) {
        if (!qdict_haskey(*options, "filename")) {
            qdict_put(*options, "filename", qstring_from_str(filename));
            parse_filename = true;
        } else {
            error_setg(errp, "Can't specify 'file' and 'filename' options at "
                       "the same time");
            return -EINVAL;
        }
    }

    /* Find the right block driver */
    filename = qdict_get_try_str(*options, "filename");
    drvname = qdict_get_try_str(*options, "driver");

    if (drv) {
        if (drvname) {
            error_setg(errp, "Driver specified twice");
            return -EINVAL;
        }
        drvname = drv->format_name;
        qdict_put(*options, "driver", qstring_from_str(drvname));
    } else {
        if (!drvname && protocol) {
            if (filename) {
                drv = bdrv_find_protocol(filename, parse_filename);
                if (!drv) {
                    error_setg(errp, "Unknown protocol");
                    return -EINVAL;
                }

                drvname = drv->format_name;
                qdict_put(*options, "driver", qstring_from_str(drvname));
            } else {
                error_setg(errp, "Must specify either driver or file");
                return -EINVAL;
            }
        } else if (drvname) {
            drv = bdrv_find_format(drvname);
            if (!drv) {
                error_setg(errp, "Unknown driver '%s'", drvname);
                return -ENOENT;
            }
        }
    }

    assert(drv || !protocol);

    /* Driver-specific filename parsing */
    if (drv && drv->bdrv_parse_filename && parse_filename) {
        drv->bdrv_parse_filename(filename, *options, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return -EINVAL;
        }

        if (!drv->bdrv_needs_filename) {
            qdict_del(*options, "filename");
        }
    }

    return 0;
}

void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd)
{

    if (bs->backing_hd) {
        assert(bs->backing_blocker);
        bdrv_op_unblock_all(bs->backing_hd, bs->backing_blocker);
    } else if (backing_hd) {
        error_setg(&bs->backing_blocker,
                   "device is used as backing hd of '%s'",
                   bs->device_name);
    }

    bs->backing_hd = backing_hd;
    if (!backing_hd) {
        error_free(bs->backing_blocker);
        bs->backing_blocker = NULL;
        goto out;
    }
    bs->open_flags &= ~BDRV_O_NO_BACKING;
    pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_hd->filename);
    pstrcpy(bs->backing_format, sizeof(bs->backing_format),
            backing_hd->drv ? backing_hd->drv->format_name : "");

    bdrv_op_block_all(bs->backing_hd, bs->backing_blocker);
    /* Otherwise we won't be able to commit due to the check in bdrv_commit */
    bdrv_op_unblock(bs->backing_hd, BLOCK_OP_TYPE_COMMIT,
                    bs->backing_blocker);
out:
    bdrv_refresh_limits(bs);
}

/*
 * Opens the backing file for a BlockDriverState if not yet open
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict is transferred to this
 * function (even on failure), so if the caller intends to reuse the dictionary,
 * it needs to use QINCREF() before calling bdrv_open_backing_file().
 */
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp)
{
    char *backing_filename = g_malloc0(PATH_MAX);
    int ret = 0;
    BlockDriver *back_drv = NULL;
    BlockDriverState *backing_hd;
    Error *local_err = NULL;

    if (bs->backing_hd != NULL) {
        QDECREF(options);
        goto free_exit;
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    bs->open_flags &= ~BDRV_O_NO_BACKING;
    if (qdict_haskey(options, "file.filename")) {
        backing_filename[0] = '\0';
    } else if (bs->backing_file[0] == '\0' && qdict_size(options) == 0) {
        QDECREF(options);
        goto free_exit;
    } else {
        bdrv_get_full_backing_filename(bs, backing_filename, PATH_MAX);
    }

    backing_hd = bdrv_new("", errp);

    if (bs->backing_format[0] != '\0') {
        back_drv = bdrv_find_format(bs->backing_format);
    }

    assert(bs->backing_hd == NULL);
    ret = bdrv_open(&backing_hd,
                    *backing_filename ? backing_filename : NULL, NULL, options,
                    bdrv_backing_flags(bs->open_flags), back_drv, &local_err);
    if (ret < 0) {
        bdrv_unref(backing_hd);
        backing_hd = NULL;
        bs->open_flags |= BDRV_O_NO_BACKING;
        error_setg(errp, "Could not open backing file: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        goto free_exit;
    }
    bdrv_set_backing_hd(bs, backing_hd);

free_exit:
    g_free(backing_filename);
    return ret;
}

/*
 * Opens a disk image whose options are given as BlockdevRef in another block
 * device's options.
 *
 * If allow_none is true, no image will be opened if filename is NULL and no
 * BlockdevRef is given. *pbs will remain unchanged and 0 will be returned.
 *
 * bdref_key specifies the key for the image's BlockdevRef in the options QDict.
 * That QDict has to be flattened; therefore, if the BlockdevRef is a QDict
 * itself, all options starting with "${bdref_key}." are considered part of the
 * BlockdevRef.
 *
 * The BlockdevRef will be removed from the options QDict.
 *
 * To conform with the behavior of bdrv_open(), *pbs has to be NULL.
 */
int bdrv_open_image(BlockDriverState **pbs, const char *filename,
                    QDict *options, const char *bdref_key, int flags,
                    bool allow_none, Error **errp)
{
    QDict *image_options;
    int ret;
    char *bdref_key_dot;
    const char *reference;

    assert(pbs);
    assert(*pbs == NULL);

    bdref_key_dot = g_strdup_printf("%s.", bdref_key);
    qdict_extract_subqdict(options, &image_options, bdref_key_dot);
    g_free(bdref_key_dot);

    reference = qdict_get_try_str(options, bdref_key);
    if (!filename && !reference && !qdict_size(image_options)) {
        if (allow_none) {
            ret = 0;
        } else {
            error_setg(errp, "A block device must be specified for \"%s\"",
                       bdref_key);
            ret = -EINVAL;
        }
        QDECREF(image_options);
        goto done;
    }

    ret = bdrv_open(pbs, filename, reference, image_options, flags, NULL, errp);

done:
    qdict_del(options, bdref_key);
    return ret;
}
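
/*
 * Illustrative sketch (editor's addition): opening the image referenced
 * under a hypothetical "backing" key. With options containing
 * backing.driver=raw and backing.file.filename=base.img, the "backing."
 * prefix is stripped and the sub-dict is handed on to bdrv_open().
 * (This file itself calls bdrv_open_image() with the "file" key.)
 *
 *     BlockDriverState *backing = NULL;
 *     ret = bdrv_open_image(&backing, NULL, options, "backing",
 *                           bdrv_backing_flags(flags), true, &local_err);
 */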

void bdrv_append_temp_snapshot(BlockDriverState *bs, int flags, Error **errp)
{
    /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */
    char *tmp_filename = g_malloc0(PATH_MAX + 1);
    int64_t total_size;
    BlockDriver *bdrv_qcow2;
    QemuOpts *opts = NULL;
    QDict *snapshot_options;
    BlockDriverState *bs_snapshot;
    Error *local_err;
    int ret;

    /* if snapshot, we create a temporary backing file and open it
       instead of opening 'filename' directly */

    /* Get the required size from the image */
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        error_setg_errno(errp, -total_size, "Could not get image size");
        goto out;
    }
    total_size &= BDRV_SECTOR_MASK;

    /* Create the temporary image */
    ret = get_tmp_filename(tmp_filename, PATH_MAX + 1);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not get temporary filename");
        goto out;
    }

    bdrv_qcow2 = bdrv_find_format("qcow2");
    opts = qemu_opts_create(bdrv_qcow2->create_opts, NULL, 0,
                            &error_abort);
    qemu_opt_set_number(opts, BLOCK_OPT_SIZE, total_size);
    ret = bdrv_create(bdrv_qcow2, tmp_filename, opts, &local_err);
    qemu_opts_del(opts);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not create temporary overlay "
                         "'%s': %s", tmp_filename,
                         error_get_pretty(local_err));
        error_free(local_err);
        goto out;
    }

    /* Prepare a new options QDict for the temporary file */
    snapshot_options = qdict_new();
    qdict_put(snapshot_options, "file.driver",
              qstring_from_str("file"));
    qdict_put(snapshot_options, "file.filename",
              qstring_from_str(tmp_filename));

    bs_snapshot = bdrv_new("", &error_abort);

    ret = bdrv_open(&bs_snapshot, NULL, NULL, snapshot_options,
                    flags, bdrv_qcow2, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        goto out;
    }

    bdrv_append(bs_snapshot, bs);

out:
    g_free(tmp_filename);
}

/*
 * Opens a disk image (raw, qcow2, vmdk, ...)
 *
 * options is a QDict of options to pass to the block drivers, or NULL for an
 * empty set of options. The reference to the QDict belongs to the block layer
 * after the call (even on failure), so if the caller intends to reuse the
 * dictionary, it needs to use QINCREF() before calling bdrv_open.
 *
 * If *pbs is NULL, a new BDS will be created with a pointer to it stored there.
 * If it is not NULL, the referenced BDS will be reused.
 *
 * The reference parameter may be used to specify an existing block device which
 * should be opened. If specified, neither options nor a filename may be given,
 * nor can an existing BDS be reused (that is, *pbs has to be NULL).
 */
int bdrv_open(BlockDriverState **pbs, const char *filename,
              const char *reference, QDict *options, int flags,
              BlockDriver *drv, Error **errp)
{
    int ret;
    BlockDriverState *file = NULL, *bs;
    const char *drvname;
    Error *local_err = NULL;
    int snapshot_flags = 0;

    assert(pbs);

    if (reference) {
        bool options_non_empty = options ? qdict_size(options) : false;
        QDECREF(options);

        if (*pbs) {
            error_setg(errp, "Cannot reuse an existing BDS when referencing "
                       "another block device");
            return -EINVAL;
        }

        if (filename || options_non_empty) {
            error_setg(errp, "Cannot reference an existing block device with "
                       "additional options or a new filename");
            return -EINVAL;
        }

        bs = bdrv_lookup_bs(reference, reference, errp);
        if (!bs) {
            return -ENODEV;
        }
        bdrv_ref(bs);
        *pbs = bs;
        return 0;
    }

    if (*pbs) {
        bs = *pbs;
    } else {
        bs = bdrv_new("", &error_abort);
    }

    /* NULL means an empty set of options */
    if (options == NULL) {
        options = qdict_new();
    }

    ret = bdrv_fill_options(&options, &filename, flags, drv, &local_err);
    if (local_err) {
        goto fail;
    }

    /* Find the right image format driver */
    drv = NULL;
    drvname = qdict_get_try_str(options, "driver");
    if (drvname) {
        drv = bdrv_find_format(drvname);
        qdict_del(options, "driver");
        if (!drv) {
            error_setg(errp, "Unknown driver: '%s'", drvname);
            ret = -EINVAL;
            goto fail;
        }
    }

    assert(drvname || !(flags & BDRV_O_PROTOCOL));
    if (drv && !drv->bdrv_file_open) {
        /* If the user explicitly wants a format driver here, we'll need to add
         * another layer for the protocol in bs->file */
        flags &= ~BDRV_O_PROTOCOL;
    }

    bs->options = options;
    options = qdict_clone_shallow(options);

    /* Open image file without format layer */
    if ((flags & BDRV_O_PROTOCOL) == 0) {
        if (flags & BDRV_O_RDWR) {
            flags |= BDRV_O_ALLOW_RDWR;
        }
        if (flags & BDRV_O_SNAPSHOT) {
            snapshot_flags = bdrv_temp_snapshot_flags(flags);
            flags = bdrv_backing_flags(flags);
        }

        assert(file == NULL);
        ret = bdrv_open_image(&file, filename, options, "file",
                              bdrv_inherited_flags(flags),
                              true, &local_err);
        if (ret < 0) {
            goto fail;
        }
    }

    /* Image format probing */
    if (!drv && file) {
        ret = find_image_format(file, filename, &drv, &local_err);
        if (ret < 0) {
            goto fail;
        }
    } else if (!drv) {
        error_setg(errp, "Must specify either driver or file");
        ret = -EINVAL;
        goto fail;
    }

    /* Open the image */
    if (flags & BDRV_O_PROTOCOL) {
        ret = bdrv_open_common(bs, NULL, options,
                               flags & ~BDRV_O_PROTOCOL, drv, &local_err);
        if (!ret) {
            bs->growable = 1;
            goto done;
        } else if (bs->drv) {
            goto close_and_fail;
        } else {
            goto fail;
        }
    }

    ret = bdrv_open_common(bs, file, options, flags, drv, &local_err);
    if (ret < 0) {
        goto fail;
    }

    if (file && (bs->file != file)) {
        bdrv_unref(file);
        file = NULL;
    }

    /* If there is a backing file, use it */
    if ((flags & BDRV_O_NO_BACKING) == 0) {
        QDict *backing_options;

        qdict_extract_subqdict(options, &backing_options, "backing.");
        ret = bdrv_open_backing_file(bs, backing_options, &local_err);
        if (ret < 0) {
            goto close_and_fail;
        }
    }

    /* For snapshot=on, create a temporary qcow2 overlay. bs points to the
     * temporary snapshot afterwards. */
    if (snapshot_flags) {
        bdrv_append_temp_snapshot(bs, snapshot_flags, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            goto close_and_fail;
        }
    }


done:
    /* Check if any unknown options were used */
    if (options && (qdict_size(options) != 0)) {
        const QDictEntry *entry = qdict_first(options);
        if (flags & BDRV_O_PROTOCOL) {
            error_setg(errp, "Block protocol '%s' doesn't support the option "
                       "'%s'", drv->format_name, entry->key);
        } else {
            error_setg(errp, "Block format '%s' used by device '%s' doesn't "
                       "support the option '%s'", drv->format_name,
                       bs->device_name, entry->key);
        }

        ret = -EINVAL;
        goto close_and_fail;
    }

    if (!bdrv_key_required(bs)) {
        bdrv_dev_change_media_cb(bs, true);
    } else if (!runstate_check(RUN_STATE_PRELAUNCH)
               && !runstate_check(RUN_STATE_INMIGRATE)
               && !runstate_check(RUN_STATE_PAUSED)) { /* HACK */
        error_setg(errp,
                   "Guest must be stopped for opening of encrypted image");
        ret = -EBUSY;
        goto close_and_fail;
    }

    QDECREF(options);
    *pbs = bs;
    return 0;

fail:
    if (file != NULL) {
        bdrv_unref(file);
    }
    QDECREF(bs->options);
    QDECREF(options);
    bs->options = NULL;
    if (!*pbs) {
        /* If *pbs is NULL, a new BDS has been created in this function and
           needs to be freed now. Otherwise, it does not need to be closed,
           since it has not really been opened yet. */
        bdrv_unref(bs);
    }
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;

close_and_fail:
    /* See fail path, but now the BDS has to be always closed */
    if (*pbs) {
        bdrv_close(bs);
    } else {
        bdrv_unref(bs);
    }
    QDECREF(options);
    if (local_err) {
        error_propagate(errp, local_err);
    }
    return ret;
}
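
/*
 * Illustrative sketch (editor's addition): the common open pattern. *pbs
 * starts out NULL, so bdrv_open() creates the BDS; the reference to the
 * options QDict is consumed even on failure. The file name is hypothetical.
 *
 *     BlockDriverState *bs = NULL;
 *     QDict *opts = qdict_new();
 *     qdict_put(opts, "driver", qstring_from_str("qcow2"));
 *     ret = bdrv_open(&bs, "test.qcow2", NULL, opts,
 *                     BDRV_O_RDWR | BDRV_O_CACHE_WB, NULL, &local_err);
 */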

typedef struct BlockReopenQueueEntry {
    bool prepared;
    BDRVReopenState state;
    QSIMPLEQ_ENTRY(BlockReopenQueueEntry) entry;
} BlockReopenQueueEntry;

/*
 * Adds a BlockDriverState to a simple queue for an atomic, transactional
 * reopen of multiple devices.
 *
 * bs_queue can either be an existing BlockReopenQueue on which QSIMPLEQ_INIT()
 * has already been performed, or it may be NULL, in which case a new
 * BlockReopenQueue will be created and initialized. This newly created
 * BlockReopenQueue should be passed back in for subsequent calls that are
 * intended to be of the same atomic 'set'.
 *
 * bs is the BlockDriverState to add to the reopen queue.
 *
 * flags contains the open flags for the associated bs
 *
 * returns a pointer to bs_queue, which is either the newly allocated
 * bs_queue, or the existing bs_queue being used.
 *
 */
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, int flags)
{
    assert(bs != NULL);

    BlockReopenQueueEntry *bs_entry;
    if (bs_queue == NULL) {
        bs_queue = g_new0(BlockReopenQueue, 1);
        QSIMPLEQ_INIT(bs_queue);
    }

    /* bdrv_open() masks this flag out */
    flags &= ~BDRV_O_PROTOCOL;

    if (bs->file) {
        bdrv_reopen_queue(bs_queue, bs->file, bdrv_inherited_flags(flags));
    }

    bs_entry = g_new0(BlockReopenQueueEntry, 1);
    QSIMPLEQ_INSERT_TAIL(bs_queue, bs_entry, entry);

    bs_entry->state.bs = bs;
    bs_entry->state.flags = flags;

    return bs_queue;
}

/*
 * Reopen multiple BlockDriverStates atomically & transactionally.
 *
 * The queue passed in (bs_queue) must have been built up previously
 * via bdrv_reopen_queue().
 *
 * Reopens all BDS specified in the queue, with the appropriate
 * flags. All devices are prepared for reopen, and failure of any
 * device will cause all device changes to be abandoned, and intermediate
 * data cleaned up.
 *
 * If all devices prepare successfully, then the changes are committed
 * to all devices.
 *
 */
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
{
    int ret = -1;
    BlockReopenQueueEntry *bs_entry, *next;
    Error *local_err = NULL;

    assert(bs_queue != NULL);

    bdrv_drain_all();

    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
            error_propagate(errp, local_err);
            goto cleanup;
        }
        bs_entry->prepared = true;
    }

    /* If we reach this point, we have success and just need to apply the
     * changes
     */
    QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
        bdrv_reopen_commit(&bs_entry->state);
    }

    ret = 0;

cleanup:
    QSIMPLEQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
        if (ret && bs_entry->prepared) {
            bdrv_reopen_abort(&bs_entry->state);
        }
        g_free(bs_entry);
    }
    g_free(bs_queue);
    return ret;
}

/* Reopen a single BlockDriverState with the specified flags. */
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, bdrv_flags);

    ret = bdrv_reopen_multiple(queue, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
    return ret;
}
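
/*
 * Illustrative sketch (editor's addition): dropping BDRV_O_RDWR from a
 * single device via the wrapper above, or reopening several devices
 * transactionally with the queue API.
 *
 *     ret = bdrv_reopen(bs, bs->open_flags & ~BDRV_O_RDWR, &local_err);
 *
 *     BlockReopenQueue *queue = NULL;
 *     queue = bdrv_reopen_queue(queue, bs_a, flags_a);
 *     queue = bdrv_reopen_queue(queue, bs_b, flags_b);
 *     ret = bdrv_reopen_multiple(queue, &local_err);
 */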

/*
 * Prepares a BlockDriverState for reopen. All changes are staged in the
 * 'opaque' field of the BDRVReopenState, which is used and allocated by
 * the block driver layer .bdrv_reopen_prepare()
 *
 * bs is the BlockDriverState to reopen
 * flags are the new open flags
 * queue is the reopen queue
 *
 * Returns 0 on success, non-zero on error. On error errp will be set
 * as well.
 *
 * On failure, bdrv_reopen_abort() will be called to clean up any data.
 * It is the responsibility of the caller to then call the abort() or
 * commit() for any other BDS that have been left in a prepare() state
 *
 */
int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
                        Error **errp)
{
    int ret = -1;
    Error *local_err = NULL;
    BlockDriver *drv;

    assert(reopen_state != NULL);
    assert(reopen_state->bs->drv != NULL);
    drv = reopen_state->bs->drv;

    /* if we are to stay read-only, do not allow permission change
     * to r/w */
    if (!(reopen_state->bs->open_flags & BDRV_O_ALLOW_RDWR) &&
        reopen_state->flags & BDRV_O_RDWR) {
        error_set(errp, QERR_DEVICE_IS_READ_ONLY,
                  reopen_state->bs->device_name);
        goto error;
    }


    ret = bdrv_flush(reopen_state->bs);
    if (ret) {
        error_set(errp, ERROR_CLASS_GENERIC_ERROR, "Error (%s) flushing drive",
                  strerror(-ret));
        goto error;
    }

    if (drv->bdrv_reopen_prepare) {
        ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err);
        if (ret) {
            if (local_err != NULL) {
                error_propagate(errp, local_err);
            } else {
                error_setg(errp, "failed while preparing to reopen image '%s'",
                           reopen_state->bs->filename);
            }
            goto error;
        }
    } else {
        /* It is currently mandatory to have a bdrv_reopen_prepare()
         * handler for each supported drv. */
        error_set(errp, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
                  drv->format_name, reopen_state->bs->device_name,
                  "reopening of file");
        ret = -1;
        goto error;
    }

    ret = 0;

error:
    return ret;
}

/*
 * Takes the staged changes for the reopen from bdrv_reopen_prepare(), and
 * makes them final by swapping the staging BlockDriverState contents into
 * the active BlockDriverState contents.
 */
void bdrv_reopen_commit(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    /* If there are any driver level actions to take */
    if (drv->bdrv_reopen_commit) {
        drv->bdrv_reopen_commit(reopen_state);
    }

    /* set BDS specific flags now */
    reopen_state->bs->open_flags = reopen_state->flags;
    reopen_state->bs->enable_write_cache = !!(reopen_state->flags &
                                              BDRV_O_CACHE_WB);
    reopen_state->bs->read_only = !(reopen_state->flags & BDRV_O_RDWR);

    bdrv_refresh_limits(reopen_state->bs);
}

/*
 * Abort the reopen, and delete and free the staged changes in
 * reopen_state
 */
void bdrv_reopen_abort(BDRVReopenState *reopen_state)
{
    BlockDriver *drv;

    assert(reopen_state != NULL);
    drv = reopen_state->bs->drv;
    assert(drv != NULL);

    if (drv->bdrv_reopen_abort) {
        drv->bdrv_reopen_abort(reopen_state);
    }
}


void bdrv_close(BlockDriverState *bs)
{
    if (bs->job) {
        block_job_cancel_sync(bs->job);
    }
    bdrv_drain_all(); /* complete I/O */
    bdrv_flush(bs);
    bdrv_drain_all(); /* in case flush left pending I/O */
    notifier_list_notify(&bs->close_notifiers, bs);

    if (bs->drv) {
        if (bs->backing_hd) {
            BlockDriverState *backing_hd = bs->backing_hd;
            bdrv_set_backing_hd(bs, NULL);
            bdrv_unref(backing_hd);
        }
        bs->drv->bdrv_close(bs);
        g_free(bs->opaque);
        bs->opaque = NULL;
        bs->drv = NULL;
        bs->copy_on_read = 0;
        bs->backing_file[0] = '\0';
        bs->backing_format[0] = '\0';
        bs->total_sectors = 0;
        bs->encrypted = 0;
        bs->valid_key = 0;
        bs->sg = 0;
        bs->growable = 0;
        bs->zero_beyond_eof = false;
        QDECREF(bs->options);
        bs->options = NULL;

        if (bs->file != NULL) {
            bdrv_unref(bs->file);
            bs->file = NULL;
        }
    }

    bdrv_dev_change_media_cb(bs, false);

    /* throttling disk I/O limits */
    if (bs->io_limits_enabled) {
        bdrv_io_limits_disable(bs);
    }
}

void bdrv_close_all(void)
{
    BlockDriverState *bs;

    QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_close(bs);
        aio_context_release(aio_context);
    }
}

/* Check if any requests are in-flight (including throttled requests) */
static bool bdrv_requests_pending(BlockDriverState *bs)
{
    if (!QLIST_EMPTY(&bs->tracked_requests)) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
        return true;
    }
    if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
        return true;
    }
    if (bs->file && bdrv_requests_pending(bs->file)) {
        return true;
    }
    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
        return true;
    }
    return false;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * Note that completion of an asynchronous I/O operation can trigger any
 * number of other I/O operations on other devices---for example a coroutine
 * can be arbitrarily complex and a constant flow of I/O can come until the
 * coroutine is complete. Because of this, it is not possible to have a
 * function to drain a single device's I/O queue.
 */
void bdrv_drain_all(void)
{
    /* Always run first iteration so any pending completion BHs run */
    bool busy = true;
    BlockDriverState *bs;

    while (busy) {
        busy = false;

        QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
            AioContext *aio_context = bdrv_get_aio_context(bs);
            bool bs_busy;

            aio_context_acquire(aio_context);
            bdrv_start_throttled_reqs(bs);
            bs_busy = bdrv_requests_pending(bs);
            bs_busy |= aio_poll(aio_context, bs_busy);
            aio_context_release(aio_context);

            busy |= bs_busy;
        }
    }
}

/* make a BlockDriverState anonymous by removing it from the bdrv_states and
 * graph_bdrv_states lists. Also, clear device_name and node_name to prevent
 * a double remove. */
1927 void bdrv_make_anon(BlockDriverState *bs)
1928 {
1929 if (bs->device_name[0] != '\0') {
1930 QTAILQ_REMOVE(&bdrv_states, bs, device_list);
1931 }
1932 bs->device_name[0] = '\0';
1933 if (bs->node_name[0] != '\0') {
1934 QTAILQ_REMOVE(&graph_bdrv_states, bs, node_list);
1935 }
1936 bs->node_name[0] = '\0';
1937 }
1938
1939 static void bdrv_rebind(BlockDriverState *bs)
1940 {
1941 if (bs->drv && bs->drv->bdrv_rebind) {
1942 bs->drv->bdrv_rebind(bs);
1943 }
1944 }
1945
1946 static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
1947 BlockDriverState *bs_src)
1948 {
1949 /* move some fields that need to stay attached to the device */
1950
1951 /* dev info */
1952 bs_dest->dev_ops = bs_src->dev_ops;
1953 bs_dest->dev_opaque = bs_src->dev_opaque;
1954 bs_dest->dev = bs_src->dev;
1955 bs_dest->guest_block_size = bs_src->guest_block_size;
1956 bs_dest->copy_on_read = bs_src->copy_on_read;
1957
1958 bs_dest->enable_write_cache = bs_src->enable_write_cache;
1959
1960 /* i/o throttled req */
1961 memcpy(&bs_dest->throttle_state,
1962 &bs_src->throttle_state,
1963 sizeof(ThrottleState));
1964 bs_dest->throttled_reqs[0] = bs_src->throttled_reqs[0];
1965 bs_dest->throttled_reqs[1] = bs_src->throttled_reqs[1];
1966 bs_dest->io_limits_enabled = bs_src->io_limits_enabled;
1967
1968 /* r/w error */
1969 bs_dest->on_read_error = bs_src->on_read_error;
1970 bs_dest->on_write_error = bs_src->on_write_error;
1971
1972 /* i/o status */
1973 bs_dest->iostatus_enabled = bs_src->iostatus_enabled;
1974 bs_dest->iostatus = bs_src->iostatus;
1975
1976 /* dirty bitmap */
1977 bs_dest->dirty_bitmaps = bs_src->dirty_bitmaps;
1978
1979 /* reference count */
1980 bs_dest->refcnt = bs_src->refcnt;
1981
1982 /* job */
1983 bs_dest->job = bs_src->job;
1984
1985 /* keep the same entry in bdrv_states */
1986 pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
1987 bs_src->device_name);
1988 bs_dest->device_list = bs_src->device_list;
1989 memcpy(bs_dest->op_blockers, bs_src->op_blockers,
1990 sizeof(bs_dest->op_blockers));
1991 }
1992
1993 /*
1994 * Swap bs contents for two image chains while they are live,
1995 * while keeping required fields on the BlockDriverState that is
1996 * actually attached to a device.
1997 *
1998 * This will modify the BlockDriverState fields, and swap contents
1999 * between bs_new and bs_old. Both bs_new and bs_old are modified.
2000 *
2001 * bs_new is required to be anonymous.
2002 *
2003 * This function does not create any image files.
2004 */
2005 void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
2006 {
2007 BlockDriverState tmp;
2008
2009 /* The code needs to swap the node_name, but simply swapping node_list won't
2010  * work; so first remove the nodes from the graph list, do the swap, and
2011  * then insert them back if needed.
2012 */
2013 if (bs_new->node_name[0] != '\0') {
2014 QTAILQ_REMOVE(&graph_bdrv_states, bs_new, node_list);
2015 }
2016 if (bs_old->node_name[0] != '\0') {
2017 QTAILQ_REMOVE(&graph_bdrv_states, bs_old, node_list);
2018 }
2019
2020 /* bs_new must be anonymous and shouldn't have anything fancy enabled */
2021 assert(bs_new->device_name[0] == '\0');
2022 assert(QLIST_EMPTY(&bs_new->dirty_bitmaps));
2023 assert(bs_new->job == NULL);
2024 assert(bs_new->dev == NULL);
2025 assert(bs_new->io_limits_enabled == false);
2026 assert(!throttle_have_timer(&bs_new->throttle_state));
2027
2028 tmp = *bs_new;
2029 *bs_new = *bs_old;
2030 *bs_old = tmp;
2031
2032 /* there are some fields that should not be swapped, move them back */
2033 bdrv_move_feature_fields(&tmp, bs_old);
2034 bdrv_move_feature_fields(bs_old, bs_new);
2035 bdrv_move_feature_fields(bs_new, &tmp);
2036
2037 /* bs_new shouldn't be in bdrv_states even after the swap! */
2038 assert(bs_new->device_name[0] == '\0');
2039
2040 /* Check a few fields that should remain attached to the device */
2041 assert(bs_new->dev == NULL);
2042 assert(bs_new->job == NULL);
2043 assert(bs_new->io_limits_enabled == false);
2044 assert(!throttle_have_timer(&bs_new->throttle_state));
2045
2046 /* insert the nodes back into the graph node list if needed */
2047 if (bs_new->node_name[0] != '\0') {
2048 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_new, node_list);
2049 }
2050 if (bs_old->node_name[0] != '\0') {
2051 QTAILQ_INSERT_TAIL(&graph_bdrv_states, bs_old, node_list);
2052 }
2053
2054 bdrv_rebind(bs_new);
2055 bdrv_rebind(bs_old);
2056 }
2057
2058 /*
2059 * Add new bs contents at the top of an image chain while the chain is
2060 * live, while keeping required fields on the top layer.
2061 *
2062 * This will modify the BlockDriverState fields, and swap contents
2063 * between bs_new and bs_top. Both bs_new and bs_top are modified.
2064 *
2065 * bs_new is required to be anonymous.
2066 *
2067 * This function does not create any image files.
2068 */
2069 void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
2070 {
2071 bdrv_swap(bs_new, bs_top);
2072
2073 /* After the swap, bs_new holds what used to be bs_top's contents, so it
2074  * becomes the backing file of the new top. */
2075 bdrv_set_backing_hd(bs_top, bs_new);
2076 }
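
/*
 * Illustrative sketch (hypothetical helper): the pattern for installing a
 * freshly created overlay image on top of a device's current chain, e.g.
 * for a live snapshot. The overlay must have been opened as an anonymous
 * BDS.
 */
static void example_install_overlay(BlockDriverState *device_bs,
                                    BlockDriverState *overlay_bs)
{
    /* overlay_bs must be anonymous and idle (see the asserts in
     * bdrv_swap() above) */
    bdrv_append(overlay_bs, device_bs);

    /* device_bs now presents the overlay's contents; the previous top
     * image has become its backing file */
}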
2077
2078 static void bdrv_delete(BlockDriverState *bs)
2079 {
2080 assert(!bs->dev);
2081 assert(!bs->job);
2082 assert(bdrv_op_blocker_is_empty(bs));
2083 assert(!bs->refcnt);
2084 assert(QLIST_EMPTY(&bs->dirty_bitmaps));
2085
2086 bdrv_close(bs);
2087
2088 /* remove from list, if necessary */
2089 bdrv_make_anon(bs);
2090
2091 g_free(bs);
2092 }
2093
2094 int bdrv_attach_dev(BlockDriverState *bs, void *dev)
2095 /* TODO change to DeviceState *dev when all users are qdevified */
2096 {
2097 if (bs->dev) {
2098 return -EBUSY;
2099 }
2100 bs->dev = dev;
2101 bdrv_iostatus_reset(bs);
2102 return 0;
2103 }
2104
2105 /* TODO qdevified devices don't use this, remove when devices are qdevified */
2106 void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev)
2107 {
2108 if (bdrv_attach_dev(bs, dev) < 0) {
2109 abort();
2110 }
2111 }
2112
2113 void bdrv_detach_dev(BlockDriverState *bs, void *dev)
2114 /* TODO change to DeviceState *dev when all users are qdevified */
2115 {
2116 assert(bs->dev == dev);
2117 bs->dev = NULL;
2118 bs->dev_ops = NULL;
2119 bs->dev_opaque = NULL;
2120 bs->guest_block_size = 512;
2121 }
2122
2123 /* TODO change to return DeviceState * when all users are qdevified */
2124 void *bdrv_get_attached_dev(BlockDriverState *bs)
2125 {
2126 return bs->dev;
2127 }
2128
2129 void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
2130 void *opaque)
2131 {
2132 bs->dev_ops = ops;
2133 bs->dev_opaque = opaque;
2134 }
2135
2136 static void bdrv_dev_change_media_cb(BlockDriverState *bs, bool load)
2137 {
2138 if (bs->dev_ops && bs->dev_ops->change_media_cb) {
2139 bool tray_was_closed = !bdrv_dev_is_tray_open(bs);
2140 bs->dev_ops->change_media_cb(bs->dev_opaque, load);
2141 if (tray_was_closed) {
2142 /* tray open */
2143 qapi_event_send_device_tray_moved(bdrv_get_device_name(bs),
2144 true, &error_abort);
2145 }
2146 if (load) {
2147 /* tray close */
2148 qapi_event_send_device_tray_moved(bdrv_get_device_name(bs),
2149 false, &error_abort);
2150 }
2151 }
2152 }
2153
2154 bool bdrv_dev_has_removable_media(BlockDriverState *bs)
2155 {
2156 return !bs->dev || (bs->dev_ops && bs->dev_ops->change_media_cb);
2157 }
2158
2159 void bdrv_dev_eject_request(BlockDriverState *bs, bool force)
2160 {
2161 if (bs->dev_ops && bs->dev_ops->eject_request_cb) {
2162 bs->dev_ops->eject_request_cb(bs->dev_opaque, force);
2163 }
2164 }
2165
2166 bool bdrv_dev_is_tray_open(BlockDriverState *bs)
2167 {
2168 if (bs->dev_ops && bs->dev_ops->is_tray_open) {
2169 return bs->dev_ops->is_tray_open(bs->dev_opaque);
2170 }
2171 return false;
2172 }
2173
2174 static void bdrv_dev_resize_cb(BlockDriverState *bs)
2175 {
2176 if (bs->dev_ops && bs->dev_ops->resize_cb) {
2177 bs->dev_ops->resize_cb(bs->dev_opaque);
2178 }
2179 }
2180
2181 bool bdrv_dev_is_medium_locked(BlockDriverState *bs)
2182 {
2183 if (bs->dev_ops && bs->dev_ops->is_medium_locked) {
2184 return bs->dev_ops->is_medium_locked(bs->dev_opaque);
2185 }
2186 return false;
2187 }
2188
2189 /*
2190 * Run consistency checks on an image
2191 *
2192 * Returns 0 if the check could be completed (it doesn't mean that the image is
2193 * free of errors) or -errno when an internal error occurred. The results of the
2194 * check are stored in res.
2195 */
2196 int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
2197 {
2198 if (bs->drv->bdrv_check == NULL) {
2199 return -ENOTSUP;
2200 }
2201
2202 memset(res, 0, sizeof(*res));
2203 return bs->drv->bdrv_check(bs, res, fix);
2204 }
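
/*
 * Illustrative usage sketch (hypothetical helper; the BdrvCheckResult field
 * names used below are an assumption): run a report-only consistency check
 * and summarise what was found.
 */
static int example_check_image(BlockDriverState *bs)
{
    BdrvCheckResult res;
    int ret = bdrv_check(bs, &res, 0 /* report only, fix nothing */);

    if (ret < 0) {
        /* -ENOTSUP if the driver has no checker, or an internal error */
        return ret;
    }
    error_report("check finished: %d corruptions, %d leaks",
                 res.corruptions, res.leaks);
    return 0;
}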
2205
2206 #define COMMIT_BUF_SECTORS 2048
2207
2208 /* commit the COW layer into its backing file */
2209 int bdrv_commit(BlockDriverState *bs)
2210 {
2211 BlockDriver *drv = bs->drv;
2212 int64_t sector, total_sectors, length, backing_length;
2213 int n, ro, open_flags;
2214 int ret = 0;
2215 uint8_t *buf = NULL;
2216 char filename[PATH_MAX];
2217
2218 if (!drv)
2219 return -ENOMEDIUM;
2220
2221 if (!bs->backing_hd) {
2222 return -ENOTSUP;
2223 }
2224
2225 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT, NULL) ||
2226 bdrv_op_is_blocked(bs->backing_hd, BLOCK_OP_TYPE_COMMIT, NULL)) {
2227 return -EBUSY;
2228 }
2229
2230 ro = bs->backing_hd->read_only;
2231 /* Use pstrcpy (not strncpy): filename must be NUL-terminated. */
2232 pstrcpy(filename, sizeof(filename), bs->backing_hd->filename);
2233 open_flags = bs->backing_hd->open_flags;
2234
2235 if (ro) {
2236 if (bdrv_reopen(bs->backing_hd, open_flags | BDRV_O_RDWR, NULL)) {
2237 return -EACCES;
2238 }
2239 }
2240
2241 length = bdrv_getlength(bs);
2242 if (length < 0) {
2243 ret = length;
2244 goto ro_cleanup;
2245 }
2246
2247 backing_length = bdrv_getlength(bs->backing_hd);
2248 if (backing_length < 0) {
2249 ret = backing_length;
2250 goto ro_cleanup;
2251 }
2252
2253 /* If our top snapshot is larger than the backing file image,
2254 * grow the backing file image if possible. If not possible,
2255 * we must return an error */
2256 if (length > backing_length) {
2257 ret = bdrv_truncate(bs->backing_hd, length);
2258 if (ret < 0) {
2259 goto ro_cleanup;
2260 }
2261 }
2262
2263 total_sectors = length >> BDRV_SECTOR_BITS;
2264 buf = g_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
2265
2266 for (sector = 0; sector < total_sectors; sector += n) {
2267 ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
2268 if (ret < 0) {
2269 goto ro_cleanup;
2270 }
2271 if (ret) {
2272 ret = bdrv_read(bs, sector, buf, n);
2273 if (ret < 0) {
2274 goto ro_cleanup;
2275 }
2276
2277 ret = bdrv_write(bs->backing_hd, sector, buf, n);
2278 if (ret < 0) {
2279 goto ro_cleanup;
2280 }
2281 }
2282 }
2283
2284 if (drv->bdrv_make_empty) {
2285 ret = drv->bdrv_make_empty(bs);
2286 if (ret < 0) {
2287 goto ro_cleanup;
2288 }
2289 bdrv_flush(bs);
2290 }
2291
2292 /*
2293 * Make sure all data we wrote to the backing device is actually
2294 * stable on disk.
2295 */
2296 if (bs->backing_hd) {
2297 bdrv_flush(bs->backing_hd);
2298 }
2299
2300 ret = 0;
2301 ro_cleanup:
2302 g_free(buf);
2303
2304 if (ro) {
2305 /* ignoring error return here */
2306 bdrv_reopen(bs->backing_hd, open_flags & ~BDRV_O_RDWR, NULL);
2307 }
2308
2309 return ret;
2310 }
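
/*
 * Illustrative usage sketch (hypothetical helper): commit the given layer
 * into its backing file and map the error codes produced by the
 * implementation above.
 */
static int example_commit_top(BlockDriverState *bs)
{
    int ret = bdrv_commit(bs);

    if (ret == -ENOTSUP) {
        error_report("image has no backing file, nothing to commit");
    } else if (ret == -EBUSY) {
        error_report("commit is blocked on this device");
    } else if (ret == -EACCES) {
        error_report("backing file could not be reopened read-write");
    } else if (ret < 0) {
        error_report("commit failed: %s", strerror(-ret));
    }
    return ret;
}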
2311
2312 int bdrv_commit_all(void)
2313 {
2314 BlockDriverState *bs;
2315
2316 QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
2317 AioContext *aio_context = bdrv_get_aio_context(bs);
2318
2319 aio_context_acquire(aio_context);
2320 if (bs->drv && bs->backing_hd) {
2321 int ret = bdrv_commit(bs);
2322 if (ret < 0) {
2323 aio_context_release(aio_context);
2324 return ret;
2325 }
2326 }
2327 aio_context_release(aio_context);
2328 }
2329 return 0;
2330 }
2331
2332 /**
2333 * Remove an active request from the tracked requests list
2334 *
2335 * This function should be called when a tracked request is completing.
2336 */
2337 static void tracked_request_end(BdrvTrackedRequest *req)
2338 {
2339 if (req->serialising) {
2340 req->bs->serialising_in_flight--;
2341 }
2342
2343 QLIST_REMOVE(req, list);
2344 qemu_co_queue_restart_all(&req->wait_queue);
2345 }
2346
2347 /**
2348 * Add an active request to the tracked requests list
2349 */
2350 static void tracked_request_begin(BdrvTrackedRequest *req,
2351 BlockDriverState *bs,
2352 int64_t offset,
2353 unsigned int bytes, bool is_write)
2354 {
2355 *req = (BdrvTrackedRequest){
2356 .bs = bs,
2357 .offset = offset,
2358 .bytes = bytes,
2359 .is_write = is_write,
2360 .co = qemu_coroutine_self(),
2361 .serialising = false,
2362 .overlap_offset = offset,
2363 .overlap_bytes = bytes,
2364 };
2365
2366 qemu_co_queue_init(&req->wait_queue);
2367
2368 QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
2369 }
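
/*
 * Usage pattern (illustrative sketch): every aligned request in this file
 * is bracketed by a begin/end pair so that overlapping requests can find
 * each other on bs->tracked_requests:
 *
 *     BdrvTrackedRequest req;
 *
 *     tracked_request_begin(&req, bs, offset, bytes, is_write);
 *     ret = ... issue the aligned request, possibly blocking in
 *               wait_serialising_requests() ...;
 *     tracked_request_end(&req);
 *
 * bdrv_co_do_preadv() and bdrv_co_do_pwritev() below are the two callers.
 */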
2370
2371 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
2372 {
2373 int64_t overlap_offset = req->offset & ~(align - 1);
2374 unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
2375 - overlap_offset;
2376
2377 if (!req->serialising) {
2378 req->bs->serialising_in_flight++;
2379 req->serialising = true;
2380 }
2381
2382 req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
2383 req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
2384 }
2385
2386 /**
2387 * Round a region to cluster boundaries
2388 */
2389 void bdrv_round_to_clusters(BlockDriverState *bs,
2390 int64_t sector_num, int nb_sectors,
2391 int64_t *cluster_sector_num,
2392 int *cluster_nb_sectors)
2393 {
2394 BlockDriverInfo bdi;
2395
2396 if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
2397 *cluster_sector_num = sector_num;
2398 *cluster_nb_sectors = nb_sectors;
2399 } else {
2400 int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
2401 *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
2402 *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
2403 nb_sectors, c);
2404 }
2405 }
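
/*
 * Worked example (assuming a 64 KiB cluster size, i.e. 128 sectors):
 * rounding sector_num = 100, nb_sectors = 50 gives
 * *cluster_sector_num = QEMU_ALIGN_DOWN(100, 128) = 0 and
 * *cluster_nb_sectors = QEMU_ALIGN_UP(100 - 0 + 50, 128) = 256,
 * i.e. the region grows to cover both clusters it touches.
 */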
2406
2407 static int bdrv_get_cluster_size(BlockDriverState *bs)
2408 {
2409 BlockDriverInfo bdi;
2410 int ret;
2411
2412 ret = bdrv_get_info(bs, &bdi);
2413 if (ret < 0 || bdi.cluster_size == 0) {
2414 return bs->request_alignment;
2415 } else {
2416 return bdi.cluster_size;
2417 }
2418 }
2419
2420 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
2421 int64_t offset, unsigned int bytes)
2422 {
2423 /* aaaa bbbb */
2424 if (offset >= req->overlap_offset + req->overlap_bytes) {
2425 return false;
2426 }
2427 /* bbbb aaaa */
2428 if (req->overlap_offset >= offset + bytes) {
2429 return false;
2430 }
2431 return true;
2432 }
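
/*
 * Example: a request with overlap_offset = 4096 and overlap_bytes = 4096
 * covers the half-open range [4096, 8192). A candidate range [0, 4096)
 * does not overlap it, [4095, 4097) does, and [8192, 12288) does not:
 * both comparisons above treat the end offsets as exclusive.
 */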
2433
2434 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
2435 {
2436 BlockDriverState *bs = self->bs;
2437 BdrvTrackedRequest *req;
2438 bool retry;
2439 bool waited = false;
2440
2441 if (!bs->serialising_in_flight) {
2442 return false;
2443 }
2444
2445 do {
2446 retry = false;
2447 QLIST_FOREACH(req, &bs->tracked_requests, list) {
2448 if (req == self || (!req->serialising && !self->serialising)) {
2449 continue;
2450 }
2451 if (tracked_request_overlaps(req, self->overlap_offset,
2452 self->overlap_bytes))
2453 {
2454 /* Hitting this means there was a reentrant request, for
2455 * example, a block driver issuing nested requests. This must
2456 * never happen since it means deadlock.
2457 */
2458 assert(qemu_coroutine_self() != req->co);
2459
2460 /* If the request is already (indirectly) waiting for us, or
2461 * will wait for us as soon as it wakes up, then just go on
2462 * (instead of producing a deadlock in the former case). */
2463 if (!req->waiting_for) {
2464 self->waiting_for = req;
2465 qemu_co_queue_wait(&req->wait_queue);
2466 self->waiting_for = NULL;
2467 retry = true;
2468 waited = true;
2469 break;
2470 }
2471 }
2472 }
2473 } while (retry);
2474
2475 return waited;
2476 }
2477
2478 /*
2479 * Return values:
2480 * 0 - success
2481 * -EINVAL - backing format specified, but no file
2482 * -ENOSPC - can't update the backing file because no space is left in the
2483 * image file header
2484 * -ENOTSUP - format driver doesn't support changing the backing file
2485 */
2486 int bdrv_change_backing_file(BlockDriverState *bs,
2487 const char *backing_file, const char *backing_fmt)
2488 {
2489 BlockDriver *drv = bs->drv;
2490 int ret;
2491
2492 /* Backing file format doesn't make sense without a backing file */
2493 if (backing_fmt && !backing_file) {
2494 return -EINVAL;
2495 }
2496
2497 if (drv->bdrv_change_backing_file != NULL) {
2498 ret = drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
2499 } else {
2500 ret = -ENOTSUP;
2501 }
2502
2503 if (ret == 0) {
2504 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
2505 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
2506 }
2507 return ret;
2508 }
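
/*
 * Illustrative usage sketch (hypothetical helper): rewrite an overlay's
 * header so that it points at a new backing image, mirroring the call made
 * by bdrv_drop_intermediate() below.
 */
static int example_retarget_backing(BlockDriverState *overlay,
                                    BlockDriverState *new_base)
{
    int ret = bdrv_change_backing_file(overlay, new_base->filename,
                                       new_base->drv ?
                                       new_base->drv->format_name : "");
    if (ret == -ENOTSUP) {
        error_report("format of '%s' cannot change its backing file",
                     overlay->filename);
    }
    return ret;
}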
2509
2510 /*
2511 * Finds the image layer in the chain that has 'bs' as its backing file.
2512 *
2513 * active is the current topmost image.
2514 *
2515 * Returns NULL if bs is not found in active's image chain,
2516 * or if active == bs.
2517 */
2518 BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
2519 BlockDriverState *bs)
2520 {
2521 BlockDriverState *overlay = NULL;
2522 BlockDriverState *intermediate;
2523
2524 assert(active != NULL);
2525 assert(bs != NULL);
2526
2527 /* if bs is the same as active, then by definition it has no overlay
2528 */
2529 if (active == bs) {
2530 return NULL;
2531 }
2532
2533 intermediate = active;
2534 while (intermediate->backing_hd) {
2535 if (intermediate->backing_hd == bs) {
2536 overlay = intermediate;
2537 break;
2538 }
2539 intermediate = intermediate->backing_hd;
2540 }
2541
2542 return overlay;
2543 }
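
/*
 * Example: in the chain base <- mid <- top with active == top,
 * bdrv_find_overlay(top, base) returns mid, bdrv_find_overlay(top, mid)
 * returns top, and both bdrv_find_overlay(top, top) and a lookup of a BDS
 * outside the chain return NULL.
 */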
2544
2545 typedef struct BlkIntermediateStates {
2546 BlockDriverState *bs;
2547 QSIMPLEQ_ENTRY(BlkIntermediateStates) entry;
2548 } BlkIntermediateStates;
2549
2550
2551 /*
2552 * Drops images above 'base' up to and including 'top', and sets the image
2553 * above 'top' to have base as its backing file.
2554 *
2555 * Requires that the overlay to 'top' is opened r/w, so that the backing file
2556 * information in that overlay can be properly updated.
2557 *
2558 * E.g., this will convert the following chain:
2559 * bottom <- base <- intermediate <- top <- active
2560 *
2561 * to
2562 *
2563 * bottom <- base <- active
2564 *
2565 * It is allowed for bottom==base, in which case it converts:
2566 *
2567 * base <- intermediate <- top <- active
2568 *
2569 * to
2570 *
2571 * base <- active
2572 *
2573 * Error conditions:
2574 * if active == top, that is considered an error
2575 *
2576 */
2577 int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
2578 BlockDriverState *base)
2579 {
2580 BlockDriverState *intermediate;
2581 BlockDriverState *base_bs = NULL;
2582 BlockDriverState *new_top_bs = NULL;
2583 BlkIntermediateStates *intermediate_state, *next;
2584 int ret = -EIO;
2585
2586 QSIMPLEQ_HEAD(states_to_delete, BlkIntermediateStates) states_to_delete;
2587 QSIMPLEQ_INIT(&states_to_delete);
2588
2589 if (!top->drv || !base->drv) {
2590 goto exit;
2591 }
2592
2593 new_top_bs = bdrv_find_overlay(active, top);
2594
2595 if (new_top_bs == NULL) {
2596 /* we could not find the image above 'top', this is an error */
2597 goto exit;
2598 }
2599
2600 /* special case of new_top_bs->backing_hd already pointing to base - nothing
2601 * to do, no intermediate images */
2602 if (new_top_bs->backing_hd == base) {
2603 ret = 0;
2604 goto exit;
2605 }
2606
2607 intermediate = top;
2608
2609 /* now we will go down through the list, and add each BDS we find
2610 * into our deletion queue, until we hit the 'base'
2611 */
2612 while (intermediate) {
2613 intermediate_state = g_malloc0(sizeof(BlkIntermediateStates));
2614 intermediate_state->bs = intermediate;
2615 QSIMPLEQ_INSERT_TAIL(&states_to_delete, intermediate_state, entry);
2616
2617 if (intermediate->backing_hd == base) {
2618 base_bs = intermediate->backing_hd;
2619 break;
2620 }
2621 intermediate = intermediate->backing_hd;
2622 }
2623 if (base_bs == NULL) {
2624 /* Something went wrong: we did not end up at the base. Safely
2625  * unravel everything and exit with an error. */
2626 goto exit;
2627 }
2628
2629 /* success - we can delete the intermediate states, and link top->base */
2630 ret = bdrv_change_backing_file(new_top_bs, base_bs->filename,
2631 base_bs->drv ? base_bs->drv->format_name : "");
2632 if (ret) {
2633 goto exit;
2634 }
2635 bdrv_set_backing_hd(new_top_bs, base_bs);
2636
2637 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2638 /* so that bdrv_close() does not recursively close the chain */
2639 bdrv_set_backing_hd(intermediate_state->bs, NULL);
2640 bdrv_unref(intermediate_state->bs);
2641 }
2642 ret = 0;
2643
2644 exit:
2645 QSIMPLEQ_FOREACH_SAFE(intermediate_state, &states_to_delete, entry, next) {
2646 g_free(intermediate_state);
2647 }
2648 return ret;
2649 }
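
/*
 * Illustrative usage sketch (hypothetical helper): flatten everything
 * between the active layer and the end of its backing chain, so that only
 * "bottom <- active" remains.
 */
static int example_flatten_chain(BlockDriverState *active)
{
    BlockDriverState *top = active->backing_hd;
    BlockDriverState *base = top;

    if (!top) {
        return 0;               /* no backing chain, nothing to do */
    }
    while (base->backing_hd) {
        base = base->backing_hd;
    }
    if (top == base) {
        return 0;               /* chain is already flat */
    }
    return bdrv_drop_intermediate(active, top, base);
}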
2650
2651
2652 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
2653 size_t size)
2654 {
2655 int64_t len;
2656
2657 if (size > INT_MAX) {
2658 return -EIO;
2659 }
2660
2661 if (!bdrv_is_inserted(bs))
2662 return -ENOMEDIUM;
2663
2664 if (bs->growable)
2665 return 0;
2666
2667 len = bdrv_getlength(bs);
2668
2669 if (offset < 0)
2670 return -EIO;
2671
2672 if ((offset > len) || (len - offset < size))
2673 return -EIO;
2674
2675 return 0;
2676 }
2677
2678 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
2679 int nb_sectors)
2680 {
2681 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2682 return -EIO;
2683 }
2684
2685 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
2686 nb_sectors * BDRV_SECTOR_SIZE);
2687 }
2688
2689 typedef struct RwCo {
2690 BlockDriverState *bs;
2691 int64_t offset;
2692 QEMUIOVector *qiov;
2693 bool is_write;
2694 int ret;
2695 BdrvRequestFlags flags;
2696 } RwCo;
2697
2698 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
2699 {
2700 RwCo *rwco = opaque;
2701
2702 if (!rwco->is_write) {
2703 rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
2704 rwco->qiov->size, rwco->qiov,
2705 rwco->flags);
2706 } else {
2707 rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
2708 rwco->qiov->size, rwco->qiov,
2709 rwco->flags);
2710 }
2711 }
2712
2713 /*
2714 * Process a vectored synchronous request using coroutines
2715 */
2716 static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
2717 QEMUIOVector *qiov, bool is_write,
2718 BdrvRequestFlags flags)
2719 {
2720 Coroutine *co;
2721 RwCo rwco = {
2722 .bs = bs,
2723 .offset = offset,
2724 .qiov = qiov,
2725 .is_write = is_write,
2726 .ret = NOT_DONE,
2727 .flags = flags,
2728 };
2729
2730 /**
2731 * In sync call context, when the vcpu is blocked, the throttling timer
2732 * will not fire, so I/O throttling has to be disabled here if it has
2733 * been enabled.
2734 */
2735 if (bs->io_limits_enabled) {
2736 fprintf(stderr, "Disabling I/O throttling on '%s' due "
2737 "to synchronous I/O.\n", bdrv_get_device_name(bs));
2738 bdrv_io_limits_disable(bs);
2739 }
2740
2741 if (qemu_in_coroutine()) {
2742 /* Fast-path if already in coroutine context */
2743 bdrv_rw_co_entry(&rwco);
2744 } else {
2745 AioContext *aio_context = bdrv_get_aio_context(bs);
2746
2747 co = qemu_coroutine_create(bdrv_rw_co_entry);
2748 qemu_coroutine_enter(co, &rwco);
2749 while (rwco.ret == NOT_DONE) {
2750 aio_poll(aio_context, true);
2751 }
2752 }
2753 return rwco.ret;
2754 }
2755
2756 /*
2757 * Process a synchronous request using coroutines
2758 */
2759 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
2760 int nb_sectors, bool is_write, BdrvRequestFlags flags)
2761 {
2762 QEMUIOVector qiov;
2763 struct iovec iov = {
2764 .iov_base = (void *)buf,
2765 .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
2766 };
2767
2768 if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
2769 return -EINVAL;
2770 }
2771
2772 qemu_iovec_init_external(&qiov, &iov, 1);
2773 return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
2774 &qiov, is_write, flags);
2775 }
2776
2777 /* return < 0 if error. See bdrv_write() for the return codes */
2778 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
2779 uint8_t *buf, int nb_sectors)
2780 {
2781 return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
2782 }
2783
2784 /* Just like bdrv_read(), but with I/O throttling temporarily disabled */
2785 int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
2786 uint8_t *buf, int nb_sectors)
2787 {
2788 bool enabled;
2789 int ret;
2790
2791 enabled = bs->io_limits_enabled;
2792 bs->io_limits_enabled = false;
2793 ret = bdrv_read(bs, sector_num, buf, nb_sectors);
2794 bs->io_limits_enabled = enabled;
2795 return ret;
2796 }
2797
2798 /* Return < 0 on error. Important errors are:
2799    -EIO         generic I/O error (may happen for all errors)
2800    -ENOMEDIUM   no media inserted
2801    -EINVAL      invalid sector number or nb_sectors
2802    -EACCES      trying to write to a read-only device
2803  */
2804 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
2805 const uint8_t *buf, int nb_sectors)
2806 {
2807 return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
2808 }
2809
2810 int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
2811 int nb_sectors, BdrvRequestFlags flags)
2812 {
2813 return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
2814 BDRV_REQ_ZERO_WRITE | flags);
2815 }
2816
2817 /*
2818 * Completely zero out a block device with the help of bdrv_write_zeroes.
2819 * The operation is sped up by checking the block status and only writing
2820 * zeroes to the device if they currently do not return zeroes. Optional
2821 * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
2822 *
2823 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
2824 */
2825 int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
2826 {
2827 int64_t target_size;
2828 int64_t ret, nb_sectors, sector_num = 0;
2829 int n;
2830
2831 target_size = bdrv_getlength(bs);
2832 if (target_size < 0) {
2833 return target_size;
2834 }
2835 target_size /= BDRV_SECTOR_SIZE;
2836
2837 for (;;) {
2838 nb_sectors = target_size - sector_num;
2839 if (nb_sectors <= 0) {
2840 return 0;
2841 }
2842 if (nb_sectors > INT_MAX) {
2843 nb_sectors = INT_MAX;
2844 }
2845 ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n);
2846 if (ret < 0) {
2847 error_report("error getting block status at sector %" PRId64 ": %s",
2848 sector_num, strerror(-ret));
2849 return ret;
2850 }
2851 if (ret & BDRV_BLOCK_ZERO) {
2852 sector_num += n;
2853 continue;
2854 }
2855 ret = bdrv_write_zeroes(bs, sector_num, n, flags);
2856 if (ret < 0) {
2857 error_report("error writing zeroes at sector %" PRId64 ": %s",
2858 sector_num, strerror(-ret));
2859 return ret;
2860 }
2861 sector_num += n;
2862 }
2863 }
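
/*
 * Illustrative usage sketch (hypothetical helper): fully zero a device,
 * letting the driver unmap zeroed ranges where it can, e.g. before copying
 * data into it.
 */
static int example_zero_device(BlockDriverState *bs)
{
    /* Ranges that already read back as zeroes are skipped, so this is
     * cheap on a freshly created sparse image. */
    return bdrv_make_zero(bs, BDRV_REQ_MAY_UNMAP);
}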
2864
2865 int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
2866 {
2867 QEMUIOVector qiov;
2868 struct iovec iov = {
2869 .iov_base = (void *)buf,
2870 .iov_len = bytes,
2871 };
2872 int ret;
2873
2874 if (bytes < 0) {
2875 return -EINVAL;
2876 }
2877
2878 qemu_iovec_init_external(&qiov, &iov, 1);
2879 ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
2880 if (ret < 0) {
2881 return ret;
2882 }
2883
2884 return bytes;
2885 }
2886
2887 int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
2888 {
2889 int ret;
2890
2891 ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
2892 if (ret < 0) {
2893 return ret;
2894 }
2895
2896 return qiov->size;
2897 }
2898
2899 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
2900 const void *buf, int bytes)
2901 {
2902 QEMUIOVector qiov;
2903 struct iovec iov = {
2904 .iov_base = (void *) buf,
2905 .iov_len = bytes,
2906 };
2907
2908 if (bytes < 0) {
2909 return -EINVAL;
2910 }
2911
2912 qemu_iovec_init_external(&qiov, &iov, 1);
2913 return bdrv_pwritev(bs, offset, &qiov);
2914 }
2915
2916 /*
2917 * Writes to the file and ensures that no writes are reordered across this
2918 * request (acts as a barrier)
2919 *
2920 * Returns 0 on success, -errno in error cases.
2921 */
2922 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
2923 const void *buf, int count)
2924 {
2925 int ret;
2926
2927 ret = bdrv_pwrite(bs, offset, buf, count);
2928 if (ret < 0) {
2929 return ret;
2930 }
2931
2932 /* No flush needed for cache modes that already do it */
2933 if (bs->enable_write_cache) {
2934 bdrv_flush(bs);
2935 }
2936
2937 return 0;
2938 }
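
/*
 * Illustrative usage sketch (hypothetical helper; the 8-byte field and its
 * offset are made up): format drivers use bdrv_pwrite_sync() on bs->file
 * when updating on-disk metadata that later writes must not overtake.
 */
static int example_update_header_field(BlockDriverState *bs,
                                       uint64_t new_value_be)
{
    const int64_t field_offset = 24;    /* hypothetical header offset */

    /* The flush inside bdrv_pwrite_sync() guarantees the header is stable
     * on disk before any write that depends on it. */
    return bdrv_pwrite_sync(bs->file, field_offset,
                            &new_value_be, sizeof(new_value_be));
}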
2939
2940 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
2941 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
2942 {
2943 /* Perform I/O through a temporary buffer so that users who scribble over
2944 * their read buffer while the operation is in progress do not end up
2945 * modifying the image file. This is critical for zero-copy guest I/O
2946 * where anything might happen inside guest memory.
2947 */
2948 void *bounce_buffer;
2949
2950 BlockDriver *drv = bs->drv;
2951 struct iovec iov;
2952 QEMUIOVector bounce_qiov;
2953 int64_t cluster_sector_num;
2954 int cluster_nb_sectors;
2955 size_t skip_bytes;
2956 int ret;
2957
2958 /* Cover the entire cluster so no additional backing file I/O is required
2959  * when allocating a cluster in the image file.
2960 */
2961 bdrv_round_to_clusters(bs, sector_num, nb_sectors,
2962 &cluster_sector_num, &cluster_nb_sectors);
2963
2964 trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
2965 cluster_sector_num, cluster_nb_sectors);
2966
2967 iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
2968 iov.iov_base = bounce_buffer = qemu_blockalign(bs, iov.iov_len);
2969 qemu_iovec_init_external(&bounce_qiov, &iov, 1);
2970
2971 ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
2972 &bounce_qiov);
2973 if (ret < 0) {
2974 goto err;
2975 }
2976
2977 if (drv->bdrv_co_write_zeroes &&
2978 buffer_is_zero(bounce_buffer, iov.iov_len)) {
2979 ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
2980 cluster_nb_sectors, 0);
2981 } else {
2982 /* This does not change the data on the disk, so it is not necessary
2983  * to flush even in cache=writethrough mode.
2984 */
2985 ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
2986 &bounce_qiov);
2987 }
2988
2989 if (ret < 0) {
2990 /* It might be okay to ignore write errors for guest requests. If this
2991 * is a deliberate copy-on-read then we don't want to ignore the error.
2992 * Simply report it in all cases.
2993 */
2994 goto err;
2995 }
2996
2997 skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
2998 qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
2999 nb_sectors * BDRV_SECTOR_SIZE);
3000
3001 err:
3002 qemu_vfree(bounce_buffer);
3003 return ret;
3004 }
3005
3006 /*
3007 * Forwards an already correctly aligned request to the BlockDriver. This
3008 * handles copy on read and zeroing after EOF; any other features must be
3009 * implemented by the caller.
3010 */
3011 static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
3012 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3013 int64_t align, QEMUIOVector *qiov, int flags)
3014 {
3015 BlockDriver *drv = bs->drv;
3016 int ret;
3017
3018 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3019 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
3020
3021 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3022 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3023
3024 /* Handle Copy on Read and associated serialisation */
3025 if (flags & BDRV_REQ_COPY_ON_READ) {
3026 /* If we touch the same cluster it counts as an overlap. This
3027 * guarantees that allocating writes will be serialized and not race
3028 * with each other for the same cluster. For example, in copy-on-read
3029 * it ensures that the CoR read and write operations are atomic and
3030 * guest writes cannot interleave between them. */
3031 mark_request_serialising(req, bdrv_get_cluster_size(bs));
3032 }
3033
3034 wait_serialising_requests(req);
3035
3036 if (flags & BDRV_REQ_COPY_ON_READ) {
3037 int pnum;
3038
3039 ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
3040 if (ret < 0) {
3041 goto out;
3042 }
3043
3044 if (!ret || pnum != nb_sectors) {
3045 ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
3046 goto out;
3047 }
3048 }
3049
3050 /* Forward the request to the BlockDriver */
3051 if (!(bs->zero_beyond_eof && bs->growable)) {
3052 ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
3053 } else {
3054 /* Read zeroes after EOF of growable BDSes */
3055 int64_t len, total_sectors, max_nb_sectors;
3056
3057 len = bdrv_getlength(bs);
3058 if (len < 0) {
3059 ret = len;
3060 goto out;
3061 }
3062
3063 total_sectors = DIV_ROUND_UP(len, BDRV_SECTOR_SIZE);
3064 max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
3065 align >> BDRV_SECTOR_BITS);
3066 if (max_nb_sectors > 0) {
3067 ret = drv->bdrv_co_readv(bs, sector_num,
3068 MIN(nb_sectors, max_nb_sectors), qiov);
3069 } else {
3070 ret = 0;
3071 }
3072
3073 /* Reading beyond end of file is supposed to produce zeroes */
3074 if (ret == 0 && total_sectors < sector_num + nb_sectors) {
3075 uint64_t offset = MAX(0, total_sectors - sector_num);
3076 uint64_t bytes = (sector_num + nb_sectors - offset) *
3077 BDRV_SECTOR_SIZE;
3078 qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
3079 }
3080 }
3081
3082 out:
3083 return ret;
3084 }
3085
3086 /*
3087 * Handle a read request in coroutine context
3088 */
3089 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
3090 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3091 BdrvRequestFlags flags)
3092 {
3093 BlockDriver *drv = bs->drv;
3094 BdrvTrackedRequest req;
3095
3096 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3097 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3098 uint8_t *head_buf = NULL;
3099 uint8_t *tail_buf = NULL;
3100 QEMUIOVector local_qiov;
3101 bool use_local_qiov = false;
3102 int ret;
3103
3104 if (!drv) {
3105 return -ENOMEDIUM;
3106 }
3107 if (bdrv_check_byte_request(bs, offset, bytes)) {
3108 return -EIO;
3109 }
3110
3111 if (bs->copy_on_read) {
3112 flags |= BDRV_REQ_COPY_ON_READ;
3113 }
3114
3115 /* throttling disk I/O */
3116 if (bs->io_limits_enabled) {
3117 bdrv_io_limits_intercept(bs, bytes, false);
3118 }
3119
3120 /* Align read if necessary by padding qiov */
3121 if (offset & (align - 1)) {
3122 head_buf = qemu_blockalign(bs, align);
3123 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3124 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3125 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3126 use_local_qiov = true;
3127
3128 bytes += offset & (align - 1);
3129 offset = offset & ~(align - 1);
3130 }
3131
3132 if ((offset + bytes) & (align - 1)) {
3133 if (!use_local_qiov) {
3134 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3135 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3136 use_local_qiov = true;
3137 }
3138 tail_buf = qemu_blockalign(bs, align);
3139 qemu_iovec_add(&local_qiov, tail_buf,
3140 align - ((offset + bytes) & (align - 1)));
3141
3142 bytes = ROUND_UP(bytes, align);
3143 }
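
    /*
     * Worked example (align = 4096): a request for offset = 1536,
     * bytes = 1024 gains a 1536-byte head pad and a 1536-byte tail pad
     * around the caller's iovecs and is issued as a single aligned request
     * with offset = 0, bytes = 4096; the padding is read into
     * head_buf/tail_buf and thrown away.
     */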
3144
3145 tracked_request_begin(&req, bs, offset, bytes, false);
3146 ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
3147 use_local_qiov ? &local_qiov : qiov,
3148 flags);
3149 tracked_request_end(&req);
3150
3151 if (use_local_qiov) {
3152 qemu_iovec_destroy(&local_qiov);
3153 qemu_vfree(head_buf);
3154 qemu_vfree(tail_buf);
3155 }
3156
3157 return ret;
3158 }
3159
3160 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
3161 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3162 BdrvRequestFlags flags)
3163 {
3164 if (nb_sectors < 0 || nb_sectors > (UINT_MAX >> BDRV_SECTOR_BITS)) {
3165 return -EINVAL;
3166 }
3167
3168 return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
3169 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3170 }
3171
3172 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
3173 int nb_sectors, QEMUIOVector *qiov)
3174 {
3175 trace_bdrv_co_readv(bs, sector_num, nb_sectors);
3176
3177 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
3178 }
3179
3180 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
3181 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
3182 {
3183 trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
3184
3185 return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
3186 BDRV_REQ_COPY_ON_READ);
3187 }
3188
3189 /* If no limit is specified in the BlockLimits, use a default
3190  * of 32768 512-byte sectors (16 MiB) per request.
3191 */
3192 #define MAX_WRITE_ZEROES_DEFAULT 32768
3193
3194 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
3195 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
3196 {
3197 BlockDriver *drv = bs->drv;
3198 QEMUIOVector qiov;
3199 struct iovec iov = {0};
3200 int ret = 0;
3201
3202 int max_write_zeroes = bs->bl.max_write_zeroes ?
3203 bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT;
3204
3205 while (nb_sectors > 0 && !ret) {
3206 int num = nb_sectors;
3207
3208 /* Align request. Block drivers can expect the "bulk" of the request
3209 * to be aligned.
3210 */
3211 if (bs->bl.write_zeroes_alignment
3212 && num > bs->bl.write_zeroes_alignment) {
3213 if (sector_num % bs->bl.write_zeroes_alignment != 0) {
3214 /* Make a small request up to the first aligned sector. */
3215 num = bs->bl.write_zeroes_alignment;
3216 num -= sector_num % bs->bl.write_zeroes_alignment;
3217 } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
3218 /* Shorten the request to the last aligned sector. num cannot
3219 * underflow because num > bs->bl.write_zeroes_alignment.
3220 */
3221 num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
3222 }
3223 }
3224
3225 /* limit request size */
3226 if (num > max_write_zeroes) {
3227 num = max_write_zeroes;
3228 }
3229
3230 ret = -ENOTSUP;
3231 /* First try the efficient write zeroes operation */
3232 if (drv->bdrv_co_write_zeroes) {
3233 ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
3234 }
3235
3236 if (ret == -ENOTSUP) {
3237 /* Fall back to bounce buffer if write zeroes is unsupported */
3238 iov.iov_len = num * BDRV_SECTOR_SIZE;
3239 if (iov.iov_base == NULL) {
3240 iov.iov_base = qemu_blockalign(bs, num * BDRV_SECTOR_SIZE);
3241 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
3242 }
3243 qemu_iovec_init_external(&qiov, &iov, 1);
3244
3245 ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
3246
3247 /* Keep the bounce buffer around if it is big enough for
3248  * all future requests.
3249 */
3250 if (num < max_write_zeroes) {
3251 qemu_vfree(iov.iov_base);
3252 iov.iov_base = NULL;
3253 }
3254 }
3255
3256 sector_num += num;
3257 nb_sectors -= num;
3258 }
3259
3260 qemu_vfree(iov.iov_base);
3261 return ret;
3262 }
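
/*
 * Worked example (write_zeroes_alignment = 128): zeroing sector_num = 100,
 * nb_sectors = 300 is issued as three driver calls: 28 sectors up to the
 * first aligned sector (100..127), an aligned bulk of 256 sectors
 * (128..383), and a 16-sector tail (384..399).
 */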
3263
3264 /*
3265 * Forwards an already correctly aligned write request to the BlockDriver.
3266 */
3267 static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
3268 BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
3269 QEMUIOVector *qiov, int flags)
3270 {
3271 BlockDriver *drv = bs->drv;
3272 bool waited;
3273 int ret;
3274
3275 int64_t sector_num = offset >> BDRV_SECTOR_BITS;
3276 unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
3277
3278 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
3279 assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
3280
3281 waited = wait_serialising_requests(req);
3282 assert(!waited || !req->serialising);
3283 assert(req->overlap_offset <= offset);
3284 assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
3285
3286 ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
3287
3288 if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
3289 !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
3290 qemu_iovec_is_zero(qiov)) {
3291 flags |= BDRV_REQ_ZERO_WRITE;
3292 if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
3293 flags |= BDRV_REQ_MAY_UNMAP;
3294 }
3295 }
3296
3297 if (ret < 0) {
3298 /* Do nothing, write notifier decided to fail this request */
3299 } else if (flags & BDRV_REQ_ZERO_WRITE) {
3300 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_ZERO);
3301 ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
3302 } else {
3303 BLKDBG_EVENT(bs, BLKDBG_PWRITEV);
3304 ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
3305 }
3306 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_DONE);
3307
3308 if (ret == 0 && !bs->enable_write_cache) {
3309 ret = bdrv_co_flush(bs);
3310 }
3311
3312 bdrv_set_dirty(bs, sector_num, nb_sectors);
3313
3314 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
3315 bs->wr_highest_sector = sector_num + nb_sectors - 1;
3316 }
3317 if (bs->growable && ret >= 0) {
3318 bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
3319 }
3320
3321 return ret;
3322 }
3323
3324 /*
3325 * Handle a write request in coroutine context
3326 */
3327 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
3328 int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
3329 BdrvRequestFlags flags)
3330 {
3331 BdrvTrackedRequest req;
3332 /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
3333 uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
3334 uint8_t *head_buf = NULL;
3335 uint8_t *tail_buf = NULL;
3336 QEMUIOVector local_qiov;
3337 bool use_local_qiov = false;
3338 int ret;
3339
3340 if (!bs->drv) {
3341 return -ENOMEDIUM;
3342 }
3343 if (bs->read_only) {
3344 return -EACCES;
3345 }
3346 if (bdrv_check_byte_request(bs, offset, bytes)) {
3347 return -EIO;
3348 }
3349
3350 /* throttling disk I/O */
3351 if (bs->io_limits_enabled) {
3352 bdrv_io_limits_intercept(bs, bytes, true);
3353 }
3354
3355 /*
3356 * Align write if necessary by performing a read-modify-write cycle.
3357 * Pad qiov with the read parts and be sure to have a tracked request not
3358 * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
3359 */
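
    /*
     * Worked RMW example (align = 4096): a write of bytes = 8192 at
     * offset = 1536 first reads the 4096-byte head block [0, 4096) and the
     * tail block [8192, 12288); the parts of those blocks outside the
     * caller's range pad the qiov, and one aligned write of [0, 12288) is
     * issued.
     */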
3360 tracked_request_begin(&req, bs, offset, bytes, true);
3361
3362 if (offset & (align - 1)) {
3363 QEMUIOVector head_qiov;
3364 struct iovec head_iov;
3365
3366 mark_request_serialising(&req, align);
3367 wait_serialising_requests(&req);
3368
3369 head_buf = qemu_blockalign(bs, align);
3370 head_iov = (struct iovec) {
3371 .iov_base = head_buf,
3372 .iov_len = align,
3373 };
3374 qemu_iovec_init_external(&head_qiov, &head_iov, 1);
3375
3376 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_HEAD);
3377 ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
3378 align, &head_qiov, 0);
3379 if (ret < 0) {
3380 goto fail;
3381 }
3382 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
3383
3384 qemu_iovec_init(&local_qiov, qiov->niov + 2);
3385 qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
3386 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3387 use_local_qiov = true;
3388
3389 bytes += offset & (align - 1);
3390 offset = offset & ~(align - 1);
3391 }
3392
3393 if ((offset + bytes) & (align - 1)) {
3394 QEMUIOVector tail_qiov;
3395 struct iovec tail_iov;
3396 size_t tail_bytes;
3397 bool waited;
3398
3399 mark_request_serialising(&req, align);
3400 waited = wait_serialising_requests(&req);
3401 assert(!waited || !use_local_qiov);
3402
3403 tail_buf = qemu_blockalign(bs, align);
3404 tail_iov = (struct iovec) {
3405 .iov_base = tail_buf,
3406 .iov_len = align,
3407 };
3408 qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
3409
3410 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_TAIL);
3411 ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
3412 align, &tail_qiov, 0);
3413 if (ret < 0) {
3414 goto fail;
3415 }
3416 BLKDBG_EVENT(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
3417
3418 if (!use_local_qiov) {
3419 qemu_iovec_init(&local_qiov, qiov->niov + 1);
3420 qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
3421 use_local_qiov = true;
3422 }
3423
3424 tail_bytes = (offset + bytes) & (align - 1);
3425 qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
3426
3427 bytes = ROUND_UP(bytes, align);
3428 }
3429
3430 ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
3431 use_local_qiov ? &local_qiov : qiov,
3432 flags);
3433
3434 fail:
3435 tracked_request_end(&req);
3436
3437 if (use_local_qiov) {
3438 qemu_iovec_destroy(&local_qiov);
3439 }
3440 qemu_vfree(head_buf);
3441 qemu_vfree(tail_buf);
3442
3443 return ret;
3444 }
3445
3446 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
3447 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
3448 BdrvRequestFlags flags)
3449 {
3450 if (nb_sectors < 0 || nb_sectors > (INT_MAX >> BDRV_SECTOR_BITS)) {
3451 return -EINVAL;
3452 }
3453
3454 return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
3455 nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
3456 }
3457
3458 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
3459 int nb_sectors, QEMUIOVector *qiov)
3460 {
3461 trace_bdrv_co_writev(bs, sector_num, nb_sectors);
3462
3463 return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
3464 }
3465
3466 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
3467 int64_t sector_num, int nb_sectors,
3468 BdrvRequestFlags flags)
3469 {
3470 trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
3471
3472 if (!(bs->open_flags & BDRV_O_UNMAP)) {
3473 flags &= ~BDRV_REQ_MAY_UNMAP;
3474 }
3475
3476 return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
3477 BDRV_REQ_ZERO_WRITE | flags);
3478 }
3479
3480 /**
3481 * Truncate file to 'offset' bytes (needed only for file protocols)
3482 */
3483 int bdrv_truncate(BlockDriverState *bs, int64_t offset)
3484 {
3485 BlockDriver *drv = bs->drv;
3486 int ret;
3487 if (!drv)
3488 return -ENOMEDIUM;
3489 if (!drv->bdrv_truncate)
3490 return -ENOTSUP;
3491 if (bs->read_only)
3492 return -EACCES;
3493 if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, NULL)) {
3494 return -EBUSY;
3495 }
3496 ret = drv->bdrv_truncate(bs, offset);
3497 if (ret == 0) {
3498 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3499 bdrv_dev_resize_cb(bs);
3500 }
3501 return ret;
3502 }
3503
3504 /**
3505 * Length of an allocated file in bytes. Sparse files are counted by their
3506 * actual allocated space. Returns < 0 on error or if unknown.
3507 */
3508 int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
3509 {
3510 BlockDriver *drv = bs->drv;
3511 if (!drv) {
3512 return -ENOMEDIUM;
3513 }
3514 if (drv->bdrv_get_allocated_file_size) {
3515 return drv->bdrv_get_allocated_file_size(bs);
3516 }
3517 if (bs->file) {
3518 return bdrv_get_allocated_file_size(bs->file);
3519 }
3520 return -ENOTSUP;
3521 }
3522
3523 /**
3524 * Length of a file in bytes. Returns < 0 on error or if unknown.
3525 */
3526 int64_t bdrv_getlength(BlockDriverState *bs)
3527 {
3528 BlockDriver *drv = bs->drv;
3529 if (!drv)
3530 return -ENOMEDIUM;
3531
3532 if (drv->has_variable_length) {
3533 int ret = refresh_total_sectors(bs, bs->total_sectors);
3534 if (ret < 0) {
3535 return ret;
3536 }
3537 }
3538 return bs->total_sectors * BDRV_SECTOR_SIZE;
3539 }
3540
3541 /* return 0 as number of sectors if no device present or error */
3542 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
3543 {
3544 int64_t length;
3545 length = bdrv_getlength(bs);
3546 if (length < 0)
3547 length = 0;
3548 else
3549 length = length >> BDRV_SECTOR_BITS;
3550 *nb_sectors_ptr = length;
3551 }
3552
3553 void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
3554 BlockdevOnError on_write_error)
3555 {
3556 bs->on_read_error = on_read_error;