[qemu.git] / block / qcow2.c
1 /*
2 * Block driver for the QCOW version 2 format
3 *
4 * Copyright (c) 2004-2006 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #include "qemu/osdep.h"
26
27 #include "block/qdict.h"
28 #include "sysemu/block-backend.h"
29 #include "qemu/main-loop.h"
30 #include "qemu/module.h"
31 #include "qcow2.h"
32 #include "qemu/error-report.h"
33 #include "qapi/error.h"
34 #include "qapi/qapi-events-block-core.h"
35 #include "qapi/qmp/qdict.h"
36 #include "qapi/qmp/qstring.h"
37 #include "trace.h"
38 #include "qemu/option_int.h"
39 #include "qemu/cutils.h"
40 #include "qemu/bswap.h"
41 #include "qapi/qobject-input-visitor.h"
42 #include "qapi/qapi-visit-block-core.h"
43 #include "crypto.h"
44 #include "block/aio_task.h"
45
46 /*
47 Differences with QCOW:
48
49 - Support for multiple incremental snapshots.
50 - Memory management by reference counts.
 51     - Clusters which have a reference count of one have the
 52       QCOW_OFLAG_COPIED bit set to optimize write performance.
53 - Size of compressed clusters is stored in sectors to reduce bit usage
54 in the cluster offsets.
55 - Support for storing additional data (such as the VM state) in the
56 snapshots.
57 - If a backing store is used, the cluster size is not constrained
58 (could be backported to QCOW).
 59     - L2 tables always have a size of one cluster.
60 */
61
62
63 typedef struct {
64 uint32_t magic;
65 uint32_t len;
66 } QEMU_PACKED QCowExtension;
67
68 #define QCOW2_EXT_MAGIC_END 0
69 #define QCOW2_EXT_MAGIC_BACKING_FORMAT 0xe2792aca
70 #define QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857
71 #define QCOW2_EXT_MAGIC_CRYPTO_HEADER 0x0537be77
72 #define QCOW2_EXT_MAGIC_BITMAPS 0x23852875
73 #define QCOW2_EXT_MAGIC_DATA_FILE 0x44415441
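
/*
 * Header extensions are stored back to back after the image header: a
 * big-endian 32-bit magic, a big-endian 32-bit data length, then the data
 * itself, padded to an 8-byte boundary.  The list ends with an extension
 * whose magic is QCOW2_EXT_MAGIC_END, or when the backing file name /
 * first cluster is reached; qcow2_read_extensions() below walks it.
 */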
74
75 static int coroutine_fn
76 qcow2_co_preadv_compressed(BlockDriverState *bs,
77 uint64_t cluster_descriptor,
78 uint64_t offset,
79 uint64_t bytes,
80 QEMUIOVector *qiov,
81 size_t qiov_offset);
82
83 static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename)
84 {
85 const QCowHeader *cow_header = (const void *)buf;
86
87 if (buf_size >= sizeof(QCowHeader) &&
88 be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
89 be32_to_cpu(cow_header->version) >= 2)
90 return 100;
91 else
92 return 0;
93 }
94
95
96 static ssize_t qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset,
97 uint8_t *buf, size_t buflen,
98 void *opaque, Error **errp)
99 {
100 BlockDriverState *bs = opaque;
101 BDRVQcow2State *s = bs->opaque;
102 ssize_t ret;
103
104 if ((offset + buflen) > s->crypto_header.length) {
105 error_setg(errp, "Request for data outside of extension header");
106 return -1;
107 }
108
109 ret = bdrv_pread(bs->file,
110 s->crypto_header.offset + offset, buf, buflen);
111 if (ret < 0) {
112 error_setg_errno(errp, -ret, "Could not read encryption header");
113 return -1;
114 }
115 return ret;
116 }
117
118
119 static ssize_t qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen,
120 void *opaque, Error **errp)
121 {
122 BlockDriverState *bs = opaque;
123 BDRVQcow2State *s = bs->opaque;
124 int64_t ret;
125 int64_t clusterlen;
126
127 ret = qcow2_alloc_clusters(bs, headerlen);
128 if (ret < 0) {
129 error_setg_errno(errp, -ret,
130 "Cannot allocate cluster for LUKS header size %zu",
131 headerlen);
132 return -1;
133 }
134
135 s->crypto_header.length = headerlen;
136 s->crypto_header.offset = ret;
137
138 /*
139 * Zero fill all space in cluster so it has predictable
140 * content, as we may not initialize some regions of the
141 * header (eg only 1 out of 8 key slots will be initialized)
142 */
143 clusterlen = size_to_clusters(s, headerlen) * s->cluster_size;
144 assert(qcow2_pre_write_overlap_check(bs, 0, ret, clusterlen, false) == 0);
145 ret = bdrv_pwrite_zeroes(bs->file,
146 ret,
147 clusterlen, 0);
148 if (ret < 0) {
149 error_setg_errno(errp, -ret, "Could not zero fill encryption header");
150 return -1;
151 }
152
153 return ret;
154 }
155
156
157 static ssize_t qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset,
158 const uint8_t *buf, size_t buflen,
159 void *opaque, Error **errp)
160 {
161 BlockDriverState *bs = opaque;
162 BDRVQcow2State *s = bs->opaque;
163 ssize_t ret;
164
165 if ((offset + buflen) > s->crypto_header.length) {
166 error_setg(errp, "Request for data outside of extension header");
167 return -1;
168 }
169
170 ret = bdrv_pwrite(bs->file,
171 s->crypto_header.offset + offset, buf, buflen);
172 if (ret < 0) {
 173             error_setg_errno(errp, -ret, "Could not write encryption header");
174 return -1;
175 }
176 return ret;
177 }
178
179 static QDict*
180 qcow2_extract_crypto_opts(QemuOpts *opts, const char *fmt, Error **errp)
181 {
182 QDict *cryptoopts_qdict;
183 QDict *opts_qdict;
184
185 /* Extract "encrypt." options into a qdict */
186 opts_qdict = qemu_opts_to_qdict(opts, NULL);
187 qdict_extract_subqdict(opts_qdict, &cryptoopts_qdict, "encrypt.");
188 qobject_unref(opts_qdict);
189 qdict_put_str(cryptoopts_qdict, "format", fmt);
190 return cryptoopts_qdict;
191 }
192
193 /*
 194  * Read qcow2 extensions and fill bs.
 195  * Start reading from start_offset.
 196  * Finish reading upon a magic of value 0 or when end_offset is reached.
 197  * Unknown magics are skipped (future extensions this version knows nothing about).
 198  * Return 0 upon success, non-0 otherwise.
199 */
200 static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
201 uint64_t end_offset, void **p_feature_table,
202 int flags, bool *need_update_header,
203 Error **errp)
204 {
205 BDRVQcow2State *s = bs->opaque;
206 QCowExtension ext;
207 uint64_t offset;
208 int ret;
209 Qcow2BitmapHeaderExt bitmaps_ext;
210
211 if (need_update_header != NULL) {
212 *need_update_header = false;
213 }
214
215 #ifdef DEBUG_EXT
 216     printf("qcow2_read_extensions: start=%" PRIu64 " end=%" PRIu64 "\n", start_offset, end_offset);
217 #endif
218 offset = start_offset;
219 while (offset < end_offset) {
220
221 #ifdef DEBUG_EXT
222 /* Sanity check */
223 if (offset > s->cluster_size)
 224             printf("qcow2_read_extension: suspicious offset %" PRIu64 "\n", offset);
 225 
 226         printf("attempting to read extended header in offset %" PRIu64 "\n", offset);
227 #endif
228
229 ret = bdrv_pread(bs->file, offset, &ext, sizeof(ext));
230 if (ret < 0) {
231 error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: "
232 "pread fail from offset %" PRIu64, offset);
233 return 1;
234 }
235 ext.magic = be32_to_cpu(ext.magic);
236 ext.len = be32_to_cpu(ext.len);
237 offset += sizeof(ext);
238 #ifdef DEBUG_EXT
239 printf("ext.magic = 0x%x\n", ext.magic);
240 #endif
241 if (offset > end_offset || ext.len > end_offset - offset) {
242 error_setg(errp, "Header extension too large");
243 return -EINVAL;
244 }
245
246 switch (ext.magic) {
247 case QCOW2_EXT_MAGIC_END:
248 return 0;
249
250 case QCOW2_EXT_MAGIC_BACKING_FORMAT:
251 if (ext.len >= sizeof(bs->backing_format)) {
252 error_setg(errp, "ERROR: ext_backing_format: len=%" PRIu32
253 " too large (>=%zu)", ext.len,
254 sizeof(bs->backing_format));
255 return 2;
256 }
257 ret = bdrv_pread(bs->file, offset, bs->backing_format, ext.len);
258 if (ret < 0) {
259 error_setg_errno(errp, -ret, "ERROR: ext_backing_format: "
260 "Could not read format name");
261 return 3;
262 }
263 bs->backing_format[ext.len] = '\0';
264 s->image_backing_format = g_strdup(bs->backing_format);
265 #ifdef DEBUG_EXT
266 printf("Qcow2: Got format extension %s\n", bs->backing_format);
267 #endif
268 break;
269
270 case QCOW2_EXT_MAGIC_FEATURE_TABLE:
271 if (p_feature_table != NULL) {
272 void *feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
 273                 ret = bdrv_pread(bs->file, offset, feature_table, ext.len);
274 if (ret < 0) {
275 error_setg_errno(errp, -ret, "ERROR: ext_feature_table: "
276 "Could not read table");
277 return ret;
278 }
279
280 *p_feature_table = feature_table;
281 }
282 break;
283
284 case QCOW2_EXT_MAGIC_CRYPTO_HEADER: {
285 unsigned int cflags = 0;
286 if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
287 error_setg(errp, "CRYPTO header extension only "
288 "expected with LUKS encryption method");
289 return -EINVAL;
290 }
291 if (ext.len != sizeof(Qcow2CryptoHeaderExtension)) {
292 error_setg(errp, "CRYPTO header extension size %u, "
293 "but expected size %zu", ext.len,
294 sizeof(Qcow2CryptoHeaderExtension));
295 return -EINVAL;
296 }
297
298 ret = bdrv_pread(bs->file, offset, &s->crypto_header, ext.len);
299 if (ret < 0) {
300 error_setg_errno(errp, -ret,
301 "Unable to read CRYPTO header extension");
302 return ret;
303 }
304 s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset);
305 s->crypto_header.length = be64_to_cpu(s->crypto_header.length);
306
307 if ((s->crypto_header.offset % s->cluster_size) != 0) {
308 error_setg(errp, "Encryption header offset '%" PRIu64 "' is "
309 "not a multiple of cluster size '%u'",
310 s->crypto_header.offset, s->cluster_size);
311 return -EINVAL;
312 }
313
314 if (flags & BDRV_O_NO_IO) {
315 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
316 }
317 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
318 qcow2_crypto_hdr_read_func,
319 bs, cflags, QCOW2_MAX_THREADS, errp);
320 if (!s->crypto) {
321 return -EINVAL;
322 }
323 } break;
324
325 case QCOW2_EXT_MAGIC_BITMAPS:
326 if (ext.len != sizeof(bitmaps_ext)) {
 327                 error_setg(errp, "bitmaps_ext: "
 328                            "Invalid extension length");
329 return -EINVAL;
330 }
331
332 if (!(s->autoclear_features & QCOW2_AUTOCLEAR_BITMAPS)) {
333 if (s->qcow_version < 3) {
334 /* Let's be a bit more specific */
335 warn_report("This qcow2 v2 image contains bitmaps, but "
336 "they may have been modified by a program "
337 "without persistent bitmap support; so now "
338 "they must all be considered inconsistent");
339 } else {
340 warn_report("a program lacking bitmap support "
341 "modified this file, so all bitmaps are now "
342 "considered inconsistent");
343 }
344 error_printf("Some clusters may be leaked, "
345 "run 'qemu-img check -r' on the image "
346 "file to fix.");
347 if (need_update_header != NULL) {
348 /* Updating is needed to drop invalid bitmap extension. */
349 *need_update_header = true;
350 }
351 break;
352 }
353
354 ret = bdrv_pread(bs->file, offset, &bitmaps_ext, ext.len);
355 if (ret < 0) {
356 error_setg_errno(errp, -ret, "bitmaps_ext: "
357 "Could not read ext header");
358 return ret;
359 }
360
361 if (bitmaps_ext.reserved32 != 0) {
 362                 error_setg(errp, "bitmaps_ext: "
 363                            "Reserved field is not zero");
364 return -EINVAL;
365 }
366
367 bitmaps_ext.nb_bitmaps = be32_to_cpu(bitmaps_ext.nb_bitmaps);
368 bitmaps_ext.bitmap_directory_size =
369 be64_to_cpu(bitmaps_ext.bitmap_directory_size);
370 bitmaps_ext.bitmap_directory_offset =
371 be64_to_cpu(bitmaps_ext.bitmap_directory_offset);
372
373 if (bitmaps_ext.nb_bitmaps > QCOW2_MAX_BITMAPS) {
374 error_setg(errp,
375 "bitmaps_ext: Image has %" PRIu32 " bitmaps, "
376 "exceeding the QEMU supported maximum of %d",
377 bitmaps_ext.nb_bitmaps, QCOW2_MAX_BITMAPS);
378 return -EINVAL;
379 }
380
381 if (bitmaps_ext.nb_bitmaps == 0) {
382 error_setg(errp, "found bitmaps extension with zero bitmaps");
383 return -EINVAL;
384 }
385
386 if (offset_into_cluster(s, bitmaps_ext.bitmap_directory_offset)) {
387 error_setg(errp, "bitmaps_ext: "
388 "invalid bitmap directory offset");
389 return -EINVAL;
390 }
391
392 if (bitmaps_ext.bitmap_directory_size >
393 QCOW2_MAX_BITMAP_DIRECTORY_SIZE) {
394 error_setg(errp, "bitmaps_ext: "
395 "bitmap directory size (%" PRIu64 ") exceeds "
396 "the maximum supported size (%d)",
397 bitmaps_ext.bitmap_directory_size,
398 QCOW2_MAX_BITMAP_DIRECTORY_SIZE);
399 return -EINVAL;
400 }
401
402 s->nb_bitmaps = bitmaps_ext.nb_bitmaps;
403 s->bitmap_directory_offset =
404 bitmaps_ext.bitmap_directory_offset;
405 s->bitmap_directory_size =
406 bitmaps_ext.bitmap_directory_size;
407
408 #ifdef DEBUG_EXT
409 printf("Qcow2: Got bitmaps extension: "
410 "offset=%" PRIu64 " nb_bitmaps=%" PRIu32 "\n",
411 s->bitmap_directory_offset, s->nb_bitmaps);
412 #endif
413 break;
414
415 case QCOW2_EXT_MAGIC_DATA_FILE:
416 {
417 s->image_data_file = g_malloc0(ext.len + 1);
418 ret = bdrv_pread(bs->file, offset, s->image_data_file, ext.len);
419 if (ret < 0) {
420 error_setg_errno(errp, -ret,
421 "ERROR: Could not read data file name");
422 return ret;
423 }
424 #ifdef DEBUG_EXT
425 printf("Qcow2: Got external data file %s\n", s->image_data_file);
426 #endif
427 break;
428 }
429
430 default:
431 /* unknown magic - save it in case we need to rewrite the header */
432 /* If you add a new feature, make sure to also update the fast
433 * path of qcow2_make_empty() to deal with it. */
434 {
435 Qcow2UnknownHeaderExtension *uext;
436
437 uext = g_malloc0(sizeof(*uext) + ext.len);
438 uext->magic = ext.magic;
439 uext->len = ext.len;
440 QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next);
441
 442                 ret = bdrv_pread(bs->file, offset, uext->data, uext->len);
443 if (ret < 0) {
444 error_setg_errno(errp, -ret, "ERROR: unknown extension: "
445 "Could not read data");
446 return ret;
447 }
448 }
449 break;
450 }
451
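        /* Extension data is padded so that the next extension header starts
         * on an 8-byte boundary */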
452 offset += ((ext.len + 7) & ~7);
453 }
454
455 return 0;
456 }
457
458 static void cleanup_unknown_header_ext(BlockDriverState *bs)
459 {
460 BDRVQcow2State *s = bs->opaque;
461 Qcow2UnknownHeaderExtension *uext, *next;
462
463 QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) {
464 QLIST_REMOVE(uext, next);
465 g_free(uext);
466 }
467 }
468
469 static void report_unsupported_feature(Error **errp, Qcow2Feature *table,
470 uint64_t mask)
471 {
472 g_autoptr(GString) features = g_string_sized_new(60);
473
474 while (table && table->name[0] != '\0') {
475 if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) {
476 if (mask & (1ULL << table->bit)) {
477 if (features->len > 0) {
478 g_string_append(features, ", ");
479 }
480 g_string_append_printf(features, "%.46s", table->name);
481 mask &= ~(1ULL << table->bit);
482 }
483 }
484 table++;
485 }
486
487 if (mask) {
488 if (features->len > 0) {
489 g_string_append(features, ", ");
490 }
491 g_string_append_printf(features,
492 "Unknown incompatible feature: %" PRIx64, mask);
493 }
494
495 error_setg(errp, "Unsupported qcow2 feature(s): %s", features->str);
496 }
497
498 /*
499 * Sets the dirty bit and flushes afterwards if necessary.
500 *
501 * The incompatible_features bit is only set if the image file header was
502 * updated successfully. Therefore it is not required to check the return
503 * value of this function.
504 */
505 int qcow2_mark_dirty(BlockDriverState *bs)
506 {
507 BDRVQcow2State *s = bs->opaque;
508 uint64_t val;
509 int ret;
510
511 assert(s->qcow_version >= 3);
512
513 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
514 return 0; /* already dirty */
515 }
516
517 val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY);
518 ret = bdrv_pwrite(bs->file, offsetof(QCowHeader, incompatible_features),
519 &val, sizeof(val));
520 if (ret < 0) {
521 return ret;
522 }
523 ret = bdrv_flush(bs->file->bs);
524 if (ret < 0) {
525 return ret;
526 }
527
528 /* Only treat image as dirty if the header was updated successfully */
529 s->incompatible_features |= QCOW2_INCOMPAT_DIRTY;
530 return 0;
531 }
532
533 /*
534 * Clears the dirty bit and flushes before if necessary. Only call this
535 * function when there are no pending requests, it does not guard against
536 * concurrent requests dirtying the image.
537 */
538 static int qcow2_mark_clean(BlockDriverState *bs)
539 {
540 BDRVQcow2State *s = bs->opaque;
541
542 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
543 int ret;
544
545 s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY;
546
547 ret = qcow2_flush_caches(bs);
548 if (ret < 0) {
549 return ret;
550 }
551
552 return qcow2_update_header(bs);
553 }
554 return 0;
555 }
556
557 /*
558 * Marks the image as corrupt.
559 */
560 int qcow2_mark_corrupt(BlockDriverState *bs)
561 {
562 BDRVQcow2State *s = bs->opaque;
563
564 s->incompatible_features |= QCOW2_INCOMPAT_CORRUPT;
565 return qcow2_update_header(bs);
566 }
567
568 /*
569 * Marks the image as consistent, i.e., unsets the corrupt bit, and flushes
570 * before if necessary.
571 */
572 int qcow2_mark_consistent(BlockDriverState *bs)
573 {
574 BDRVQcow2State *s = bs->opaque;
575
576 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
577 int ret = qcow2_flush_caches(bs);
578 if (ret < 0) {
579 return ret;
580 }
581
582 s->incompatible_features &= ~QCOW2_INCOMPAT_CORRUPT;
583 return qcow2_update_header(bs);
584 }
585 return 0;
586 }
587
588 static void qcow2_add_check_result(BdrvCheckResult *out,
589 const BdrvCheckResult *src,
590 bool set_allocation_info)
591 {
592 out->corruptions += src->corruptions;
593 out->leaks += src->leaks;
594 out->check_errors += src->check_errors;
595 out->corruptions_fixed += src->corruptions_fixed;
596 out->leaks_fixed += src->leaks_fixed;
597
598 if (set_allocation_info) {
599 out->image_end_offset = src->image_end_offset;
600 out->bfi = src->bfi;
601 }
602 }
603
604 static int coroutine_fn qcow2_co_check_locked(BlockDriverState *bs,
605 BdrvCheckResult *result,
606 BdrvCheckMode fix)
607 {
608 BdrvCheckResult snapshot_res = {};
609 BdrvCheckResult refcount_res = {};
610 int ret;
611
612 memset(result, 0, sizeof(*result));
613
614 ret = qcow2_check_read_snapshot_table(bs, &snapshot_res, fix);
615 if (ret < 0) {
616 qcow2_add_check_result(result, &snapshot_res, false);
617 return ret;
618 }
619
620 ret = qcow2_check_refcounts(bs, &refcount_res, fix);
621 qcow2_add_check_result(result, &refcount_res, true);
622 if (ret < 0) {
623 qcow2_add_check_result(result, &snapshot_res, false);
624 return ret;
625 }
626
627 ret = qcow2_check_fix_snapshot_table(bs, &snapshot_res, fix);
628 qcow2_add_check_result(result, &snapshot_res, false);
629 if (ret < 0) {
630 return ret;
631 }
632
633 if (fix && result->check_errors == 0 && result->corruptions == 0) {
634 ret = qcow2_mark_clean(bs);
635 if (ret < 0) {
636 return ret;
637 }
638 return qcow2_mark_consistent(bs);
639 }
640 return ret;
641 }
642
643 static int coroutine_fn qcow2_co_check(BlockDriverState *bs,
644 BdrvCheckResult *result,
645 BdrvCheckMode fix)
646 {
647 BDRVQcow2State *s = bs->opaque;
648 int ret;
649
650 qemu_co_mutex_lock(&s->lock);
651 ret = qcow2_co_check_locked(bs, result, fix);
652 qemu_co_mutex_unlock(&s->lock);
653 return ret;
654 }
655
656 int qcow2_validate_table(BlockDriverState *bs, uint64_t offset,
657 uint64_t entries, size_t entry_len,
658 int64_t max_size_bytes, const char *table_name,
659 Error **errp)
660 {
661 BDRVQcow2State *s = bs->opaque;
662
663 if (entries > max_size_bytes / entry_len) {
664 error_setg(errp, "%s too large", table_name);
665 return -EFBIG;
666 }
667
668 /* Use signed INT64_MAX as the maximum even for uint64_t header fields,
669 * because values will be passed to qemu functions taking int64_t. */
670 if ((INT64_MAX - entries * entry_len < offset) ||
671 (offset_into_cluster(s, offset) != 0)) {
672 error_setg(errp, "%s offset invalid", table_name);
673 return -EINVAL;
674 }
675
676 return 0;
677 }
678
679 static const char *const mutable_opts[] = {
680 QCOW2_OPT_LAZY_REFCOUNTS,
681 QCOW2_OPT_DISCARD_REQUEST,
682 QCOW2_OPT_DISCARD_SNAPSHOT,
683 QCOW2_OPT_DISCARD_OTHER,
684 QCOW2_OPT_OVERLAP,
685 QCOW2_OPT_OVERLAP_TEMPLATE,
686 QCOW2_OPT_OVERLAP_MAIN_HEADER,
687 QCOW2_OPT_OVERLAP_ACTIVE_L1,
688 QCOW2_OPT_OVERLAP_ACTIVE_L2,
689 QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
690 QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
691 QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
692 QCOW2_OPT_OVERLAP_INACTIVE_L1,
693 QCOW2_OPT_OVERLAP_INACTIVE_L2,
694 QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
695 QCOW2_OPT_CACHE_SIZE,
696 QCOW2_OPT_L2_CACHE_SIZE,
697 QCOW2_OPT_L2_CACHE_ENTRY_SIZE,
698 QCOW2_OPT_REFCOUNT_CACHE_SIZE,
699 QCOW2_OPT_CACHE_CLEAN_INTERVAL,
700 NULL
701 };
702
703 static QemuOptsList qcow2_runtime_opts = {
704 .name = "qcow2",
705 .head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head),
706 .desc = {
707 {
708 .name = QCOW2_OPT_LAZY_REFCOUNTS,
709 .type = QEMU_OPT_BOOL,
710 .help = "Postpone refcount updates",
711 },
712 {
713 .name = QCOW2_OPT_DISCARD_REQUEST,
714 .type = QEMU_OPT_BOOL,
715 .help = "Pass guest discard requests to the layer below",
716 },
717 {
718 .name = QCOW2_OPT_DISCARD_SNAPSHOT,
719 .type = QEMU_OPT_BOOL,
720 .help = "Generate discard requests when snapshot related space "
721 "is freed",
722 },
723 {
724 .name = QCOW2_OPT_DISCARD_OTHER,
725 .type = QEMU_OPT_BOOL,
726 .help = "Generate discard requests when other clusters are freed",
727 },
728 {
729 .name = QCOW2_OPT_OVERLAP,
730 .type = QEMU_OPT_STRING,
731 .help = "Selects which overlap checks to perform from a range of "
732 "templates (none, constant, cached, all)",
733 },
734 {
735 .name = QCOW2_OPT_OVERLAP_TEMPLATE,
736 .type = QEMU_OPT_STRING,
737 .help = "Selects which overlap checks to perform from a range of "
738 "templates (none, constant, cached, all)",
739 },
740 {
741 .name = QCOW2_OPT_OVERLAP_MAIN_HEADER,
742 .type = QEMU_OPT_BOOL,
743 .help = "Check for unintended writes into the main qcow2 header",
744 },
745 {
746 .name = QCOW2_OPT_OVERLAP_ACTIVE_L1,
747 .type = QEMU_OPT_BOOL,
748 .help = "Check for unintended writes into the active L1 table",
749 },
750 {
751 .name = QCOW2_OPT_OVERLAP_ACTIVE_L2,
752 .type = QEMU_OPT_BOOL,
753 .help = "Check for unintended writes into an active L2 table",
754 },
755 {
756 .name = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
757 .type = QEMU_OPT_BOOL,
758 .help = "Check for unintended writes into the refcount table",
759 },
760 {
761 .name = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
762 .type = QEMU_OPT_BOOL,
763 .help = "Check for unintended writes into a refcount block",
764 },
765 {
766 .name = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
767 .type = QEMU_OPT_BOOL,
768 .help = "Check for unintended writes into the snapshot table",
769 },
770 {
771 .name = QCOW2_OPT_OVERLAP_INACTIVE_L1,
772 .type = QEMU_OPT_BOOL,
773 .help = "Check for unintended writes into an inactive L1 table",
774 },
775 {
776 .name = QCOW2_OPT_OVERLAP_INACTIVE_L2,
777 .type = QEMU_OPT_BOOL,
778 .help = "Check for unintended writes into an inactive L2 table",
779 },
780 {
781 .name = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
782 .type = QEMU_OPT_BOOL,
783 .help = "Check for unintended writes into the bitmap directory",
784 },
785 {
786 .name = QCOW2_OPT_CACHE_SIZE,
787 .type = QEMU_OPT_SIZE,
788 .help = "Maximum combined metadata (L2 tables and refcount blocks) "
789 "cache size",
790 },
791 {
792 .name = QCOW2_OPT_L2_CACHE_SIZE,
793 .type = QEMU_OPT_SIZE,
794 .help = "Maximum L2 table cache size",
795 },
796 {
797 .name = QCOW2_OPT_L2_CACHE_ENTRY_SIZE,
798 .type = QEMU_OPT_SIZE,
799 .help = "Size of each entry in the L2 cache",
800 },
801 {
802 .name = QCOW2_OPT_REFCOUNT_CACHE_SIZE,
803 .type = QEMU_OPT_SIZE,
804 .help = "Maximum refcount block cache size",
805 },
806 {
807 .name = QCOW2_OPT_CACHE_CLEAN_INTERVAL,
808 .type = QEMU_OPT_NUMBER,
809 .help = "Clean unused cache entries after this time (in seconds)",
810 },
811 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.",
812 "ID of secret providing qcow2 AES key or LUKS passphrase"),
813 { /* end of list */ }
814 },
815 };
816
817 static const char *overlap_bool_option_names[QCOW2_OL_MAX_BITNR] = {
818 [QCOW2_OL_MAIN_HEADER_BITNR] = QCOW2_OPT_OVERLAP_MAIN_HEADER,
819 [QCOW2_OL_ACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L1,
820 [QCOW2_OL_ACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L2,
821 [QCOW2_OL_REFCOUNT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
822 [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
823 [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
824 [QCOW2_OL_INACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L1,
825 [QCOW2_OL_INACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L2,
826 [QCOW2_OL_BITMAP_DIRECTORY_BITNR] = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
827 };
828
829 static void cache_clean_timer_cb(void *opaque)
830 {
831 BlockDriverState *bs = opaque;
832 BDRVQcow2State *s = bs->opaque;
833 qcow2_cache_clean_unused(s->l2_table_cache);
834 qcow2_cache_clean_unused(s->refcount_block_cache);
835 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
836 (int64_t) s->cache_clean_interval * 1000);
837 }
838
839 static void cache_clean_timer_init(BlockDriverState *bs, AioContext *context)
840 {
841 BDRVQcow2State *s = bs->opaque;
842 if (s->cache_clean_interval > 0) {
843 s->cache_clean_timer = aio_timer_new(context, QEMU_CLOCK_VIRTUAL,
844 SCALE_MS, cache_clean_timer_cb,
845 bs);
846 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
847 (int64_t) s->cache_clean_interval * 1000);
848 }
849 }
850
851 static void cache_clean_timer_del(BlockDriverState *bs)
852 {
853 BDRVQcow2State *s = bs->opaque;
854 if (s->cache_clean_timer) {
855 timer_del(s->cache_clean_timer);
856 timer_free(s->cache_clean_timer);
857 s->cache_clean_timer = NULL;
858 }
859 }
860
861 static void qcow2_detach_aio_context(BlockDriverState *bs)
862 {
863 cache_clean_timer_del(bs);
864 }
865
866 static void qcow2_attach_aio_context(BlockDriverState *bs,
867 AioContext *new_context)
868 {
869 cache_clean_timer_init(bs, new_context);
870 }
871
872 static void read_cache_sizes(BlockDriverState *bs, QemuOpts *opts,
873 uint64_t *l2_cache_size,
874 uint64_t *l2_cache_entry_size,
875 uint64_t *refcount_cache_size, Error **errp)
876 {
877 BDRVQcow2State *s = bs->opaque;
878 uint64_t combined_cache_size, l2_cache_max_setting;
879 bool l2_cache_size_set, refcount_cache_size_set, combined_cache_size_set;
880 bool l2_cache_entry_size_set;
881 int min_refcount_cache = MIN_REFCOUNT_CACHE_SIZE * s->cluster_size;
882 uint64_t virtual_disk_size = bs->total_sectors * BDRV_SECTOR_SIZE;
883 uint64_t max_l2_entries = DIV_ROUND_UP(virtual_disk_size, s->cluster_size);
884 /* An L2 table is always one cluster in size so the max cache size
885 * should be a multiple of the cluster size. */
886 uint64_t max_l2_cache = ROUND_UP(max_l2_entries * l2_entry_size(s),
887 s->cluster_size);
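    /* For example: with 64 KiB clusters and standard 8-byte L2 entries, a
     * 1 TiB image has 16M guest clusters, so covering the whole disk would
     * take a 128 MiB L2 cache. */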
888
889 combined_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_CACHE_SIZE);
890 l2_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_SIZE);
891 refcount_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_REFCOUNT_CACHE_SIZE);
892 l2_cache_entry_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE);
893
894 combined_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_CACHE_SIZE, 0);
895 l2_cache_max_setting = qemu_opt_get_size(opts, QCOW2_OPT_L2_CACHE_SIZE,
896 DEFAULT_L2_CACHE_MAX_SIZE);
897 *refcount_cache_size = qemu_opt_get_size(opts,
898 QCOW2_OPT_REFCOUNT_CACHE_SIZE, 0);
899
900 *l2_cache_entry_size = qemu_opt_get_size(
901 opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE, s->cluster_size);
902
903 *l2_cache_size = MIN(max_l2_cache, l2_cache_max_setting);
904
905 if (combined_cache_size_set) {
906 if (l2_cache_size_set && refcount_cache_size_set) {
907 error_setg(errp, QCOW2_OPT_CACHE_SIZE ", " QCOW2_OPT_L2_CACHE_SIZE
908 " and " QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not be set "
909 "at the same time");
910 return;
911 } else if (l2_cache_size_set &&
912 (l2_cache_max_setting > combined_cache_size)) {
913 error_setg(errp, QCOW2_OPT_L2_CACHE_SIZE " may not exceed "
914 QCOW2_OPT_CACHE_SIZE);
915 return;
916 } else if (*refcount_cache_size > combined_cache_size) {
917 error_setg(errp, QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not exceed "
918 QCOW2_OPT_CACHE_SIZE);
919 return;
920 }
921
922 if (l2_cache_size_set) {
923 *refcount_cache_size = combined_cache_size - *l2_cache_size;
924 } else if (refcount_cache_size_set) {
925 *l2_cache_size = combined_cache_size - *refcount_cache_size;
926 } else {
927 /* Assign as much memory as possible to the L2 cache, and
928 * use the remainder for the refcount cache */
929 if (combined_cache_size >= max_l2_cache + min_refcount_cache) {
930 *l2_cache_size = max_l2_cache;
931 *refcount_cache_size = combined_cache_size - *l2_cache_size;
932 } else {
933 *refcount_cache_size =
934 MIN(combined_cache_size, min_refcount_cache);
935 *l2_cache_size = combined_cache_size - *refcount_cache_size;
936 }
937 }
938 }
939
940 /*
941 * If the L2 cache is not enough to cover the whole disk then
942 * default to 4KB entries. Smaller entries reduce the cost of
943 * loads and evictions and increase I/O performance.
944 */
945 if (*l2_cache_size < max_l2_cache && !l2_cache_entry_size_set) {
946 *l2_cache_entry_size = MIN(s->cluster_size, 4096);
947 }
948
949 /* l2_cache_size and refcount_cache_size are ensured to have at least
950 * their minimum values in qcow2_update_options_prepare() */
951
952 if (*l2_cache_entry_size < (1 << MIN_CLUSTER_BITS) ||
953 *l2_cache_entry_size > s->cluster_size ||
954 !is_power_of_2(*l2_cache_entry_size)) {
955 error_setg(errp, "L2 cache entry size must be a power of two "
956 "between %d and the cluster size (%d)",
957 1 << MIN_CLUSTER_BITS, s->cluster_size);
958 return;
959 }
960 }
961
962 typedef struct Qcow2ReopenState {
963 Qcow2Cache *l2_table_cache;
964 Qcow2Cache *refcount_block_cache;
965 int l2_slice_size; /* Number of entries in a slice of the L2 table */
966 bool use_lazy_refcounts;
967 int overlap_check;
968 bool discard_passthrough[QCOW2_DISCARD_MAX];
969 uint64_t cache_clean_interval;
970 QCryptoBlockOpenOptions *crypto_opts; /* Disk encryption runtime options */
971 } Qcow2ReopenState;
972
973 static int qcow2_update_options_prepare(BlockDriverState *bs,
974 Qcow2ReopenState *r,
975 QDict *options, int flags,
976 Error **errp)
977 {
978 BDRVQcow2State *s = bs->opaque;
979 QemuOpts *opts = NULL;
980 const char *opt_overlap_check, *opt_overlap_check_template;
981 int overlap_check_template = 0;
982 uint64_t l2_cache_size, l2_cache_entry_size, refcount_cache_size;
983 int i;
984 const char *encryptfmt;
985 QDict *encryptopts = NULL;
986 Error *local_err = NULL;
987 int ret;
988
989 qdict_extract_subqdict(options, &encryptopts, "encrypt.");
990 encryptfmt = qdict_get_try_str(encryptopts, "format");
991
992 opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort);
993 if (!qemu_opts_absorb_qdict(opts, options, errp)) {
994 ret = -EINVAL;
995 goto fail;
996 }
997
998 /* get L2 table/refcount block cache size from command line options */
999 read_cache_sizes(bs, opts, &l2_cache_size, &l2_cache_entry_size,
1000 &refcount_cache_size, &local_err);
1001 if (local_err) {
1002 error_propagate(errp, local_err);
1003 ret = -EINVAL;
1004 goto fail;
1005 }
1006
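    /* Convert the cache sizes from bytes to a number of cache entries */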
1007 l2_cache_size /= l2_cache_entry_size;
1008 if (l2_cache_size < MIN_L2_CACHE_SIZE) {
1009 l2_cache_size = MIN_L2_CACHE_SIZE;
1010 }
1011 if (l2_cache_size > INT_MAX) {
1012 error_setg(errp, "L2 cache size too big");
1013 ret = -EINVAL;
1014 goto fail;
1015 }
1016
1017 refcount_cache_size /= s->cluster_size;
1018 if (refcount_cache_size < MIN_REFCOUNT_CACHE_SIZE) {
1019 refcount_cache_size = MIN_REFCOUNT_CACHE_SIZE;
1020 }
1021 if (refcount_cache_size > INT_MAX) {
1022 error_setg(errp, "Refcount cache size too big");
1023 ret = -EINVAL;
1024 goto fail;
1025 }
1026
1027 /* alloc new L2 table/refcount block cache, flush old one */
1028 if (s->l2_table_cache) {
1029 ret = qcow2_cache_flush(bs, s->l2_table_cache);
1030 if (ret) {
1031 error_setg_errno(errp, -ret, "Failed to flush the L2 table cache");
1032 goto fail;
1033 }
1034 }
1035
1036 if (s->refcount_block_cache) {
1037 ret = qcow2_cache_flush(bs, s->refcount_block_cache);
1038 if (ret) {
1039 error_setg_errno(errp, -ret,
1040 "Failed to flush the refcount block cache");
1041 goto fail;
1042 }
1043 }
1044
1045 r->l2_slice_size = l2_cache_entry_size / l2_entry_size(s);
1046 r->l2_table_cache = qcow2_cache_create(bs, l2_cache_size,
1047 l2_cache_entry_size);
1048 r->refcount_block_cache = qcow2_cache_create(bs, refcount_cache_size,
1049 s->cluster_size);
1050 if (r->l2_table_cache == NULL || r->refcount_block_cache == NULL) {
1051 error_setg(errp, "Could not allocate metadata caches");
1052 ret = -ENOMEM;
1053 goto fail;
1054 }
1055
1056 /* New interval for cache cleanup timer */
1057 r->cache_clean_interval =
1058 qemu_opt_get_number(opts, QCOW2_OPT_CACHE_CLEAN_INTERVAL,
1059 DEFAULT_CACHE_CLEAN_INTERVAL);
1060 #ifndef CONFIG_LINUX
1061 if (r->cache_clean_interval != 0) {
1062 error_setg(errp, QCOW2_OPT_CACHE_CLEAN_INTERVAL
1063 " not supported on this host");
1064 ret = -EINVAL;
1065 goto fail;
1066 }
1067 #endif
1068 if (r->cache_clean_interval > UINT_MAX) {
1069 error_setg(errp, "Cache clean interval too big");
1070 ret = -EINVAL;
1071 goto fail;
1072 }
1073
1074 /* lazy-refcounts; flush if going from enabled to disabled */
1075 r->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS,
1076 (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS));
1077 if (r->use_lazy_refcounts && s->qcow_version < 3) {
1078 error_setg(errp, "Lazy refcounts require a qcow2 image with at least "
1079 "qemu 1.1 compatibility level");
1080 ret = -EINVAL;
1081 goto fail;
1082 }
1083
1084 if (s->use_lazy_refcounts && !r->use_lazy_refcounts) {
1085 ret = qcow2_mark_clean(bs);
1086 if (ret < 0) {
1087 error_setg_errno(errp, -ret, "Failed to disable lazy refcounts");
1088 goto fail;
1089 }
1090 }
1091
1092 /* Overlap check options */
1093 opt_overlap_check = qemu_opt_get(opts, QCOW2_OPT_OVERLAP);
1094 opt_overlap_check_template = qemu_opt_get(opts, QCOW2_OPT_OVERLAP_TEMPLATE);
1095 if (opt_overlap_check_template && opt_overlap_check &&
1096 strcmp(opt_overlap_check_template, opt_overlap_check))
1097 {
1098 error_setg(errp, "Conflicting values for qcow2 options '"
1099 QCOW2_OPT_OVERLAP "' ('%s') and '" QCOW2_OPT_OVERLAP_TEMPLATE
1100 "' ('%s')", opt_overlap_check, opt_overlap_check_template);
1101 ret = -EINVAL;
1102 goto fail;
1103 }
1104 if (!opt_overlap_check) {
1105 opt_overlap_check = opt_overlap_check_template ?: "cached";
1106 }
1107
1108 if (!strcmp(opt_overlap_check, "none")) {
1109 overlap_check_template = 0;
1110 } else if (!strcmp(opt_overlap_check, "constant")) {
1111 overlap_check_template = QCOW2_OL_CONSTANT;
1112 } else if (!strcmp(opt_overlap_check, "cached")) {
1113 overlap_check_template = QCOW2_OL_CACHED;
1114 } else if (!strcmp(opt_overlap_check, "all")) {
1115 overlap_check_template = QCOW2_OL_ALL;
1116 } else {
1117 error_setg(errp, "Unsupported value '%s' for qcow2 option "
1118 "'overlap-check'. Allowed are any of the following: "
1119 "none, constant, cached, all", opt_overlap_check);
1120 ret = -EINVAL;
1121 goto fail;
1122 }
1123
1124 r->overlap_check = 0;
1125 for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) {
1126 /* overlap-check defines a template bitmask, but every flag may be
1127 * overwritten through the associated boolean option */
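        /* e.g. overlap-check=constant plus overlap-check.inactive-l2=on enables
         * the "constant" checks and additionally the inactive L2 check */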
1128 r->overlap_check |=
1129 qemu_opt_get_bool(opts, overlap_bool_option_names[i],
1130 overlap_check_template & (1 << i)) << i;
1131 }
1132
1133 r->discard_passthrough[QCOW2_DISCARD_NEVER] = false;
1134 r->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true;
1135 r->discard_passthrough[QCOW2_DISCARD_REQUEST] =
1136 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST,
1137 flags & BDRV_O_UNMAP);
1138 r->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] =
1139 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true);
1140 r->discard_passthrough[QCOW2_DISCARD_OTHER] =
1141 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false);
1142
1143 switch (s->crypt_method_header) {
1144 case QCOW_CRYPT_NONE:
1145 if (encryptfmt) {
1146 error_setg(errp, "No encryption in image header, but options "
1147 "specified format '%s'", encryptfmt);
1148 ret = -EINVAL;
1149 goto fail;
1150 }
1151 break;
1152
1153 case QCOW_CRYPT_AES:
1154 if (encryptfmt && !g_str_equal(encryptfmt, "aes")) {
1155 error_setg(errp,
1156 "Header reported 'aes' encryption format but "
1157 "options specify '%s'", encryptfmt);
1158 ret = -EINVAL;
1159 goto fail;
1160 }
1161 qdict_put_str(encryptopts, "format", "qcow");
1162 r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp);
1163 break;
1164
1165 case QCOW_CRYPT_LUKS:
1166 if (encryptfmt && !g_str_equal(encryptfmt, "luks")) {
1167 error_setg(errp,
1168 "Header reported 'luks' encryption format but "
1169 "options specify '%s'", encryptfmt);
1170 ret = -EINVAL;
1171 goto fail;
1172 }
1173 qdict_put_str(encryptopts, "format", "luks");
1174 r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp);
1175 break;
1176
1177 default:
1178 error_setg(errp, "Unsupported encryption method %d",
1179 s->crypt_method_header);
1180 break;
1181 }
1182 if (s->crypt_method_header != QCOW_CRYPT_NONE && !r->crypto_opts) {
1183 ret = -EINVAL;
1184 goto fail;
1185 }
1186
1187 ret = 0;
1188 fail:
1189 qobject_unref(encryptopts);
1190 qemu_opts_del(opts);
1191 opts = NULL;
1192 return ret;
1193 }
1194
1195 static void qcow2_update_options_commit(BlockDriverState *bs,
1196 Qcow2ReopenState *r)
1197 {
1198 BDRVQcow2State *s = bs->opaque;
1199 int i;
1200
1201 if (s->l2_table_cache) {
1202 qcow2_cache_destroy(s->l2_table_cache);
1203 }
1204 if (s->refcount_block_cache) {
1205 qcow2_cache_destroy(s->refcount_block_cache);
1206 }
1207 s->l2_table_cache = r->l2_table_cache;
1208 s->refcount_block_cache = r->refcount_block_cache;
1209 s->l2_slice_size = r->l2_slice_size;
1210
1211 s->overlap_check = r->overlap_check;
1212 s->use_lazy_refcounts = r->use_lazy_refcounts;
1213
1214 for (i = 0; i < QCOW2_DISCARD_MAX; i++) {
1215 s->discard_passthrough[i] = r->discard_passthrough[i];
1216 }
1217
1218 if (s->cache_clean_interval != r->cache_clean_interval) {
1219 cache_clean_timer_del(bs);
1220 s->cache_clean_interval = r->cache_clean_interval;
1221 cache_clean_timer_init(bs, bdrv_get_aio_context(bs));
1222 }
1223
1224 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
1225 s->crypto_opts = r->crypto_opts;
1226 }
1227
1228 static void qcow2_update_options_abort(BlockDriverState *bs,
1229 Qcow2ReopenState *r)
1230 {
1231 if (r->l2_table_cache) {
1232 qcow2_cache_destroy(r->l2_table_cache);
1233 }
1234 if (r->refcount_block_cache) {
1235 qcow2_cache_destroy(r->refcount_block_cache);
1236 }
1237 qapi_free_QCryptoBlockOpenOptions(r->crypto_opts);
1238 }
1239
1240 static int qcow2_update_options(BlockDriverState *bs, QDict *options,
1241 int flags, Error **errp)
1242 {
1243 Qcow2ReopenState r = {};
1244 int ret;
1245
1246 ret = qcow2_update_options_prepare(bs, &r, options, flags, errp);
1247 if (ret >= 0) {
1248 qcow2_update_options_commit(bs, &r);
1249 } else {
1250 qcow2_update_options_abort(bs, &r);
1251 }
1252
1253 return ret;
1254 }
1255
1256 static int validate_compression_type(BDRVQcow2State *s, Error **errp)
1257 {
1258 switch (s->compression_type) {
1259 case QCOW2_COMPRESSION_TYPE_ZLIB:
1260 #ifdef CONFIG_ZSTD
1261 case QCOW2_COMPRESSION_TYPE_ZSTD:
1262 #endif
1263 break;
1264
1265 default:
1266 error_setg(errp, "qcow2: unknown compression type: %u",
1267 s->compression_type);
1268 return -ENOTSUP;
1269 }
1270
1271 /*
1272 * if the compression type differs from QCOW2_COMPRESSION_TYPE_ZLIB
1273 * the incompatible feature flag must be set
1274 */
1275 if (s->compression_type == QCOW2_COMPRESSION_TYPE_ZLIB) {
1276 if (s->incompatible_features & QCOW2_INCOMPAT_COMPRESSION) {
1277 error_setg(errp, "qcow2: Compression type incompatible feature "
1278 "bit must not be set");
1279 return -EINVAL;
1280 }
1281 } else {
1282 if (!(s->incompatible_features & QCOW2_INCOMPAT_COMPRESSION)) {
1283 error_setg(errp, "qcow2: Compression type incompatible feature "
1284 "bit must be set");
1285 return -EINVAL;
1286 }
1287 }
1288
1289 return 0;
1290 }
1291
1292 /* Called with s->lock held. */
1293 static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options,
1294 int flags, Error **errp)
1295 {
1296 BDRVQcow2State *s = bs->opaque;
1297 unsigned int len, i;
1298 int ret = 0;
1299 QCowHeader header;
1300 Error *local_err = NULL;
1301 uint64_t ext_end;
1302 uint64_t l1_vm_state_index;
1303 bool update_header = false;
1304
1305 ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
1306 if (ret < 0) {
1307 error_setg_errno(errp, -ret, "Could not read qcow2 header");
1308 goto fail;
1309 }
1310 header.magic = be32_to_cpu(header.magic);
1311 header.version = be32_to_cpu(header.version);
1312 header.backing_file_offset = be64_to_cpu(header.backing_file_offset);
1313 header.backing_file_size = be32_to_cpu(header.backing_file_size);
1314 header.size = be64_to_cpu(header.size);
1315 header.cluster_bits = be32_to_cpu(header.cluster_bits);
1316 header.crypt_method = be32_to_cpu(header.crypt_method);
1317 header.l1_table_offset = be64_to_cpu(header.l1_table_offset);
1318 header.l1_size = be32_to_cpu(header.l1_size);
1319 header.refcount_table_offset = be64_to_cpu(header.refcount_table_offset);
1320 header.refcount_table_clusters =
1321 be32_to_cpu(header.refcount_table_clusters);
1322 header.snapshots_offset = be64_to_cpu(header.snapshots_offset);
1323 header.nb_snapshots = be32_to_cpu(header.nb_snapshots);
1324
1325 if (header.magic != QCOW_MAGIC) {
1326 error_setg(errp, "Image is not in qcow2 format");
1327 ret = -EINVAL;
1328 goto fail;
1329 }
1330 if (header.version < 2 || header.version > 3) {
1331 error_setg(errp, "Unsupported qcow2 version %" PRIu32, header.version);
1332 ret = -ENOTSUP;
1333 goto fail;
1334 }
1335
1336 s->qcow_version = header.version;
1337
1338 /* Initialise cluster size */
1339 if (header.cluster_bits < MIN_CLUSTER_BITS ||
1340 header.cluster_bits > MAX_CLUSTER_BITS) {
1341 error_setg(errp, "Unsupported cluster size: 2^%" PRIu32,
1342 header.cluster_bits);
1343 ret = -EINVAL;
1344 goto fail;
1345 }
1346
1347 s->cluster_bits = header.cluster_bits;
1348 s->cluster_size = 1 << s->cluster_bits;
1349
1350 /* Initialise version 3 header fields */
1351 if (header.version == 2) {
1352 header.incompatible_features = 0;
1353 header.compatible_features = 0;
1354 header.autoclear_features = 0;
1355 header.refcount_order = 4;
1356 header.header_length = 72;
1357 } else {
1358 header.incompatible_features =
1359 be64_to_cpu(header.incompatible_features);
1360 header.compatible_features = be64_to_cpu(header.compatible_features);
1361 header.autoclear_features = be64_to_cpu(header.autoclear_features);
1362 header.refcount_order = be32_to_cpu(header.refcount_order);
1363 header.header_length = be32_to_cpu(header.header_length);
1364
1365 if (header.header_length < 104) {
1366 error_setg(errp, "qcow2 header too short");
1367 ret = -EINVAL;
1368 goto fail;
1369 }
1370 }
1371
1372 if (header.header_length > s->cluster_size) {
1373 error_setg(errp, "qcow2 header exceeds cluster size");
1374 ret = -EINVAL;
1375 goto fail;
1376 }
1377
1378 if (header.header_length > sizeof(header)) {
1379 s->unknown_header_fields_size = header.header_length - sizeof(header);
1380 s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);
1381 ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields,
1382 s->unknown_header_fields_size);
1383 if (ret < 0) {
1384 error_setg_errno(errp, -ret, "Could not read unknown qcow2 header "
1385 "fields");
1386 goto fail;
1387 }
1388 }
1389
1390 if (header.backing_file_offset > s->cluster_size) {
1391 error_setg(errp, "Invalid backing file offset");
1392 ret = -EINVAL;
1393 goto fail;
1394 }
1395
1396 if (header.backing_file_offset) {
1397 ext_end = header.backing_file_offset;
1398 } else {
1399 ext_end = 1 << header.cluster_bits;
1400 }
1401
1402 /* Handle feature bits */
1403 s->incompatible_features = header.incompatible_features;
1404 s->compatible_features = header.compatible_features;
1405 s->autoclear_features = header.autoclear_features;
1406
1407 /*
1408 * Handle compression type
1409 * Older qcow2 images don't contain the compression type header.
1410 * Distinguish them by the header length and use
1411 * the only valid (default) compression type in that case
1412 */
1413 if (header.header_length > offsetof(QCowHeader, compression_type)) {
1414 s->compression_type = header.compression_type;
1415 } else {
1416 s->compression_type = QCOW2_COMPRESSION_TYPE_ZLIB;
1417 }
1418
1419 ret = validate_compression_type(s, errp);
1420 if (ret) {
1421 goto fail;
1422 }
1423
1424 if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) {
1425 void *feature_table = NULL;
1426 qcow2_read_extensions(bs, header.header_length, ext_end,
1427 &feature_table, flags, NULL, NULL);
1428 report_unsupported_feature(errp, feature_table,
1429 s->incompatible_features &
1430 ~QCOW2_INCOMPAT_MASK);
1431 ret = -ENOTSUP;
1432 g_free(feature_table);
1433 goto fail;
1434 }
1435
1436 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
1437 /* Corrupt images may not be written to unless they are being repaired
1438 */
1439 if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) {
1440 error_setg(errp, "qcow2: Image is corrupt; cannot be opened "
1441 "read/write");
1442 ret = -EACCES;
1443 goto fail;
1444 }
1445 }
1446
1447 s->subclusters_per_cluster =
1448 has_subclusters(s) ? QCOW_EXTL2_SUBCLUSTERS_PER_CLUSTER : 1;
1449 s->subcluster_size = s->cluster_size / s->subclusters_per_cluster;
1450 s->subcluster_bits = ctz32(s->subcluster_size);
1451
1452 if (s->subcluster_size < (1 << MIN_CLUSTER_BITS)) {
1453 error_setg(errp, "Unsupported subcluster size: %d", s->subcluster_size);
1454 ret = -EINVAL;
1455 goto fail;
1456 }
1457
1458 /* Check support for various header values */
1459 if (header.refcount_order > 6) {
1460 error_setg(errp, "Reference count entry width too large; may not "
1461 "exceed 64 bits");
1462 ret = -EINVAL;
1463 goto fail;
1464 }
1465 s->refcount_order = header.refcount_order;
1466 s->refcount_bits = 1 << s->refcount_order;
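    /* refcount_max is 2^refcount_bits - 1, computed in two steps below so
     * that the shift cannot overflow even when refcount_bits is 64 */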
1467 s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1);
1468 s->refcount_max += s->refcount_max - 1;
1469
1470 s->crypt_method_header = header.crypt_method;
1471 if (s->crypt_method_header) {
1472 if (bdrv_uses_whitelist() &&
1473 s->crypt_method_header == QCOW_CRYPT_AES) {
1474 error_setg(errp,
1475 "Use of AES-CBC encrypted qcow2 images is no longer "
1476 "supported in system emulators");
1477 error_append_hint(errp,
1478 "You can use 'qemu-img convert' to convert your "
1479 "image to an alternative supported format, such "
1480 "as unencrypted qcow2, or raw with the LUKS "
1481 "format instead.\n");
1482 ret = -ENOSYS;
1483 goto fail;
1484 }
1485
1486 if (s->crypt_method_header == QCOW_CRYPT_AES) {
1487 s->crypt_physical_offset = false;
1488 } else {
1489 /* Assuming LUKS and any future crypt methods we
1490 * add will all use physical offsets, due to the
1491 * fact that the alternative is insecure... */
1492 s->crypt_physical_offset = true;
1493 }
1494
1495 bs->encrypted = true;
1496 }
1497
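    /* An L2 table occupies exactly one cluster, so it holds
     * cluster_size / l2_entry_size entries */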
1498 s->l2_bits = s->cluster_bits - ctz32(l2_entry_size(s));
1499 s->l2_size = 1 << s->l2_bits;
1500 /* 2^(s->refcount_order - 3) is the refcount width in bytes */
1501 s->refcount_block_bits = s->cluster_bits - (s->refcount_order - 3);
1502 s->refcount_block_size = 1 << s->refcount_block_bits;
1503 bs->total_sectors = header.size / BDRV_SECTOR_SIZE;
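    /* Layout of a compressed cluster descriptor: the low csize_shift bits
     * hold the host offset, the following (cluster_bits - 8) bits hold the
     * size of the compressed data in 512-byte sectors */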
1504 s->csize_shift = (62 - (s->cluster_bits - 8));
1505 s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
1506 s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
1507
1508 s->refcount_table_offset = header.refcount_table_offset;
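    /* A refcount table entry is 8 bytes, so one refcount table cluster
     * holds 2^(cluster_bits - 3) entries */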
1509 s->refcount_table_size =
1510 header.refcount_table_clusters << (s->cluster_bits - 3);
1511
1512 if (header.refcount_table_clusters == 0 && !(flags & BDRV_O_CHECK)) {
1513 error_setg(errp, "Image does not contain a reference count table");
1514 ret = -EINVAL;
1515 goto fail;
1516 }
1517
1518 ret = qcow2_validate_table(bs, s->refcount_table_offset,
1519 header.refcount_table_clusters,
1520 s->cluster_size, QCOW_MAX_REFTABLE_SIZE,
1521 "Reference count table", errp);
1522 if (ret < 0) {
1523 goto fail;
1524 }
1525
1526 if (!(flags & BDRV_O_CHECK)) {
1527 /*
1528 * The total size in bytes of the snapshot table is checked in
1529 * qcow2_read_snapshots() because the size of each snapshot is
1530 * variable and we don't know it yet.
1531 * Here we only check the offset and number of snapshots.
1532 */
1533 ret = qcow2_validate_table(bs, header.snapshots_offset,
1534 header.nb_snapshots,
1535 sizeof(QCowSnapshotHeader),
1536 sizeof(QCowSnapshotHeader) *
1537 QCOW_MAX_SNAPSHOTS,
1538 "Snapshot table", errp);
1539 if (ret < 0) {
1540 goto fail;
1541 }
1542 }
1543
1544 /* read the level 1 table */
1545 ret = qcow2_validate_table(bs, header.l1_table_offset,
1546 header.l1_size, L1E_SIZE,
1547 QCOW_MAX_L1_SIZE, "Active L1 table", errp);
1548 if (ret < 0) {
1549 goto fail;
1550 }
1551 s->l1_size = header.l1_size;
1552 s->l1_table_offset = header.l1_table_offset;
1553
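    /* First L1 index beyond the guest-visible disk; the VM state saved with
     * internal snapshots is stored starting at this index */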
1554 l1_vm_state_index = size_to_l1(s, header.size);
1555 if (l1_vm_state_index > INT_MAX) {
1556 error_setg(errp, "Image is too big");
1557 ret = -EFBIG;
1558 goto fail;
1559 }
1560 s->l1_vm_state_index = l1_vm_state_index;
1561
 1562     /* the L1 table must contain at least enough entries to cover
 1563        header.size bytes */
1564 if (s->l1_size < s->l1_vm_state_index) {
1565 error_setg(errp, "L1 table is too small");
1566 ret = -EINVAL;
1567 goto fail;
1568 }
1569
1570 if (s->l1_size > 0) {
1571 s->l1_table = qemu_try_blockalign(bs->file->bs, s->l1_size * L1E_SIZE);
1572 if (s->l1_table == NULL) {
1573 error_setg(errp, "Could not allocate L1 table");
1574 ret = -ENOMEM;
1575 goto fail;
1576 }
1577 ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
1578 s->l1_size * L1E_SIZE);
1579 if (ret < 0) {
1580 error_setg_errno(errp, -ret, "Could not read L1 table");
1581 goto fail;
1582 }
 1583         for (i = 0; i < s->l1_size; i++) {
1584 s->l1_table[i] = be64_to_cpu(s->l1_table[i]);
1585 }
1586 }
1587
1588 /* Parse driver-specific options */
1589 ret = qcow2_update_options(bs, options, flags, errp);
1590 if (ret < 0) {
1591 goto fail;
1592 }
1593
1594 s->flags = flags;
1595
1596 ret = qcow2_refcount_init(bs);
1597 if (ret != 0) {
1598 error_setg_errno(errp, -ret, "Could not initialize refcount handling");
1599 goto fail;
1600 }
1601
1602 QLIST_INIT(&s->cluster_allocs);
1603 QTAILQ_INIT(&s->discards);
1604
1605 /* read qcow2 extensions */
1606 if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL,
1607 flags, &update_header, errp)) {
1608 ret = -EINVAL;
1609 goto fail;
1610 }
1611
1612 /* Open external data file */
1613 s->data_file = bdrv_open_child(NULL, options, "data-file", bs,
1614 &child_of_bds, BDRV_CHILD_DATA,
1615 true, &local_err);
1616 if (local_err) {
1617 error_propagate(errp, local_err);
1618 ret = -EINVAL;
1619 goto fail;
1620 }
1621
1622 if (s->incompatible_features & QCOW2_INCOMPAT_DATA_FILE) {
1623 if (!s->data_file && s->image_data_file) {
1624 s->data_file = bdrv_open_child(s->image_data_file, options,
1625 "data-file", bs, &child_of_bds,
1626 BDRV_CHILD_DATA, false, errp);
1627 if (!s->data_file) {
1628 ret = -EINVAL;
1629 goto fail;
1630 }
1631 }
1632 if (!s->data_file) {
1633 error_setg(errp, "'data-file' is required for this image");
1634 ret = -EINVAL;
1635 goto fail;
1636 }
1637
1638 /* No data here */
1639 bs->file->role &= ~BDRV_CHILD_DATA;
1640
1641 /* Must succeed because we have given up permissions if anything */
1642 bdrv_child_refresh_perms(bs, bs->file, &error_abort);
1643 } else {
1644 if (s->data_file) {
1645 error_setg(errp, "'data-file' can only be set for images with an "
1646 "external data file");
1647 ret = -EINVAL;
1648 goto fail;
1649 }
1650
1651 s->data_file = bs->file;
1652
1653 if (data_file_is_raw(bs)) {
1654 error_setg(errp, "data-file-raw requires a data file");
1655 ret = -EINVAL;
1656 goto fail;
1657 }
1658 }
1659
 1660     /* qcow2_read_extensions() may have set up the crypto context
 1661      * if the crypt method needs a header region; some methods
 1662      * don't need header extensions, so we must check here
 1663      */
1664 if (s->crypt_method_header && !s->crypto) {
1665 if (s->crypt_method_header == QCOW_CRYPT_AES) {
1666 unsigned int cflags = 0;
1667 if (flags & BDRV_O_NO_IO) {
1668 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
1669 }
1670 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
1671 NULL, NULL, cflags,
1672 QCOW2_MAX_THREADS, errp);
1673 if (!s->crypto) {
1674 ret = -EINVAL;
1675 goto fail;
1676 }
1677 } else if (!(flags & BDRV_O_NO_IO)) {
1678 error_setg(errp, "Missing CRYPTO header for crypt method %d",
1679 s->crypt_method_header);
1680 ret = -EINVAL;
1681 goto fail;
1682 }
1683 }
1684
1685 /* read the backing file name */
1686 if (header.backing_file_offset != 0) {
1687 len = header.backing_file_size;
1688 if (len > MIN(1023, s->cluster_size - header.backing_file_offset) ||
1689 len >= sizeof(bs->backing_file)) {
1690 error_setg(errp, "Backing file name too long");
1691 ret = -EINVAL;
1692 goto fail;
1693 }
1694 ret = bdrv_pread(bs->file, header.backing_file_offset,
1695 bs->auto_backing_file, len);
1696 if (ret < 0) {
1697 error_setg_errno(errp, -ret, "Could not read backing file name");
1698 goto fail;
1699 }
1700 bs->auto_backing_file[len] = '\0';
1701 pstrcpy(bs->backing_file, sizeof(bs->backing_file),
1702 bs->auto_backing_file);
1703 s->image_backing_file = g_strdup(bs->auto_backing_file);
1704 }
1705
1706 /*
1707 * Internal snapshots; skip reading them in check mode, because
1708 * we do not need them then, and we do not want to abort because
1709 * of a broken table.
1710 */
1711 if (!(flags & BDRV_O_CHECK)) {
1712 s->snapshots_offset = header.snapshots_offset;
1713 s->nb_snapshots = header.nb_snapshots;
1714
1715 ret = qcow2_read_snapshots(bs, errp);
1716 if (ret < 0) {
1717 goto fail;
1718 }
1719 }
1720
1721 /* Clear unknown autoclear feature bits */
1722 update_header |= s->autoclear_features & ~QCOW2_AUTOCLEAR_MASK;
1723 update_header =
1724 update_header && !bs->read_only && !(flags & BDRV_O_INACTIVE);
1725 if (update_header) {
1726 s->autoclear_features &= QCOW2_AUTOCLEAR_MASK;
1727 }
1728
1729 /* == Handle persistent dirty bitmaps ==
1730 *
 1731      * We want to load dirty bitmaps in three cases:
1732 *
1733 * 1. Normal open of the disk in active mode, not related to invalidation
1734 * after migration.
1735 *
1736 * 2. Invalidation of the target vm after pre-copy phase of migration, if
1737 * bitmaps are _not_ migrating through migration channel, i.e.
1738 * 'dirty-bitmaps' capability is disabled.
1739 *
1740 * 3. Invalidation of source vm after failed or canceled migration.
1741 * This is a very interesting case. There are two possible types of
1742 * bitmaps:
1743 *
1744 * A. Stored on inactivation and removed. They should be loaded from the
1745 * image.
1746 *
1747 * B. Not stored: not-persistent bitmaps and bitmaps, migrated through
1748 * the migration channel (with dirty-bitmaps capability).
1749 *
1750 * On the other hand, there are two possible sub-cases:
1751 *
 1752      * 3.1 disk was changed by somebody else while we were inactive. In this
1753 * case all in-RAM dirty bitmaps (both persistent and not) are
1754 * definitely invalid. And we don't have any method to determine
1755 * this.
1756 *
1757 * Simple and safe thing is to just drop all the bitmaps of type B on
 1758      *       inactivation. But in this case we lose bitmaps in the valid 3.2 case.
1759 *
 1760      *       On the other hand, resuming the source vm when the disk was
 1761      *       already changed is a bad thing anyway: not only the bitmaps but
 1762      *       the whole vm state is out of sync with the disk.
 1763      *
 1764      *       This means that a user or management tool who for some reason
 1765      *       decided to resume the source vm after the disk was already changed
 1766      *       by the target vm should at least drop all dirty bitmaps by hand.
1767 *
 1768      *       So we can ignore this case for now, but TODO: a "generation"
 1769      *       extension for qcow2, to determine that the image was changed after
 1770      *       the last inactivation. And if it was changed, we will drop (or at
 1771      *       least mark as 'invalid') all the bitmaps of type B, both persistent
 1772      *       and not.
1773 *
 1774      * 3.2 disk was _not_ changed while we were inactive. Bitmaps may have been
 1775      *     saved to disk ('dirty-bitmaps' capability disabled), or not saved
 1776      *     ('dirty-bitmaps' capability enabled), but we don't need to care:
 1777      *     let's load bitmaps as always. Stored bitmaps will be loaded, and
 1778      *     those not stored have the IN_USE=1 flag in the image and will be
 1779      *     skipped on loading.
1780 *
1781 * One remaining possible case when we don't want load bitmaps:
1782 *
1783 * 4. Open disk in inactive mode in target vm (bitmaps are migrating or
1784 * will be loaded on invalidation, no needs try loading them before)
1785 */
1786
1787 if (!(bdrv_get_flags(bs) & BDRV_O_INACTIVE)) {
1788 /* It's case 1, 2 or 3.2. Or 3.1, which is a bug in the management layer. */
1789 bool header_updated = qcow2_load_dirty_bitmaps(bs, &local_err);
1790 if (local_err != NULL) {
1791 error_propagate(errp, local_err);
1792 ret = -EINVAL;
1793 goto fail;
1794 }
1795
1796 update_header = update_header && !header_updated;
1797 }
1798
1799 if (update_header) {
1800 ret = qcow2_update_header(bs);
1801 if (ret < 0) {
1802 error_setg_errno(errp, -ret, "Could not update qcow2 header");
1803 goto fail;
1804 }
1805 }
1806
1807 bs->supported_zero_flags = header.version >= 3 ?
1808 BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK : 0;
1809 bs->supported_truncate_flags = BDRV_REQ_ZERO_WRITE;
1810
1811 /* Repair image if dirty */
1812 if (!(flags & (BDRV_O_CHECK | BDRV_O_INACTIVE)) && !bs->read_only &&
1813 (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) {
1814 BdrvCheckResult result = {0};
1815
1816 ret = qcow2_co_check_locked(bs, &result,
1817 BDRV_FIX_ERRORS | BDRV_FIX_LEAKS);
1818 if (ret < 0 || result.check_errors) {
1819 if (ret >= 0) {
1820 ret = -EIO;
1821 }
1822 error_setg_errno(errp, -ret, "Could not repair dirty image");
1823 goto fail;
1824 }
1825 }
1826
1827 #ifdef DEBUG_ALLOC
1828 {
1829 BdrvCheckResult result = {0};
1830 qcow2_check_refcounts(bs, &result, 0);
1831 }
1832 #endif
1833
1834 qemu_co_queue_init(&s->thread_task_queue);
1835
1836 return ret;
1837
1838 fail:
1839 g_free(s->image_data_file);
1840 if (has_data_file(bs)) {
1841 bdrv_unref_child(bs, s->data_file);
1842 s->data_file = NULL;
1843 }
1844 g_free(s->unknown_header_fields);
1845 cleanup_unknown_header_ext(bs);
1846 qcow2_free_snapshots(bs);
1847 qcow2_refcount_close(bs);
1848 qemu_vfree(s->l1_table);
1849 /* else pre-write overlap checks in cache_destroy may crash */
1850 s->l1_table = NULL;
1851 cache_clean_timer_del(bs);
1852 if (s->l2_table_cache) {
1853 qcow2_cache_destroy(s->l2_table_cache);
1854 }
1855 if (s->refcount_block_cache) {
1856 qcow2_cache_destroy(s->refcount_block_cache);
1857 }
1858 qcrypto_block_free(s->crypto);
1859 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
1860 return ret;
1861 }
1862
1863 typedef struct QCow2OpenCo {
1864 BlockDriverState *bs;
1865 QDict *options;
1866 int flags;
1867 Error **errp;
1868 int ret;
1869 } QCow2OpenCo;
1870
1871 static void coroutine_fn qcow2_open_entry(void *opaque)
1872 {
1873 QCow2OpenCo *qoc = opaque;
1874 BDRVQcow2State *s = qoc->bs->opaque;
1875
1876 qemu_co_mutex_lock(&s->lock);
1877 qoc->ret = qcow2_do_open(qoc->bs, qoc->options, qoc->flags, qoc->errp);
1878 qemu_co_mutex_unlock(&s->lock);
1879 }
1880
1881 static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
1882 Error **errp)
1883 {
1884 BDRVQcow2State *s = bs->opaque;
1885 QCow2OpenCo qoc = {
1886 .bs = bs,
1887 .options = options,
1888 .flags = flags,
1889 .errp = errp,
1890 .ret = -EINPROGRESS
1891 };
1892
1893 bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
1894 BDRV_CHILD_IMAGE, false, errp);
1895 if (!bs->file) {
1896 return -EINVAL;
1897 }
1898
1899 /* Initialise locks */
1900 qemu_co_mutex_init(&s->lock);
1901
1902 if (qemu_in_coroutine()) {
1903 /* From bdrv_co_create. */
1904 qcow2_open_entry(&qoc);
1905 } else {
1906 assert(qemu_get_current_aio_context() == qemu_get_aio_context());
1907 qemu_coroutine_enter(qemu_coroutine_create(qcow2_open_entry, &qoc));
1908 BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);
1909 }
1910 return qoc.ret;
1911 }
1912
1913 static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp)
1914 {
1915 BDRVQcow2State *s = bs->opaque;
1916
1917 if (bs->encrypted) {
1918 /* Encryption works at sector granularity */
1919 bs->bl.request_alignment = qcrypto_block_get_sector_size(s->crypto);
1920 }
1921 bs->bl.pwrite_zeroes_alignment = s->subcluster_size;
1922 bs->bl.pdiscard_alignment = s->cluster_size;
1923 }
1924
1925 static int qcow2_reopen_prepare(BDRVReopenState *state,
1926 BlockReopenQueue *queue, Error **errp)
1927 {
1928 Qcow2ReopenState *r;
1929 int ret;
1930
1931 r = g_new0(Qcow2ReopenState, 1);
1932 state->opaque = r;
1933
1934 ret = qcow2_update_options_prepare(state->bs, r, state->options,
1935 state->flags, errp);
1936 if (ret < 0) {
1937 goto fail;
1938 }
1939
1940 /* We need to write out any unwritten data if we reopen read-only. */
1941 if ((state->flags & BDRV_O_RDWR) == 0) {
1942 ret = qcow2_reopen_bitmaps_ro(state->bs, errp);
1943 if (ret < 0) {
1944 goto fail;
1945 }
1946
1947 ret = bdrv_flush(state->bs);
1948 if (ret < 0) {
1949 goto fail;
1950 }
1951
1952 ret = qcow2_mark_clean(state->bs);
1953 if (ret < 0) {
1954 goto fail;
1955 }
1956 }
1957
1958 return 0;
1959
1960 fail:
1961 qcow2_update_options_abort(state->bs, r);
1962 g_free(r);
1963 return ret;
1964 }
1965
1966 static void qcow2_reopen_commit(BDRVReopenState *state)
1967 {
1968 qcow2_update_options_commit(state->bs, state->opaque);
1969 g_free(state->opaque);
1970 }
1971
1972 static void qcow2_reopen_commit_post(BDRVReopenState *state)
1973 {
1974 if (state->flags & BDRV_O_RDWR) {
1975 Error *local_err = NULL;
1976
1977 if (qcow2_reopen_bitmaps_rw(state->bs, &local_err) < 0) {
1978 /*
1979 * This is not fatal, bitmaps just left read-only, so all following
1980 * writes will fail. User can remove read-only bitmaps to unblock
1981 * writes or retry reopen.
1982 */
1983 error_reportf_err(local_err,
1984 "%s: Failed to make dirty bitmaps writable: ",
1985 bdrv_get_node_name(state->bs));
1986 }
1987 }
1988 }
1989
1990 static void qcow2_reopen_abort(BDRVReopenState *state)
1991 {
1992 qcow2_update_options_abort(state->bs, state->opaque);
1993 g_free(state->opaque);
1994 }
1995
1996 static void qcow2_join_options(QDict *options, QDict *old_options)
1997 {
1998 bool has_new_overlap_template =
1999 qdict_haskey(options, QCOW2_OPT_OVERLAP) ||
2000 qdict_haskey(options, QCOW2_OPT_OVERLAP_TEMPLATE);
2001 bool has_new_total_cache_size =
2002 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE);
2003 bool has_all_cache_options;
2004
2005 /* New overlap template overrides all old overlap options */
2006 if (has_new_overlap_template) {
2007 qdict_del(old_options, QCOW2_OPT_OVERLAP);
2008 qdict_del(old_options, QCOW2_OPT_OVERLAP_TEMPLATE);
2009 qdict_del(old_options, QCOW2_OPT_OVERLAP_MAIN_HEADER);
2010 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L1);
2011 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L2);
2012 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_TABLE);
2013 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK);
2014 qdict_del(old_options, QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE);
2015 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L1);
2016 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L2);
2017 }
2018
2019 /* New total cache size overrides all old cache size options */
2020 if (qdict_haskey(options, QCOW2_OPT_CACHE_SIZE)) {
2021 qdict_del(old_options, QCOW2_OPT_L2_CACHE_SIZE);
2022 qdict_del(old_options, QCOW2_OPT_REFCOUNT_CACHE_SIZE);
2023 }
2024
2025 qdict_join(options, old_options, false);
2026
2027 /*
2028 * If, after merging, all cache size options are set, an old total size is
2029 * overridden and therefore dropped. Do keep all options, however, if all
2030 * three are new: the resulting error message is what we want to happen.
2031 */
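/*
 * Illustrative example (values are hypothetical, not from the original
 * source): a reopen that passes only l2-cache-size=2M while the previous
 * open used cache-size=8M ends up, after qdict_join(), with both keys set;
 * because the total size is an old option, it is dropped below so that it
 * cannot conflict with the newly requested L2 cache size. If the reopen
 * itself passes cache-size, l2-cache-size and refcount-cache-size together,
 * nothing is dropped and the resulting conflict is reported as an error
 * when the options are parsed, as the comment above intends.
 */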
2032 has_all_cache_options =
2033 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE) ||
2034 qdict_haskey(options, QCOW2_OPT_L2_CACHE_SIZE) ||
2035 qdict_haskey(options, QCOW2_OPT_REFCOUNT_CACHE_SIZE);
2036
2037 if (has_all_cache_options && !has_new_total_cache_size) {
2038 qdict_del(options, QCOW2_OPT_CACHE_SIZE);
2039 }
2040 }
2041
2042 static int coroutine_fn qcow2_co_block_status(BlockDriverState *bs,
2043 bool want_zero,
2044 int64_t offset, int64_t count,
2045 int64_t *pnum, int64_t *map,
2046 BlockDriverState **file)
2047 {
2048 BDRVQcow2State *s = bs->opaque;
2049 uint64_t host_offset;
2050 unsigned int bytes;
2051 QCow2SubclusterType type;
2052 int ret, status = 0;
2053
2054 qemu_co_mutex_lock(&s->lock);
2055
2056 if (!s->metadata_preallocation_checked) {
2057 ret = qcow2_detect_metadata_preallocation(bs);
2058 s->metadata_preallocation = (ret == 1);
2059 s->metadata_preallocation_checked = true;
2060 }
2061
2062 bytes = MIN(INT_MAX, count);
2063 ret = qcow2_get_host_offset(bs, offset, &bytes, &host_offset, &type);
2064 qemu_co_mutex_unlock(&s->lock);
2065 if (ret < 0) {
2066 return ret;
2067 }
2068
2069 *pnum = bytes;
2070
2071 if ((type == QCOW2_SUBCLUSTER_NORMAL ||
2072 type == QCOW2_SUBCLUSTER_ZERO_ALLOC ||
2073 type == QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC) && !s->crypto) {
2074 *map = host_offset;
2075 *file = s->data_file->bs;
2076 status |= BDRV_BLOCK_OFFSET_VALID;
2077 }
2078 if (type == QCOW2_SUBCLUSTER_ZERO_PLAIN ||
2079 type == QCOW2_SUBCLUSTER_ZERO_ALLOC) {
2080 status |= BDRV_BLOCK_ZERO;
2081 } else if (type != QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN &&
2082 type != QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC) {
2083 status |= BDRV_BLOCK_DATA;
2084 }
2085 if (s->metadata_preallocation && (status & BDRV_BLOCK_DATA) &&
2086 (status & BDRV_BLOCK_OFFSET_VALID))
2087 {
2088 status |= BDRV_BLOCK_RECURSE;
2089 }
2090 return status;
2091 }
2092
2093 static coroutine_fn int qcow2_handle_l2meta(BlockDriverState *bs,
2094 QCowL2Meta **pl2meta,
2095 bool link_l2)
2096 {
2097 int ret = 0;
2098 QCowL2Meta *l2meta = *pl2meta;
2099
2100 while (l2meta != NULL) {
2101 QCowL2Meta *next;
2102
2103 if (link_l2) {
2104 ret = qcow2_alloc_cluster_link_l2(bs, l2meta);
2105 if (ret) {
2106 goto out;
2107 }
2108 } else {
2109 qcow2_alloc_cluster_abort(bs, l2meta);
2110 }
2111
2112 /* Take the request off the list of running requests */
2113 QLIST_REMOVE(l2meta, next_in_flight);
2114
2115 qemu_co_queue_restart_all(&l2meta->dependent_requests);
2116
2117 next = l2meta->next;
2118 g_free(l2meta);
2119 l2meta = next;
2120 }
2121 out:
2122 *pl2meta = l2meta;
2123 return ret;
2124 }
2125
2126 static coroutine_fn int
2127 qcow2_co_preadv_encrypted(BlockDriverState *bs,
2128 uint64_t host_offset,
2129 uint64_t offset,
2130 uint64_t bytes,
2131 QEMUIOVector *qiov,
2132 uint64_t qiov_offset)
2133 {
2134 int ret;
2135 BDRVQcow2State *s = bs->opaque;
2136 uint8_t *buf;
2137
2138 assert(bs->encrypted && s->crypto);
2139 assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
2140
2141 /*
2142 * For encrypted images, read everything into a temporary
2143 * contiguous buffer on which the AES functions can work.
2144 * Also, decryption in a separate buffer is better as it
2145 * prevents the guest from learning information about the
2146 * encrypted nature of the virtual disk.
2147 */
2148
2149 buf = qemu_try_blockalign(s->data_file->bs, bytes);
2150 if (buf == NULL) {
2151 return -ENOMEM;
2152 }
2153
2154 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
2155 ret = bdrv_co_pread(s->data_file, host_offset, bytes, buf, 0);
2156 if (ret < 0) {
2157 goto fail;
2158 }
2159
2160 if (qcow2_co_decrypt(bs, host_offset, offset, buf, bytes) < 0)
2161 {
2162 ret = -EIO;
2163 goto fail;
2164 }
2165 qemu_iovec_from_buf(qiov, qiov_offset, buf, bytes);
2166
2167 fail:
2168 qemu_vfree(buf);
2169
2170 return ret;
2171 }
2172
2173 typedef struct Qcow2AioTask {
2174 AioTask task;
2175
2176 BlockDriverState *bs;
2177 QCow2SubclusterType subcluster_type; /* only for read */
2178 uint64_t host_offset; /* or full descriptor in compressed clusters */
2179 uint64_t offset;
2180 uint64_t bytes;
2181 QEMUIOVector *qiov;
2182 uint64_t qiov_offset;
2183 QCowL2Meta *l2meta; /* only for write */
2184 } Qcow2AioTask;
2185
2186 static coroutine_fn int qcow2_co_preadv_task_entry(AioTask *task);
2187 static coroutine_fn int qcow2_add_task(BlockDriverState *bs,
2188 AioTaskPool *pool,
2189 AioTaskFunc func,
2190 QCow2SubclusterType subcluster_type,
2191 uint64_t host_offset,
2192 uint64_t offset,
2193 uint64_t bytes,
2194 QEMUIOVector *qiov,
2195 size_t qiov_offset,
2196 QCowL2Meta *l2meta)
2197 {
2198 Qcow2AioTask local_task;
2199 Qcow2AioTask *task = pool ? g_new(Qcow2AioTask, 1) : &local_task;
2200
2201 *task = (Qcow2AioTask) {
2202 .task.func = func,
2203 .bs = bs,
2204 .subcluster_type = subcluster_type,
2205 .qiov = qiov,
2206 .host_offset = host_offset,
2207 .offset = offset,
2208 .bytes = bytes,
2209 .qiov_offset = qiov_offset,
2210 .l2meta = l2meta,
2211 };
2212
2213 trace_qcow2_add_task(qemu_coroutine_self(), bs, pool,
2214 func == qcow2_co_preadv_task_entry ? "read" : "write",
2215 subcluster_type, host_offset, offset, bytes,
2216 qiov, qiov_offset);
2217
2218 if (!pool) {
2219 return func(&task->task);
2220 }
2221
2222 aio_task_pool_start_task(pool, &task->task);
2223
2224 return 0;
2225 }
2226
2227 static coroutine_fn int qcow2_co_preadv_task(BlockDriverState *bs,
2228 QCow2SubclusterType subc_type,
2229 uint64_t host_offset,
2230 uint64_t offset, uint64_t bytes,
2231 QEMUIOVector *qiov,
2232 size_t qiov_offset)
2233 {
2234 BDRVQcow2State *s = bs->opaque;
2235
2236 switch (subc_type) {
2237 case QCOW2_SUBCLUSTER_ZERO_PLAIN:
2238 case QCOW2_SUBCLUSTER_ZERO_ALLOC:
2239 /* Both zero types are handled in qcow2_co_preadv_part */
2240 g_assert_not_reached();
2241
2242 case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
2243 case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
2244 assert(bs->backing); /* otherwise handled in qcow2_co_preadv_part */
2245
2246 BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
2247 return bdrv_co_preadv_part(bs->backing, offset, bytes,
2248 qiov, qiov_offset, 0);
2249
2250 case QCOW2_SUBCLUSTER_COMPRESSED:
2251 return qcow2_co_preadv_compressed(bs, host_offset,
2252 offset, bytes, qiov, qiov_offset);
2253
2254 case QCOW2_SUBCLUSTER_NORMAL:
2255 if (bs->encrypted) {
2256 return qcow2_co_preadv_encrypted(bs, host_offset,
2257 offset, bytes, qiov, qiov_offset);
2258 }
2259
2260 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
2261 return bdrv_co_preadv_part(s->data_file, host_offset,
2262 bytes, qiov, qiov_offset, 0);
2263
2264 default:
2265 g_assert_not_reached();
2266 }
2267
2268 g_assert_not_reached();
2269 }
2270
2271 static coroutine_fn int qcow2_co_preadv_task_entry(AioTask *task)
2272 {
2273 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task);
2274
2275 assert(!t->l2meta);
2276
2277 return qcow2_co_preadv_task(t->bs, t->subcluster_type,
2278 t->host_offset, t->offset, t->bytes,
2279 t->qiov, t->qiov_offset);
2280 }
2281
2282 static coroutine_fn int qcow2_co_preadv_part(BlockDriverState *bs,
2283 uint64_t offset, uint64_t bytes,
2284 QEMUIOVector *qiov,
2285 size_t qiov_offset, int flags)
2286 {
2287 BDRVQcow2State *s = bs->opaque;
2288 int ret = 0;
2289 unsigned int cur_bytes; /* number of bytes in current iteration */
2290 uint64_t host_offset = 0;
2291 QCow2SubclusterType type;
2292 AioTaskPool *aio = NULL;
2293
2294 while (bytes != 0 && aio_task_pool_status(aio) == 0) {
2295 /* prepare next request */
2296 cur_bytes = MIN(bytes, INT_MAX);
2297 if (s->crypto) {
2298 cur_bytes = MIN(cur_bytes,
2299 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
2300 }
2301
2302 qemu_co_mutex_lock(&s->lock);
2303 ret = qcow2_get_host_offset(bs, offset, &cur_bytes,
2304 &host_offset, &type);
2305 qemu_co_mutex_unlock(&s->lock);
2306 if (ret < 0) {
2307 goto out;
2308 }
2309
2310 if (type == QCOW2_SUBCLUSTER_ZERO_PLAIN ||
2311 type == QCOW2_SUBCLUSTER_ZERO_ALLOC ||
2312 (type == QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN && !bs->backing) ||
2313 (type == QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC && !bs->backing))
2314 {
2315 qemu_iovec_memset(qiov, qiov_offset, 0, cur_bytes);
2316 } else {
2317 if (!aio && cur_bytes != bytes) {
2318 aio = aio_task_pool_new(QCOW2_MAX_WORKERS);
2319 }
2320 ret = qcow2_add_task(bs, aio, qcow2_co_preadv_task_entry, type,
2321 host_offset, offset, cur_bytes,
2322 qiov, qiov_offset, NULL);
2323 if (ret < 0) {
2324 goto out;
2325 }
2326 }
2327
2328 bytes -= cur_bytes;
2329 offset += cur_bytes;
2330 qiov_offset += cur_bytes;
2331 }
2332
2333 out:
2334 if (aio) {
2335 aio_task_pool_wait_all(aio);
2336 if (ret == 0) {
2337 ret = aio_task_pool_status(aio);
2338 }
2339 g_free(aio);
2340 }
2341
2342 return ret;
2343 }
2344
2345 /* Check if it's possible to merge a write request with the writing of
2346 * the data from the COW regions */
2347 static bool merge_cow(uint64_t offset, unsigned bytes,
2348 QEMUIOVector *qiov, size_t qiov_offset,
2349 QCowL2Meta *l2meta)
2350 {
2351 QCowL2Meta *m;
2352
2353 for (m = l2meta; m != NULL; m = m->next) {
2354 /* If both COW regions are empty then there's nothing to merge */
2355 if (m->cow_start.nb_bytes == 0 && m->cow_end.nb_bytes == 0) {
2356 continue;
2357 }
2358
2359 /* If COW regions are handled already, skip this too */
2360 if (m->skip_cow) {
2361 continue;
2362 }
2363
2364 /*
2365 * The write request should start immediately after the first
2366 * COW region. This does not always happen because the area
2367 * touched by the request can be larger than the one defined
2368 * by @m (a single request can span an area consisting of a
2369 * mix of previously unallocated and allocated clusters, that
2370 * is why @l2meta is a list).
2371 */
2372 if (l2meta_cow_start(m) + m->cow_start.nb_bytes != offset) {
2373 /* In this case the request starts before this region */
2374 assert(offset < l2meta_cow_start(m));
2375 assert(m->cow_start.nb_bytes == 0);
2376 continue;
2377 }
2378
2379 /* The write request should end immediately before the second
2380 * COW region (see above for why it does not always happen) */
2381 if (m->offset + m->cow_end.offset != offset + bytes) {
2382 assert(offset + bytes > m->offset + m->cow_end.offset);
2383 assert(m->cow_end.nb_bytes == 0);
2384 continue;
2385 }
2386
2387 /* Make sure that adding both COW regions to the QEMUIOVector
2388 * does not exceed IOV_MAX */
2389 if (qemu_iovec_subvec_niov(qiov, qiov_offset, bytes) > IOV_MAX - 2) {
2390 continue;
2391 }
2392
2393 m->data_qiov = qiov;
2394 m->data_qiov_offset = qiov_offset;
2395 return true;
2396 }
2397
2398 return false;
2399 }
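/*
 * Illustrative layout of a mergeable request (sketch with hypothetical
 * sizes, not from the original source). With 64 KiB clusters, a guest write
 * of 60 KiB starting 2 KiB into a freshly allocated cluster leaves two COW
 * regions around it:
 *
 *   host cluster: |<- cow_start: 2 KiB ->|<- guest data: 60 KiB ->|<- cow_end: 2 KiB ->|
 *
 * merge_cow() succeeds here because the guest data begins exactly where the
 * head COW region ends and ends exactly where the tail COW region begins,
 * so the COW code can submit all three pieces as a single vectored write
 * (provided the combined QEMUIOVector stays within IOV_MAX entries).
 */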
2400
2401 /*
2402 * Return 1 if the COW regions read as zeroes, 0 if not, < 0 on error.
2403 * Note that returning 0 does not guarantee non-zero data.
2404 */
2405 static int is_zero_cow(BlockDriverState *bs, QCowL2Meta *m)
2406 {
2407 /*
2408 * This check is designed as an optimization shortcut, so it must be
2409 * efficient.
2410 * Instead of is_zero(), use bdrv_co_is_zero_fast(), which is
2411 * faster (but less accurate: it can produce false negatives).
2412 */
2413 int ret = bdrv_co_is_zero_fast(bs, m->offset + m->cow_start.offset,
2414 m->cow_start.nb_bytes);
2415 if (ret <= 0) {
2416 return ret;
2417 }
2418
2419 return bdrv_co_is_zero_fast(bs, m->offset + m->cow_end.offset,
2420 m->cow_end.nb_bytes);
2421 }
2422
2423 static int handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta)
2424 {
2425 BDRVQcow2State *s = bs->opaque;
2426 QCowL2Meta *m;
2427
2428 if (!(s->data_file->bs->supported_zero_flags & BDRV_REQ_NO_FALLBACK)) {
2429 return 0;
2430 }
2431
2432 if (bs->encrypted) {
2433 return 0;
2434 }
2435
2436 for (m = l2meta; m != NULL; m = m->next) {
2437 int ret;
2438 uint64_t start_offset = m->alloc_offset + m->cow_start.offset;
2439 unsigned nb_bytes = m->cow_end.offset + m->cow_end.nb_bytes -
2440 m->cow_start.offset;
2441
2442 if (!m->cow_start.nb_bytes && !m->cow_end.nb_bytes) {
2443 continue;
2444 }
2445
2446 ret = is_zero_cow(bs, m);
2447 if (ret < 0) {
2448 return ret;
2449 } else if (ret == 0) {
2450 continue;
2451 }
2452
2453 /*
2454 * Instead of writing the zero-filled COW buffers,
2455 * efficiently zero out the whole clusters.
2456 */
2457
2458 ret = qcow2_pre_write_overlap_check(bs, 0, start_offset, nb_bytes,
2459 true);
2460 if (ret < 0) {
2461 return ret;
2462 }
2463
2464 BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_SPACE);
2465 ret = bdrv_co_pwrite_zeroes(s->data_file, start_offset, nb_bytes,
2466 BDRV_REQ_NO_FALLBACK);
2467 if (ret < 0) {
2468 if (ret != -ENOTSUP && ret != -EAGAIN) {
2469 return ret;
2470 }
2471 continue;
2472 }
2473
2474 trace_qcow2_skip_cow(qemu_coroutine_self(), m->offset, m->nb_clusters);
2475 m->skip_cow = true;
2476 }
2477 return 0;
2478 }
2479
2480 /*
2481 * qcow2_co_pwritev_task
2482 * Called with s->lock unlocked
2483 * l2meta - if not NULL, qcow2_co_pwritev_task() will consume it. Caller must
2484 * not use it in any way after the qcow2_co_pwritev_task() call
2485 */
2486 static coroutine_fn int qcow2_co_pwritev_task(BlockDriverState *bs,
2487 uint64_t host_offset,
2488 uint64_t offset, uint64_t bytes,
2489 QEMUIOVector *qiov,
2490 uint64_t qiov_offset,
2491 QCowL2Meta *l2meta)
2492 {
2493 int ret;
2494 BDRVQcow2State *s = bs->opaque;
2495 void *crypt_buf = NULL;
2496 QEMUIOVector encrypted_qiov;
2497
2498 if (bs->encrypted) {
2499 assert(s->crypto);
2500 assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
2501 crypt_buf = qemu_try_blockalign(bs->file->bs, bytes);
2502 if (crypt_buf == NULL) {
2503 ret = -ENOMEM;
2504 goto out_unlocked;
2505 }
2506 qemu_iovec_to_buf(qiov, qiov_offset, crypt_buf, bytes);
2507
2508 if (qcow2_co_encrypt(bs, host_offset, offset, crypt_buf, bytes) < 0) {
2509 ret = -EIO;
2510 goto out_unlocked;
2511 }
2512
2513 qemu_iovec_init_buf(&encrypted_qiov, crypt_buf, bytes);
2514 qiov = &encrypted_qiov;
2515 qiov_offset = 0;
2516 }
2517
2518 /* Try to efficiently initialize the physical space with zeroes */
2519 ret = handle_alloc_space(bs, l2meta);
2520 if (ret < 0) {
2521 goto out_unlocked;
2522 }
2523
2524 /*
2525 * If we need to do COW, check if it's possible to merge the
2526 * writing of the guest data together with that of the COW regions.
2527 * If it's not possible (or not necessary) then write the
2528 * guest data now.
2529 */
2530 if (!merge_cow(offset, bytes, qiov, qiov_offset, l2meta)) {
2531 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
2532 trace_qcow2_writev_data(qemu_coroutine_self(), host_offset);
2533 ret = bdrv_co_pwritev_part(s->data_file, host_offset,
2534 bytes, qiov, qiov_offset, 0);
2535 if (ret < 0) {
2536 goto out_unlocked;
2537 }
2538 }
2539
2540 qemu_co_mutex_lock(&s->lock);
2541
2542 ret = qcow2_handle_l2meta(bs, &l2meta, true);
2543 goto out_locked;
2544
2545 out_unlocked:
2546 qemu_co_mutex_lock(&s->lock);
2547
2548 out_locked:
2549 qcow2_handle_l2meta(bs, &l2meta, false);
2550 qemu_co_mutex_unlock(&s->lock);
2551
2552 qemu_vfree(crypt_buf);
2553
2554 return ret;
2555 }
2556
2557 static coroutine_fn int qcow2_co_pwritev_task_entry(AioTask *task)
2558 {
2559 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task);
2560
2561 assert(!t->subcluster_type);
2562
2563 return qcow2_co_pwritev_task(t->bs, t->host_offset,
2564 t->offset, t->bytes, t->qiov, t->qiov_offset,
2565 t->l2meta);
2566 }
2567
2568 static coroutine_fn int qcow2_co_pwritev_part(
2569 BlockDriverState *bs, uint64_t offset, uint64_t bytes,
2570 QEMUIOVector *qiov, size_t qiov_offset, int flags)
2571 {
2572 BDRVQcow2State *s = bs->opaque;
2573 int offset_in_cluster;
2574 int ret;
2575 unsigned int cur_bytes; /* number of bytes in current iteration */
2576 uint64_t host_offset;
2577 QCowL2Meta *l2meta = NULL;
2578 AioTaskPool *aio = NULL;
2579
2580 trace_qcow2_writev_start_req(qemu_coroutine_self(), offset, bytes);
2581
2582 while (bytes != 0 && aio_task_pool_status(aio) == 0) {
2583
2584 l2meta = NULL;
2585
2586 trace_qcow2_writev_start_part(qemu_coroutine_self());
2587 offset_in_cluster = offset_into_cluster(s, offset);
2588 cur_bytes = MIN(bytes, INT_MAX);
2589 if (bs->encrypted) {
2590 cur_bytes = MIN(cur_bytes,
2591 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
2592 - offset_in_cluster);
2593 }
2594
2595 qemu_co_mutex_lock(&s->lock);
2596
2597 ret = qcow2_alloc_host_offset(bs, offset, &cur_bytes,
2598 &host_offset, &l2meta);
2599 if (ret < 0) {
2600 goto out_locked;
2601 }
2602
2603 ret = qcow2_pre_write_overlap_check(bs, 0, host_offset,
2604 cur_bytes, true);
2605 if (ret < 0) {
2606 goto out_locked;
2607 }
2608
2609 qemu_co_mutex_unlock(&s->lock);
2610
2611 if (!aio && cur_bytes != bytes) {
2612 aio = aio_task_pool_new(QCOW2_MAX_WORKERS);
2613 }
2614 ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_task_entry, 0,
2615 host_offset, offset,
2616 cur_bytes, qiov, qiov_offset, l2meta);
2617 l2meta = NULL; /* l2meta is consumed by qcow2_co_pwritev_task() */
2618 if (ret < 0) {
2619 goto fail_nometa;
2620 }
2621
2622 bytes -= cur_bytes;
2623 offset += cur_bytes;
2624 qiov_offset += cur_bytes;
2625 trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes);
2626 }
2627 ret = 0;
2628
2629 qemu_co_mutex_lock(&s->lock);
2630
2631 out_locked:
2632 qcow2_handle_l2meta(bs, &l2meta, false);
2633
2634 qemu_co_mutex_unlock(&s->lock);
2635
2636 fail_nometa:
2637 if (aio) {
2638 aio_task_pool_wait_all(aio);
2639 if (ret == 0) {
2640 ret = aio_task_pool_status(aio);
2641 }
2642 g_free(aio);
2643 }
2644
2645 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret);
2646
2647 return ret;
2648 }
2649
2650 static int qcow2_inactivate(BlockDriverState *bs)
2651 {
2652 BDRVQcow2State *s = bs->opaque;
2653 int ret, result = 0;
2654 Error *local_err = NULL;
2655
2656 qcow2_store_persistent_dirty_bitmaps(bs, true, &local_err);
2657 if (local_err != NULL) {
2658 result = -EINVAL;
2659 error_reportf_err(local_err, "Lost persistent bitmaps during "
2660 "inactivation of node '%s': ",
2661 bdrv_get_device_or_node_name(bs));
2662 }
2663
2664 ret = qcow2_cache_flush(bs, s->l2_table_cache);
2665 if (ret) {
2666 result = ret;
2667 error_report("Failed to flush the L2 table cache: %s",
2668 strerror(-ret));
2669 }
2670
2671 ret = qcow2_cache_flush(bs, s->refcount_block_cache);
2672 if (ret) {
2673 result = ret;
2674 error_report("Failed to flush the refcount block cache: %s",
2675 strerror(-ret));
2676 }
2677
2678 if (result == 0) {
2679 qcow2_mark_clean(bs);
2680 }
2681
2682 return result;
2683 }
2684
2685 static void qcow2_close(BlockDriverState *bs)
2686 {
2687 BDRVQcow2State *s = bs->opaque;
2688 qemu_vfree(s->l1_table);
2689 /* else pre-write overlap checks in cache_destroy may crash */
2690 s->l1_table = NULL;
2691
2692 if (!(s->flags & BDRV_O_INACTIVE)) {
2693 qcow2_inactivate(bs);
2694 }
2695
2696 cache_clean_timer_del(bs);
2697 qcow2_cache_destroy(s->l2_table_cache);
2698 qcow2_cache_destroy(s->refcount_block_cache);
2699
2700 qcrypto_block_free(s->crypto);
2701 s->crypto = NULL;
2702 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
2703
2704 g_free(s->unknown_header_fields);
2705 cleanup_unknown_header_ext(bs);
2706
2707 g_free(s->image_data_file);
2708 g_free(s->image_backing_file);
2709 g_free(s->image_backing_format);
2710
2711 if (has_data_file(bs)) {
2712 bdrv_unref_child(bs, s->data_file);
2713 s->data_file = NULL;
2714 }
2715
2716 qcow2_refcount_close(bs);
2717 qcow2_free_snapshots(bs);
2718 }
2719
2720 static void coroutine_fn qcow2_co_invalidate_cache(BlockDriverState *bs,
2721 Error **errp)
2722 {
2723 BDRVQcow2State *s = bs->opaque;
2724 int flags = s->flags;
2725 QCryptoBlock *crypto = NULL;
2726 QDict *options;
2727 Error *local_err = NULL;
2728 int ret;
2729
2730 /*
2731 * Backing files are read-only, which makes all of their metadata immutable;
2732 * that means we don't have to worry about reopening them here.
2733 */
2734
2735 crypto = s->crypto;
2736 s->crypto = NULL;
2737
2738 qcow2_close(bs);
2739
2740 memset(s, 0, sizeof(BDRVQcow2State));
2741 options = qdict_clone_shallow(bs->options);
2742
2743 flags &= ~BDRV_O_INACTIVE;
2744 qemu_co_mutex_lock(&s->lock);
2745 ret = qcow2_do_open(bs, options, flags, &local_err);
2746 qemu_co_mutex_unlock(&s->lock);
2747 qobject_unref(options);
2748 if (local_err) {
2749 error_propagate_prepend(errp, local_err,
2750 "Could not reopen qcow2 layer: ");
2751 bs->drv = NULL;
2752 return;
2753 } else if (ret < 0) {
2754 error_setg_errno(errp, -ret, "Could not reopen qcow2 layer");
2755 bs->drv = NULL;
2756 return;
2757 }
2758
2759 s->crypto = crypto;
2760 }
2761
2762 static size_t header_ext_add(char *buf, uint32_t magic, const void *s,
2763 size_t len, size_t buflen)
2764 {
2765 QCowExtension *ext_backing_fmt = (QCowExtension*) buf;
2766 size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7);
2767
2768 if (buflen < ext_len) {
2769 return -ENOSPC;
2770 }
2771
2772 *ext_backing_fmt = (QCowExtension) {
2773 .magic = cpu_to_be32(magic),
2774 .len = cpu_to_be32(len),
2775 };
2776
2777 if (len) {
2778 memcpy(buf + sizeof(QCowExtension), s, len);
2779 }
2780
2781 return ext_len;
2782 }
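/*
 * Worked example (illustrative, not from the original source): adding the
 * backing format extension for the string "qcow2" uses len = 5, which the
 * expression ((len + 7) & ~7) pads to 8 bytes, so
 *
 *     ext_len = sizeof(QCowExtension) + 8 = 16
 *
 * header_ext_add() then writes the 8-byte extension header, copies the five
 * data bytes, and returns 16 so the caller advances buf past the padded
 * extension.
 */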
2783
2784 /*
2785 * Updates the qcow2 header, including the variable length parts of it, i.e.
2786 * the backing file name and all extensions. qcow2 was not designed to allow
2787 * such changes, so if we run out of space (we can only use the first cluster)
2788 * this function may fail.
2789 *
2790 * Returns 0 on success, -errno in error cases.
2791 */
2792 int qcow2_update_header(BlockDriverState *bs)
2793 {
2794 BDRVQcow2State *s = bs->opaque;
2795 QCowHeader *header;
2796 char *buf;
2797 size_t buflen = s->cluster_size;
2798 int ret;
2799 uint64_t total_size;
2800 uint32_t refcount_table_clusters;
2801 size_t header_length;
2802 Qcow2UnknownHeaderExtension *uext;
2803
2804 buf = qemu_blockalign(bs, buflen);
2805
2806 /* Header structure */
2807 header = (QCowHeader*) buf;
2808
2809 if (buflen < sizeof(*header)) {
2810 ret = -ENOSPC;
2811 goto fail;
2812 }
2813
2814 header_length = sizeof(*header) + s->unknown_header_fields_size;
2815 total_size = bs->total_sectors * BDRV_SECTOR_SIZE;
2816 refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3);
2817
2818 ret = validate_compression_type(s, NULL);
2819 if (ret) {
2820 goto fail;
2821 }
2822
2823 *header = (QCowHeader) {
2824 /* Version 2 fields */
2825 .magic = cpu_to_be32(QCOW_MAGIC),
2826 .version = cpu_to_be32(s->qcow_version),
2827 .backing_file_offset = 0,
2828 .backing_file_size = 0,
2829 .cluster_bits = cpu_to_be32(s->cluster_bits),
2830 .size = cpu_to_be64(total_size),
2831 .crypt_method = cpu_to_be32(s->crypt_method_header),
2832 .l1_size = cpu_to_be32(s->l1_size),
2833 .l1_table_offset = cpu_to_be64(s->l1_table_offset),
2834 .refcount_table_offset = cpu_to_be64(s->refcount_table_offset),
2835 .refcount_table_clusters = cpu_to_be32(refcount_table_clusters),
2836 .nb_snapshots = cpu_to_be32(s->nb_snapshots),
2837 .snapshots_offset = cpu_to_be64(s->snapshots_offset),
2838
2839 /* Version 3 fields */
2840 .incompatible_features = cpu_to_be64(s->incompatible_features),
2841 .compatible_features = cpu_to_be64(s->compatible_features),
2842 .autoclear_features = cpu_to_be64(s->autoclear_features),
2843 .refcount_order = cpu_to_be32(s->refcount_order),
2844 .header_length = cpu_to_be32(header_length),
2845 .compression_type = s->compression_type,
2846 };
2847
2848 /* For older versions, write a shorter header */
2849 switch (s->qcow_version) {
2850 case 2:
2851 ret = offsetof(QCowHeader, incompatible_features);
2852 break;
2853 case 3:
2854 ret = sizeof(*header);
2855 break;
2856 default:
2857 ret = -EINVAL;
2858 goto fail;
2859 }
2860
2861 buf += ret;
2862 buflen -= ret;
2863 memset(buf, 0, buflen);
2864
2865 /* Preserve any unknown field in the header */
2866 if (s->unknown_header_fields_size) {
2867 if (buflen < s->unknown_header_fields_size) {
2868 ret = -ENOSPC;
2869 goto fail;
2870 }
2871
2872 memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size);
2873 buf += s->unknown_header_fields_size;
2874 buflen -= s->unknown_header_fields_size;
2875 }
2876
2877 /* Backing file format header extension */
2878 if (s->image_backing_format) {
2879 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT,
2880 s->image_backing_format,
2881 strlen(s->image_backing_format),
2882 buflen);
2883 if (ret < 0) {
2884 goto fail;
2885 }
2886
2887 buf += ret;
2888 buflen -= ret;
2889 }
2890
2891 /* External data file header extension */
2892 if (has_data_file(bs) && s->image_data_file) {
2893 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_DATA_FILE,
2894 s->image_data_file, strlen(s->image_data_file),
2895 buflen);
2896 if (ret < 0) {
2897 goto fail;
2898 }
2899
2900 buf += ret;
2901 buflen -= ret;
2902 }
2903
2904 /* Full disk encryption header pointer extension */
2905 if (s->crypto_header.offset != 0) {
2906 s->crypto_header.offset = cpu_to_be64(s->crypto_header.offset);
2907 s->crypto_header.length = cpu_to_be64(s->crypto_header.length);
2908 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_CRYPTO_HEADER,
2909 &s->crypto_header, sizeof(s->crypto_header),
2910 buflen);
2911 s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset);
2912 s->crypto_header.length = be64_to_cpu(s->crypto_header.length);
2913 if (ret < 0) {
2914 goto fail;
2915 }
2916 buf += ret;
2917 buflen -= ret;
2918 }
2919
2920 /*
2921 * Feature table. A mere 8 feature names occupy 392 bytes, and
2922 * when coupled with the v3 minimum header of 104 bytes plus the
2923 * 8-byte end-of-extension marker, that would leave only 8 bytes
2924 * for a backing file name in an image with 512-byte clusters.
2925 * Thus, we choose to omit this header for cluster sizes 4k and
2926 * smaller.
2927 */
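/*
 * Illustrative arithmetic for the numbers above (assuming the 48-byte
 * on-disk Qcow2Feature entries): 8 entries * 48 bytes + 8-byte extension
 * header = 392 bytes, and 512 - 104 (v3 header) - 392 - 8 (end-of-extension
 * marker) = 8 bytes left for the backing file name.
 */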
2928 if (s->qcow_version >= 3 && s->cluster_size > 4096) {
2929 static const Qcow2Feature features[] = {
2930 {
2931 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
2932 .bit = QCOW2_INCOMPAT_DIRTY_BITNR,
2933 .name = "dirty bit",
2934 },
2935 {
2936 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
2937 .bit = QCOW2_INCOMPAT_CORRUPT_BITNR,
2938 .name = "corrupt bit",
2939 },
2940 {
2941 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
2942 .bit = QCOW2_INCOMPAT_DATA_FILE_BITNR,
2943 .name = "external data file",
2944 },
2945 {
2946 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
2947 .bit = QCOW2_INCOMPAT_COMPRESSION_BITNR,
2948 .name = "compression type",
2949 },
2950 {
2951 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
2952 .bit = QCOW2_INCOMPAT_EXTL2_BITNR,
2953 .name = "extended L2 entries",
2954 },
2955 {
2956 .type = QCOW2_FEAT_TYPE_COMPATIBLE,
2957 .bit = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR,
2958 .name = "lazy refcounts",
2959 },
2960 {
2961 .type = QCOW2_FEAT_TYPE_AUTOCLEAR,
2962 .bit = QCOW2_AUTOCLEAR_BITMAPS_BITNR,
2963 .name = "bitmaps",
2964 },
2965 {
2966 .type = QCOW2_FEAT_TYPE_AUTOCLEAR,
2967 .bit = QCOW2_AUTOCLEAR_DATA_FILE_RAW_BITNR,
2968 .name = "raw external data",
2969 },
2970 };
2971
2972 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE,
2973 features, sizeof(features), buflen);
2974 if (ret < 0) {
2975 goto fail;
2976 }
2977 buf += ret;
2978 buflen -= ret;
2979 }
2980
2981 /* Bitmap extension */
2982 if (s->nb_bitmaps > 0) {
2983 Qcow2BitmapHeaderExt bitmaps_header = {
2984 .nb_bitmaps = cpu_to_be32(s->nb_bitmaps),
2985 .bitmap_directory_size =
2986 cpu_to_be64(s->bitmap_directory_size),
2987 .bitmap_directory_offset =
2988 cpu_to_be64(s->bitmap_directory_offset)
2989 };
2990 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BITMAPS,
2991 &bitmaps_header, sizeof(bitmaps_header),
2992 buflen);
2993 if (ret < 0) {
2994 goto fail;
2995 }
2996 buf += ret;
2997 buflen -= ret;
2998 }
2999
3000 /* Keep unknown header extensions */
3001 QLIST_FOREACH(uext, &s->unknown_header_ext, next) {
3002 ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen);
3003 if (ret < 0) {
3004 goto fail;
3005 }
3006
3007 buf += ret;
3008 buflen -= ret;
3009 }
3010
3011 /* End of header extensions */
3012 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen);
3013 if (ret < 0) {
3014 goto fail;
3015 }
3016
3017 buf += ret;
3018 buflen -= ret;
3019
3020 /* Backing file name */
3021 if (s->image_backing_file) {
3022 size_t backing_file_len = strlen(s->image_backing_file);
3023
3024 if (buflen < backing_file_len) {
3025 ret = -ENOSPC;
3026 goto fail;
3027 }
3028
3029 /* Using strncpy is OK here: the backing file name in the header need not be NUL-terminated. */
3030 strncpy(buf, s->image_backing_file, buflen);
3031
3032 header->backing_file_offset = cpu_to_be64(buf - ((char*) header));
3033 header->backing_file_size = cpu_to_be32(backing_file_len);
3034 }
3035
3036 /* Write the new header */
3037 ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size);
3038 if (ret < 0) {
3039 goto fail;
3040 }
3041
3042 ret = 0;
3043 fail:
3044 qemu_vfree(header);
3045 return ret;
3046 }
3047
3048 static int qcow2_change_backing_file(BlockDriverState *bs,
3049 const char *backing_file, const char *backing_fmt)
3050 {
3051 BDRVQcow2State *s = bs->opaque;
3052
3053 /* Adding a backing file means that the external data file alone won't be
3054 * enough to make sense of the content */
3055 if (backing_file && data_file_is_raw(bs)) {
3056 return -EINVAL;
3057 }
3058
3059 if (backing_file && strlen(backing_file) > 1023) {
3060 return -EINVAL;
3061 }
3062
3063 pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file),
3064 backing_file ?: "");
3065 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
3066 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
3067
3068 g_free(s->image_backing_file);
3069 g_free(s->image_backing_format);
3070
3071 s->image_backing_file = backing_file ? g_strdup(bs->backing_file) : NULL;
3072 s->image_backing_format = backing_fmt ? g_strdup(bs->backing_format) : NULL;
3073
3074 return qcow2_update_header(bs);
3075 }
3076
3077 static int qcow2_set_up_encryption(BlockDriverState *bs,
3078 QCryptoBlockCreateOptions *cryptoopts,
3079 Error **errp)
3080 {
3081 BDRVQcow2State *s = bs->opaque;
3082 QCryptoBlock *crypto = NULL;
3083 int fmt, ret;
3084
3085 switch (cryptoopts->format) {
3086 case Q_CRYPTO_BLOCK_FORMAT_LUKS:
3087 fmt = QCOW_CRYPT_LUKS;
3088 break;
3089 case Q_CRYPTO_BLOCK_FORMAT_QCOW:
3090 fmt = QCOW_CRYPT_AES;
3091 break;
3092 default:
3093 error_setg(errp, "Crypto format not supported in qcow2");
3094 return -EINVAL;
3095 }
3096
3097 s->crypt_method_header = fmt;
3098
3099 crypto = qcrypto_block_create(cryptoopts, "encrypt.",
3100 qcow2_crypto_hdr_init_func,
3101 qcow2_crypto_hdr_write_func,
3102 bs, errp);
3103 if (!crypto) {
3104 return -EINVAL;
3105 }
3106
3107 ret = qcow2_update_header(bs);
3108 if (ret < 0) {
3109 error_setg_errno(errp, -ret, "Could not write encryption header");
3110 goto out;
3111 }
3112
3113 ret = 0;
3114 out:
3115 qcrypto_block_free(crypto);
3116 return ret;
3117 }
3118
3119 /**
3120 * Preallocates metadata structures for data clusters between @offset (in the
3121 * guest disk) and @new_length (which is thus generally the new guest disk
3122 * size).
3123 *
3124 * Returns: 0 on success, -errno on failure.
3125 */
3126 static int coroutine_fn preallocate_co(BlockDriverState *bs, uint64_t offset,
3127 uint64_t new_length, PreallocMode mode,
3128 Error **errp)
3129 {
3130 BDRVQcow2State *s = bs->opaque;
3131 uint64_t bytes;
3132 uint64_t host_offset = 0;
3133 int64_t file_length;
3134 unsigned int cur_bytes;
3135 int ret;
3136 QCowL2Meta *meta = NULL, *m;
3137
3138 assert(offset <= new_length);
3139 bytes = new_length - offset;
3140
3141 while (bytes) {
3142 cur_bytes = MIN(bytes, QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size));
3143 ret = qcow2_alloc_host_offset(bs, offset, &cur_bytes,
3144 &host_offset, &meta);
3145 if (ret < 0) {
3146 error_setg_errno(errp, -ret, "Allocating clusters failed");
3147 goto out;
3148 }
3149
3150 for (m = meta; m != NULL; m = m->next) {
3151 m->prealloc = true;
3152 }
3153
3154 ret = qcow2_handle_l2meta(bs, &meta, true);
3155 if (ret < 0) {
3156 error_setg_errno(errp, -ret, "Mapping clusters failed");
3157 goto out;
3158 }
3159
3160 /* TODO Preallocate data if requested */
3161
3162 bytes -= cur_bytes;
3163 offset += cur_bytes;
3164 }
3165
3166 /*
3167 * It is expected that the image file is large enough to actually contain
3168 * all of the allocated clusters (otherwise we get failing reads after
3169 * EOF). Extend the image to the last allocated sector.
3170 */
3171 file_length = bdrv_getlength(s->data_file->bs);
3172 if (file_length < 0) {
3173 error_setg_errno(errp, -file_length, "Could not get file size");
3174 ret = file_length;
3175 goto out;
3176 }
3177
3178 if (host_offset + cur_bytes > file_length) {
3179 if (mode == PREALLOC_MODE_METADATA) {
3180 mode = PREALLOC_MODE_OFF;
3181 }
3182 ret = bdrv_co_truncate(s->data_file, host_offset + cur_bytes, false,
3183 mode, 0, errp);
3184 if (ret < 0) {
3185 goto out;
3186 }
3187 }
3188
3189 ret = 0;
3190
3191 out:
3192 qcow2_handle_l2meta(bs, &meta, false);
3193 return ret;
3194 }
3195
3196 /* qcow2_refcount_metadata_size:
3197 * @clusters: number of clusters to refcount (including data and L1/L2 tables)
3198 * @cluster_size: size of a cluster, in bytes
3199 * @refcount_order: refcount bits power-of-2 exponent
3200 * @generous_increase: allow for the refcount table to be 1.5x as large as it
3201 * needs to be
3202 *
3203 * Returns: Number of bytes required for refcount blocks and table metadata.
3204 */
3205 int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size,
3206 int refcount_order, bool generous_increase,
3207 uint64_t *refblock_count)
3208 {
3209 /*
3210 * Every host cluster is reference-counted, including metadata (even
3211 * refcount metadata is recursively included).
3212 *
3213 * An accurate formula for the size of the refcount metadata is difficult
3214 * to derive. An easier method of calculation is finding the fixed point
3215 * where no further refcount blocks or table clusters are required to
3216 * reference count every cluster.
3217 */
3218 int64_t blocks_per_table_cluster = cluster_size / REFTABLE_ENTRY_SIZE;
3219 int64_t refcounts_per_block = cluster_size * 8 / (1 << refcount_order);
3220 int64_t table = 0; /* number of refcount table clusters */
3221 int64_t blocks = 0; /* number of refcount block clusters */
3222 int64_t last;
3223 int64_t n = 0;
3224
3225 do {
3226 last = n;
3227 blocks = DIV_ROUND_UP(clusters + table + blocks, refcounts_per_block);
3228 table = DIV_ROUND_UP(blocks, blocks_per_table_cluster);
3229 n = clusters + blocks + table;
3230
3231 if (n == last && generous_increase) {
3232 clusters += DIV_ROUND_UP(table, 2);
3233 n = 0; /* force another loop */
3234 generous_increase = false;
3235 }
3236 } while (n != last);
3237
3238 if (refblock_count) {
3239 *refblock_count = blocks;
3240 }
3241
3242 return (blocks + table) * cluster_size;
3243 }
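/*
 * Worked example (hypothetical numbers, assuming 8-byte refcount table
 * entries and refcount_order = 4, i.e. 16-bit refcounts): a 1 GiB image with
 * 64 KiB clusters has clusters = 16384, refcounts_per_block = 32768 and
 * blocks_per_table_cluster = 8192. The loop converges after two passes with
 * blocks = 1 and table = 1, so a caller would see
 *
 *     uint64_t refblocks;
 *     int64_t bytes = qcow2_refcount_metadata_size(16384, 65536, 4, false,
 *                                                  &refblocks);
 *     // bytes == 131072 (one refcount block plus one table cluster),
 *     // refblocks == 1
 */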
3244
3245 /**
3246 * qcow2_calc_prealloc_size:
3247 * @total_size: virtual disk size in bytes
3248 * @cluster_size: cluster size in bytes
3249 * @refcount_order: refcount bits power-of-2 exponent
3250 * @extended_l2: true if the image has extended L2 entries
3251 *
3252 * Returns: Total number of bytes required for the fully allocated image
3253 * (including metadata).
3254 */
3255 static int64_t qcow2_calc_prealloc_size(int64_t total_size,
3256 size_t cluster_size,
3257 int refcount_order,
3258 bool extended_l2)
3259 {
3260 int64_t meta_size = 0;
3261 uint64_t nl1e, nl2e;
3262 int64_t aligned_total_size = ROUND_UP(total_size, cluster_size);
3263 size_t l2e_size = extended_l2 ? L2E_SIZE_EXTENDED : L2E_SIZE_NORMAL;
3264
3265 /* header: 1 cluster */
3266 meta_size += cluster_size;
3267
3268 /* total size of L2 tables */
3269 nl2e = aligned_total_size / cluster_size;
3270 nl2e = ROUND_UP(nl2e, cluster_size / l2e_size);
3271 meta_size += nl2e * l2e_size;
3272
3273 /* total size of L1 tables */
3274 nl1e = nl2e * l2e_size / cluster_size;
3275 nl1e = ROUND_UP(nl1e, cluster_size / L1E_SIZE);
3276 meta_size += nl1e * L1E_SIZE;
3277
3278 /* total size of refcount table and blocks */
3279 meta_size += qcow2_refcount_metadata_size(
3280 (meta_size + aligned_total_size) / cluster_size,
3281 cluster_size, refcount_order, false, NULL);
3282
3283 return meta_size + aligned_total_size;
3284 }
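/*
 * Worked example (illustrative, assuming 8-byte L1 entries, 8-byte
 * non-extended L2 entries and 16-bit refcounts): for total_size = 1 GiB and
 * 64 KiB clusters, the header needs one cluster (64 KiB), the L2 tables need
 * 16384 entries * 8 bytes = 128 KiB, the L1 table is rounded up to one
 * cluster (64 KiB), and the refcount metadata adds another 128 KiB, so the
 * fully allocated image requires roughly 1 GiB + 384 KiB.
 */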
3285
3286 static bool validate_cluster_size(size_t cluster_size, bool extended_l2,
3287 Error **errp)
3288 {
3289 int cluster_bits = ctz32(cluster_size);
3290 if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS ||
3291 (1 << cluster_bits) != cluster_size)
3292 {
3293 error_setg(errp, "Cluster size must be a power of two between %d and "
3294 "%dk", 1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10));
3295 return false;
3296 }
3297
3298 if (extended_l2) {
3299 unsigned min_cluster_size =
3300 (1 << MIN_CLUSTER_BITS) * QCOW_EXTL2_SUBCLUSTERS_PER_CLUSTER;
3301 if (cluster_size < min_cluster_size) {
3302 error_setg(errp, "Extended L2 entries are only supported with "
3303 "cluster sizes of at least %u bytes", min_cluster_size);
3304 return false;
3305 }
3306 }
3307
3308 return true;
3309 }
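/*
 * Illustrative limits (assuming MIN_CLUSTER_BITS = 9 and
 * MAX_CLUSTER_BITS = 21): valid cluster sizes are powers of two from
 * 512 bytes to 2 MiB. With extended L2 entries (32 subclusters per cluster)
 * the smallest acceptable cluster size becomes 512 * 32 = 16 KiB.
 */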
3310
3311 static size_t qcow2_opt_get_cluster_size_del(QemuOpts *opts, bool extended_l2,
3312 Error **errp)
3313 {
3314 size_t cluster_size;
3315
3316 cluster_size = qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE,
3317 DEFAULT_CLUSTER_SIZE);
3318 if (!validate_cluster_size(cluster_size, extended_l2, errp)) {
3319 return 0;
3320 }
3321 return cluster_size;
3322 }
3323
3324 static int qcow2_opt_get_version_del(QemuOpts *opts, Error **errp)
3325 {
3326 char *buf;
3327 int ret;
3328
3329 buf = qemu_opt_get_del(opts, BLOCK_OPT_COMPAT_LEVEL);
3330 if (!buf) {
3331 ret = 3; /* default */
3332 } else if (!strcmp(buf, "0.10")) {
3333 ret = 2;
3334 } else if (!strcmp(buf, "1.1")) {
3335 ret = 3;
3336 } else {
3337 error_setg(errp, "Invalid compatibility level: '%s'", buf);
3338 ret = -EINVAL;
3339 }
3340 g_free(buf);
3341 return ret;
3342 }
3343
3344 static uint64_t qcow2_opt_get_refcount_bits_del(QemuOpts *opts, int version,
3345 Error **errp)
3346 {
3347 uint64_t refcount_bits;
3348
3349 refcount_bits = qemu_opt_get_number_del(opts, BLOCK_OPT_REFCOUNT_BITS, 16);
3350 if (refcount_bits > 64 || !is_power_of_2(refcount_bits)) {
3351 error_setg(errp, "Refcount width must be a power of two and may not "
3352 "exceed 64 bits");
3353 return 0;
3354 }
3355
3356 if (version < 3 && refcount_bits != 16) {
3357 error_setg(errp, "Different refcount widths than 16 bits require "
3358 "compatibility level 1.1 or above (use compat=1.1 or "
3359 "greater)");
3360 return 0;
3361 }
3362
3363 return refcount_bits;
3364 }
3365
3366 static int coroutine_fn
3367 qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp)
3368 {
3369 BlockdevCreateOptionsQcow2 *qcow2_opts;
3370 QDict *options;
3371
3372 /*
3373 * Open the image file and write a minimal qcow2 header.
3374 *
3375 * We keep things simple and start with a zero-sized image. We also
3376 * do without refcount blocks or an L1 table for now. We'll fix the
3377 * inconsistency later.
3378 *
3379 * We do need a refcount table because growing the refcount table means
3380 * allocating two new refcount blocks - the second of which would be at
3381 * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file
3382 * size for any qcow2 image.
3383 */
3384 BlockBackend *blk = NULL;
3385 BlockDriverState *bs = NULL;
3386 BlockDriverState *data_bs = NULL;
3387 QCowHeader *header;
3388 size_t cluster_size;
3389 int version;
3390 int refcount_order;
3391 uint64_t *refcount_table;
3392 int ret;
3393 uint8_t compression_type = QCOW2_COMPRESSION_TYPE_ZLIB;
3394
3395 assert(create_options->driver == BLOCKDEV_DRIVER_QCOW2);
3396 qcow2_opts = &create_options->u.qcow2;
3397
3398 bs = bdrv_open_blockdev_ref(qcow2_opts->file, errp);
3399 if (bs == NULL) {
3400 return -EIO;
3401 }
3402
3403 /* Validate options and set default values */
3404 if (!QEMU_IS_ALIGNED(qcow2_opts->size, BDRV_SECTOR_SIZE)) {
3405 error_setg(errp, "Image size must be a multiple of %u bytes",
3406 (unsigned) BDRV_SECTOR_SIZE);
3407 ret = -EINVAL;
3408 goto out;
3409 }
3410
3411 if (qcow2_opts->has_version) {
3412 switch (qcow2_opts->version) {
3413 case BLOCKDEV_QCOW2_VERSION_V2:
3414 version = 2;
3415 break;
3416 case BLOCKDEV_QCOW2_VERSION_V3:
3417 version = 3;
3418 break;
3419 default:
3420 g_assert_not_reached();
3421 }
3422 } else {
3423 version = 3;
3424 }
3425
3426 if (qcow2_opts->has_cluster_size) {
3427 cluster_size = qcow2_opts->cluster_size;
3428 } else {
3429 cluster_size = DEFAULT_CLUSTER_SIZE;
3430 }
3431
3432 if (!qcow2_opts->has_extended_l2) {
3433 qcow2_opts->extended_l2 = false;
3434 }
3435 if (qcow2_opts->extended_l2) {
3436 if (version < 3) {
3437 error_setg(errp, "Extended L2 entries are only supported with "
3438 "compatibility level 1.1 and above (use version=v3 or "
3439 "greater)");
3440 ret = -EINVAL;
3441 goto out;
3442 }
3443 }
3444
3445 if (!validate_cluster_size(cluster_size, qcow2_opts->extended_l2, errp)) {
3446 ret = -EINVAL;
3447 goto out;
3448 }
3449
3450 if (!qcow2_opts->has_preallocation) {
3451 qcow2_opts->preallocation = PREALLOC_MODE_OFF;
3452 }
3453 if (qcow2_opts->has_backing_file &&
3454 qcow2_opts->preallocation != PREALLOC_MODE_OFF &&
3455 !qcow2_opts->extended_l2)
3456 {
3457 error_setg(errp, "Backing file and preallocation can only be used at "
3458 "the same time if extended_l2 is on");
3459 ret = -EINVAL;
3460 goto out;
3461 }
3462 if (qcow2_opts->has_backing_fmt && !qcow2_opts->has_backing_file) {
3463 error_setg(errp, "Backing format cannot be used without backing file");
3464 ret = -EINVAL;
3465 goto out;
3466 }
3467
3468 if (!qcow2_opts->has_lazy_refcounts) {
3469 qcow2_opts->lazy_refcounts = false;
3470 }
3471 if (version < 3 && qcow2_opts->lazy_refcounts) {
3472 error_setg(errp, "Lazy refcounts only supported with compatibility "
3473 "level 1.1 and above (use version=v3 or greater)");
3474 ret = -EINVAL;
3475 goto out;
3476 }
3477
3478 if (!qcow2_opts->has_refcount_bits) {
3479 qcow2_opts->refcount_bits = 16;
3480 }
3481 if (qcow2_opts->refcount_bits > 64 ||
3482 !is_power_of_2(qcow2_opts->refcount_bits))
3483 {
3484 error_setg(errp, "Refcount width must be a power of two and may not "
3485 "exceed 64 bits");
3486 ret = -EINVAL;
3487 goto out;
3488 }
3489 if (version < 3 && qcow2_opts->refcount_bits != 16) {
3490 error_setg(errp, "Different refcount widths than 16 bits require "
3491 "compatibility level 1.1 or above (use version=v3 or "
3492 "greater)");
3493 ret = -EINVAL;
3494 goto out;
3495 }
3496 refcount_order = ctz32(qcow2_opts->refcount_bits);
3497
3498 if (qcow2_opts->data_file_raw && !qcow2_opts->data_file) {
3499 error_setg(errp, "data-file-raw requires data-file");
3500 ret = -EINVAL;
3501 goto out;
3502 }
3503 if (qcow2_opts->data_file_raw && qcow2_opts->has_backing_file) {
3504 error_setg(errp, "Backing file and data-file-raw cannot be used at "
3505 "the same time");
3506 ret = -EINVAL;
3507 goto out;
3508 }
3509
3510 if (qcow2_opts->data_file) {
3511 if (version < 3) {
3512 error_setg(errp, "External data files are only supported with "
3513 "compatibility level 1.1 and above (use version=v3 or "
3514 "greater)");
3515 ret = -EINVAL;
3516 goto out;
3517 }
3518 data_bs = bdrv_open_blockdev_ref(qcow2_opts->data_file, errp);
3519 if (data_bs == NULL) {
3520 ret = -EIO;
3521 goto out;
3522 }
3523 }
3524
3525 if (qcow2_opts->has_compression_type &&
3526 qcow2_opts->compression_type != QCOW2_COMPRESSION_TYPE_ZLIB) {
3527
3528 ret = -EINVAL;
3529
3530 if (version < 3) {
3531 error_setg(errp, "Non-zlib compression type is only supported with "
3532 "compatibility level 1.1 and above (use version=v3 or "
3533 "greater)");
3534 goto out;
3535 }
3536
3537 switch (qcow2_opts->compression_type) {
3538 #ifdef CONFIG_ZSTD
3539 case QCOW2_COMPRESSION_TYPE_ZSTD:
3540 break;
3541 #endif
3542 default:
3543 error_setg(errp, "Unknown compression type");
3544 goto out;
3545 }
3546
3547 compression_type = qcow2_opts->compression_type;
3548 }
3549
3550 /* Create BlockBackend to write to the image */
3551 blk = blk_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL,
3552 errp);
3553 if (!blk) {
3554 ret = -EPERM;
3555 goto out;
3556 }
3557 blk_set_allow_write_beyond_eof(blk, true);
3558
3559 /* Write the header */
3560 QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS) < sizeof(*header));
3561 header = g_malloc0(cluster_size);
3562 *header = (QCowHeader) {
3563 .magic = cpu_to_be32(QCOW_MAGIC),
3564 .version = cpu_to_be32(version),
3565 .cluster_bits = cpu_to_be32(ctz32(cluster_size)),
3566 .size = cpu_to_be64(0),
3567 .l1_table_offset = cpu_to_be64(0),
3568 .l1_size = cpu_to_be32(0),
3569 .refcount_table_offset = cpu_to_be64(cluster_size),
3570 .refcount_table_clusters = cpu_to_be32(1),
3571 .refcount_order = cpu_to_be32(refcount_order),
3572 /* don't deal with endianness since compression_type is 1 byte long */
3573 .compression_type = compression_type,
3574 .header_length = cpu_to_be32(sizeof(*header)),
3575 };
3576
3577 /* We'll update this to the correct value later */
3578 header->crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
3579
3580 if (qcow2_opts->lazy_refcounts) {
3581 header->compatible_features |=
3582 cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS);
3583 }
3584 if (data_bs) {
3585 header->incompatible_features |=
3586 cpu_to_be64(QCOW2_INCOMPAT_DATA_FILE);
3587 }
3588 if (qcow2_opts->data_file_raw) {
3589 header->autoclear_features |=
3590 cpu_to_be64(QCOW2_AUTOCLEAR_DATA_FILE_RAW);
3591 }
3592 if (compression_type != QCOW2_COMPRESSION_TYPE_ZLIB) {
3593 header->incompatible_features |=
3594 cpu_to_be64(QCOW2_INCOMPAT_COMPRESSION);
3595 }
3596
3597 if (qcow2_opts->extended_l2) {
3598 header->incompatible_features |=
3599 cpu_to_be64(QCOW2_INCOMPAT_EXTL2);
3600 }
3601
3602 ret = blk_pwrite(blk, 0, header, cluster_size, 0);
3603 g_free(header);
3604 if (ret < 0) {
3605 error_setg_errno(errp, -ret, "Could not write qcow2 header");
3606 goto out;
3607 }
3608
3609 /* Write a refcount table with one refcount block */
3610 refcount_table = g_malloc0(2 * cluster_size);
3611 refcount_table[0] = cpu_to_be64(2 * cluster_size);
3612 ret = blk_pwrite(blk, cluster_size, refcount_table, 2 * cluster_size, 0);
3613 g_free(refcount_table);
3614
3615 if (ret < 0) {
3616 error_setg_errno(errp, -ret, "Could not write refcount table");
3617 goto out;
3618 }
3619
3620 blk_unref(blk);
3621 blk = NULL;
3622
3623 /*
3624 * And now open the image and make it consistent first (i.e. increase the
3625 * refcount of the cluster that is occupied by the header and the refcount
3626 * table)
3627 */
3628 options = qdict_new();
3629 qdict_put_str(options, "driver", "qcow2");
3630 qdict_put_str(options, "file", bs->node_name);
3631 if (data_bs) {
3632 qdict_put_str(options, "data-file", data_bs->node_name);
3633 }
3634 blk = blk_new_open(NULL, NULL, options,
3635 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_NO_FLUSH,
3636 errp);
3637 if (blk == NULL) {
3638 ret = -EIO;
3639 goto out;
3640 }
3641
3642 ret = qcow2_alloc_clusters(blk_bs(blk), 3 * cluster_size);
3643 if (ret < 0) {
3644 error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 "
3645 "header and refcount table");
3646 goto out;
3647
3648 } else if (ret != 0) {
3649 error_report("Huh, first cluster in empty image is already in use?");
3650 abort();
3651 }
3652
3653 /* Set the external data file if necessary */
3654 if (data_bs) {
3655 BDRVQcow2State *s = blk_bs(blk)->opaque;
3656 s->image_data_file = g_strdup(data_bs->filename);
3657 }
3658
3659 /* Create a full header (including things like feature table) */
3660 ret = qcow2_update_header(blk_bs(blk));
3661 if (ret < 0) {
3662 error_setg_errno(errp, -ret, "Could not update qcow2 header");
3663 goto out;
3664 }
3665
3666 /* Okay, now that we have a valid image, let's give it the right size */
3667 ret = blk_truncate(blk, qcow2_opts->size, false, qcow2_opts->preallocation,
3668 0, errp);
3669 if (ret < 0) {
3670 error_prepend(errp, "Could not resize image: ");
3671 goto out;
3672 }
3673
3674 /* Want a backing file? There you go. */
3675 if (qcow2_opts->has_backing_file) {
3676 const char *backing_format = NULL;
3677
3678 if (qcow2_opts->has_backing_fmt) {
3679 backing_format = BlockdevDriver_str(qcow2_opts->backing_fmt);
3680 }
3681
3682 ret = bdrv_change_backing_file(blk_bs(blk), qcow2_opts->backing_file,
3683 backing_format, false);
3684 if (ret < 0) {
3685 error_setg_errno(errp, -ret, "Could not assign backing file '%s' "
3686 "with format '%s'", qcow2_opts->backing_file,
3687 backing_format);
3688 goto out;
3689 }
3690 }
3691
3692 /* Want encryption? There you go. */
3693 if (qcow2_opts->has_encrypt) {
3694 ret = qcow2_set_up_encryption(blk_bs(blk), qcow2_opts->encrypt, errp);
3695 if (ret < 0) {
3696 goto out;
3697 }
3698 }
3699
3700 blk_unref(blk);
3701 blk = NULL;
3702
3703 /* Reopen the image without BDRV_O_NO_FLUSH to flush it before returning.
3704 * Use BDRV_O_NO_IO: since encryption is now set up, we don't want to
3705 * have to set up a decryption context. We're not doing any I/O on the
3706 * top-level BlockDriverState, only on lower layers, where BDRV_O_NO_IO
3707 * has no effect.
3708 */
3709 options = qdict_new();
3710 qdict_put_str(options, "driver", "qcow2");
3711 qdict_put_str(options, "file", bs->node_name);
3712 if (data_bs) {
3713 qdict_put_str(options, "data-file", data_bs->node_name);
3714 }
3715 blk = blk_new_open(NULL, NULL, options,
3716 BDRV_O_RDWR | BDRV_O_NO_BACKING | BDRV_O_NO_IO,
3717 errp);
3718 if (blk == NULL) {
3719 ret = -EIO;
3720 goto out;
3721 }
3722
3723 ret = 0;
3724 out:
3725 blk_unref(blk);
3726 bdrv_unref(bs);
3727 bdrv_unref(data_bs);
3728 return ret;
3729 }
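/*
 * Illustrative QMP usage of this function via blockdev-create (example
 * values; "proto-node" is a hypothetical node name of an already added
 * protocol-layer node):
 *
 *   { "execute": "blockdev-create",
 *     "arguments": { "job-id": "create0",
 *                    "options": { "driver": "qcow2",
 *                                 "file": "proto-node",
 *                                 "size": 1073741824,
 *                                 "cluster-size": 65536,
 *                                 "version": "v3" } } }
 */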
3730
3731 static int coroutine_fn qcow2_co_create_opts(BlockDriver *drv,
3732 const char *filename,
3733 QemuOpts *opts,
3734 Error **errp)
3735 {
3736 BlockdevCreateOptions *create_options = NULL;
3737 QDict *qdict;
3738 Visitor *v;
3739 BlockDriverState *bs = NULL;
3740 BlockDriverState *data_bs = NULL;
3741 const char *val;
3742 int ret;
3743
3744 /* Only the keyval visitor supports the dotted syntax needed for
3745 * encryption, so go through a QDict before getting a QAPI type. Ignore
3746 * options meant for the protocol layer so that the visitor doesn't
3747 * complain. */
3748 qdict = qemu_opts_to_qdict_filtered(opts, NULL, bdrv_qcow2.create_opts,
3749 true);
3750
3751 /* Handle encryption options */
3752 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT);
3753 if (val && !strcmp(val, "on")) {
3754 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT, "qcow");
3755 } else if (val && !strcmp(val, "off")) {
3756 qdict_del(qdict, BLOCK_OPT_ENCRYPT);
3757 }
3758
3759 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT);
3760 if (val && !strcmp(val, "aes")) {
3761 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT, "qcow");
3762 }
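/*
 * Both the legacy BLOCK_OPT_ENCRYPT boolean ("on") and an explicit "aes"
 * format name refer to the old qcow AES encryption format, hence the
 * mappings to "qcow" above.
 */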
3763
3764 /* Convert compat=0.10/1.1 into compat=v2/v3, to be renamed to
3765 * version=v2/v3 below. */
3766 val = qdict_get_try_str(qdict, BLOCK_OPT_COMPAT_LEVEL);
3767 if (val && !strcmp(val, "0.10")) {
3768 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v2");
3769 } else if (val && !strcmp(val, "1.1")) {
3770 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v3");
3771 }
3772
3773 /* Change legacy command line options into QMP ones */
3774 static const QDictRenames opt_renames[] = {
3775 { BLOCK_OPT_BACKING_FILE, "backing-file" },
3776 { BLOCK_OPT_BACKING_FMT, "backing-fmt" },
3777 { BLOCK_OPT_CLUSTER_SIZE, "cluster-size" },
3778 { BLOCK_OPT_LAZY_REFCOUNTS, "lazy-refcounts" },
3779 { BLOCK_OPT_EXTL2, "extended-l2" },
3780 { BLOCK_OPT_REFCOUNT_BITS, "refcount-bits" },
3781 { BLOCK_OPT_ENCRYPT, BLOCK_OPT_ENCRYPT_FORMAT },
3782 { BLOCK_OPT_COMPAT_LEVEL, "version" },
3783 { BLOCK_OPT_DATA_FILE_RAW, "data-file-raw" },
3784 { BLOCK_OPT_COMPRESSION_TYPE, "compression-type" },
3785 { NULL, NULL },
3786 };
3787
3788 if (!qdict_rename_keys(qdict, opt_renames, errp)) {
3789 ret = -EINVAL;
3790 goto finish;
3791 }
3792
3793 /* Create and open the file (protocol layer) */
3794 ret = bdrv_create_file(filename, opts, errp);
3795 if (ret < 0) {
3796 goto finish;
3797 }
3798
3799 bs = bdrv_open(filename, NULL, NULL,
3800 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp);
3801 if (bs == NULL) {
3802 ret = -EIO;
3803 goto finish;
3804 }
3805
3806 /* Create and open an external data file (protocol layer) */
3807 val = qdict_get_try_str(qdict, BLOCK_OPT_DATA_FILE);
3808 if (val) {
3809 ret = bdrv_create_file(val, opts, errp);
3810 if (ret < 0) {
3811 goto finish;
3812 }
3813
3814 data_bs = bdrv_open(val, NULL, NULL,
3815 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
3816 errp);
3817 if (data_bs == NULL) {
3818 ret = -EIO;
3819 goto finish;
3820 }
3821
3822 qdict_del(qdict, BLOCK_OPT_DATA_FILE);
3823 qdict_put_str(qdict, "data-file", data_bs->node_name);
3824 }
3825
3826 /* Set the 'driver' and 'file' options */
3827 qdict_put_str(qdict, "driver", "qcow2");
3828 qdict_put_str(qdict, "file", bs->node_name);
3829
3830 /* Now get the QAPI type BlockdevCreateOptions */
3831 v = qobject_input_visitor_new_flat_confused(qdict, errp);
3832 if (!v) {
3833 ret = -EINVAL;
3834 goto finish;
3835 }
3836
3837 visit_type_BlockdevCreateOptions(v, NULL, &create_options, errp);
3838 visit_free(v);
3839 if (!create_options) {
3840 ret = -EINVAL;
3841 goto finish;
3842 }
3843
3844 /* Silently round up size */
3845 create_options->u.qcow2.size = ROUND_UP(create_options->u.qcow2.size,
3846 BDRV_SECTOR_SIZE);
3847
3848 /* Create the qcow2 image (format layer) */
3849 ret = qcow2_co_create(create_options, errp);
3850 if (ret < 0) {
3851 goto finish;
3852 }
3853
3854 ret = 0;
3855 finish:
3856 qobject_unref(qdict);
3857 bdrv_unref(bs);
3858 bdrv_unref(data_bs);
3859 qapi_free_BlockdevCreateOptions(create_options);
3860 return ret;
3861 }
3862
3863
3864 static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes)
3865 {
3866 int64_t nr;
3867 int res;
3868
3869 /* Clamp to image length, before checking status of underlying sectors */
3870 if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) {
3871 bytes = bs->total_sectors * BDRV_SECTOR_SIZE - offset;
3872 }
3873
3874 if (!bytes) {
3875 return true;
3876 }
3877
3878 /*
3879 * bdrv_block_status_above() doesn't merge different types of zeros, for
3880 * example zeros from a region that is unallocated in the whole backing
3881 * chain and zeros that result from a short backing file, so we need a
3882 * loop.
3883 */
3884 do {
3885 res = bdrv_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL);
3886 offset += nr;
3887 bytes -= nr;
3888 } while (res >= 0 && (res & BDRV_BLOCK_ZERO) && nr && bytes);
3889
3890 return res >= 0 && (res & BDRV_BLOCK_ZERO) && bytes == 0;
3891 }
3892
3893 static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs,
3894 int64_t offset, int bytes, BdrvRequestFlags flags)
3895 {
3896 int ret;
3897 BDRVQcow2State *s = bs->opaque;
3898
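/*
 * head/tail are the number of bytes by which the request is misaligned
 * with respect to subcluster boundaries at its start and end; if both are
 * zero, the request covers whole subclusters and can be zeroed directly.
 */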
3899 uint32_t head = offset_into_subcluster(s, offset);
3900 uint32_t tail = ROUND_UP(offset + bytes, s->subcluster_size) -
3901 (offset + bytes);
3902
3903 trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, bytes);
3904 if (offset + bytes == bs->total_sectors * BDRV_SECTOR_SIZE) {
3905 tail = 0;
3906 }
3907
3908 if (head || tail) {
3909 uint64_t off;
3910 unsigned int nr;
3911 QCow2SubclusterType type;
3912
3913 assert(head + bytes + tail <= s->subcluster_size);
3914
3915 /* check whether the remainder of the subcluster already reads as zero */
3916 if (!(is_zero(bs, offset - head, head) &&
3917 is_zero(bs, offset + bytes, tail))) {
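/*
 * The subcluster cannot be zeroed as a whole; return -ENOTSUP so that
 * the generic block layer can fall back to writing explicit zeroes for
 * this request.
 */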
3918 return -ENOTSUP;
3919 }
3920
3921 qemu_co_mutex_lock(&s->lock);
3922 /* A concurrent write may have happened since the check above, so re-check */
3923 offset -= head;
3924 bytes = s->subcluster_size;
3925 nr = s->subcluster_size;
3926 ret = qcow2_get_host_offset(bs, offset, &nr, &off, &type);
3927 if (ret < 0 ||
3928 (type != QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN &&
3929 type != QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC &&
3930 type != QCOW2_SUBCLUSTER_ZERO_PLAIN &&
3931 type != QCOW2_SUBCLUSTER_ZERO_ALLOC)) {
3932 qemu_co_mutex_unlock(&s->lock);
3933 return ret < 0 ? ret : -ENOTSUP;
3934 }
3935 } else {
3936 qemu_co_mutex_lock(&s->lock);
3937 }
3938
3939 trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, bytes);
3940
3941 /* Whatever is left can use real zero subclusters */
3942 ret = qcow2_subcluster_zeroize(bs, offset, bytes, flags);
3943 qemu_co_mutex_unlock(&s->lock);
3944
3945 return ret;
3946 }
3947
3948 static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs,
3949 int64_t offset, int bytes)
3950 {
3951 int ret;
3952 BDRVQcow2State *s = bs->opaque;
3953
3954 /* If the image does not support QCOW_OFLAG_ZERO then discarding
3955 * clusters could expose stale data from the backing file. */
3956 if (s->qcow_version < 3 && bs->backing) {
3957 return -ENOTSUP;
3958 }
3959
3960 if (!QEMU_IS_ALIGNED(offset | bytes, s->cluster_size)) {
3961 assert(bytes < s->cluster_size);
3962 /* Ignore partial clusters, except for the special case of discarding
3963 * the complete trailing partial cluster of an unaligned image */
3964 if (!QEMU_IS_ALIGNED(offset, s->cluster_size) ||
3965 offset + bytes != bs->total_sectors * BDRV_SECTOR_SIZE) {
3966 return -ENOTSUP;
3967 }
3968 }
3969
3970 qemu_co_mutex_lock(&s->lock);
3971 ret = qcow2_cluster_discard(bs, offset, bytes, QCOW2_DISCARD_REQUEST,
3972 false);
3973 qemu_co_mutex_unlock(&s->lock);
3974 return ret;
3975 }
3976
3977 static int coroutine_fn
3978 qcow2_co_copy_range_from(BlockDriverState *bs,
3979 BdrvChild *src, uint64_t src_offset,
3980 BdrvChild *dst, uint64_t dst_offset,
3981 uint64_t bytes, BdrvRequestFlags read_flags,
3982 BdrvRequestFlags write_flags)
3983 {
3984 BDRVQcow2State *s = bs->opaque;
3985 int ret;
3986 unsigned int cur_bytes; /* number of bytes in current iteration */
3987 BdrvChild *child = NULL;
3988 BdrvRequestFlags cur_write_flags;
3989
3990 assert(!bs->encrypted);
3991 qemu_co_mutex_lock(&s->lock);
3992
3993 while (bytes != 0) {
3994 uint64_t copy_offset = 0;
3995 QCow2SubclusterType type;
3996 /* prepare next request */
3997 cur_bytes = MIN(bytes, INT_MAX);
3998 cur_write_flags = write_flags;
3999
4000 ret = qcow2_get_host_offset(bs, src_offset, &cur_bytes,
4001 &copy_offset, &type);
4002 if (ret < 0) {
4003 goto out;
4004 }
4005
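/*
 * Choose the copy source according to the subcluster type: unallocated
 * data is taken from the backing file (or turned into a zero write past
 * its end), zero subclusters become zero writes, normal subclusters are
 * copied from the data file, and compressed clusters are not supported.
 */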
4006 switch (type) {
4007 case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
4008 case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
4009 if (bs->backing && bs->backing->bs) {
4010 int64_t backing_length = bdrv_getlength(bs->backing->bs);
4011 if (src_offset >= backing_length) {
4012 cur_write_flags |= BDRV_REQ_ZERO_WRITE;
4013 } else {
4014 child = bs->backing;
4015 cur_bytes = MIN(cur_bytes, backing_length - src_offset);
4016 copy_offset = src_offset;
4017 }
4018 } else {
4019 cur_write_flags |= BDRV_REQ_ZERO_WRITE;
4020 }
4021 break;
4022
4023 case QCOW2_SUBCLUSTER_ZERO_PLAIN:
4024 case QCOW2_SUBCLUSTER_ZERO_ALLOC:
4025 cur_write_flags |= BDRV_REQ_ZERO_WRITE;
4026 break;
4027
4028 case QCOW2_SUBCLUSTER_COMPRESSED:
4029 ret = -ENOTSUP;
4030 goto out;
4031
4032 case QCOW2_SUBCLUSTER_NORMAL:
4033 child = s->data_file;
4034 break;
4035
4036 default:
4037 abort();
4038 }
4039 qemu_co_mutex_unlock(&s->lock);
4040 ret = bdrv_co_copy_range_from(child,
4041 copy_offset,
4042 dst, dst_offset,
4043 cur_bytes, read_flags, cur_write_flags);
4044 qemu_co_mutex_lock(&s->lock);
4045 if (ret < 0) {
4046 goto out;
4047 }
4048
4049 bytes -= cur_bytes;
4050 src_offset += cur_bytes;
4051 dst_offset += cur_bytes;
4052 }
4053 ret = 0;
4054
4055 out:
4056 qemu_co_mutex_unlock(&s->lock);
4057 return ret;
4058 }
4059
4060 static int coroutine_fn
4061 qcow2_co_copy_range_to(BlockDriverState *bs,
4062 BdrvChild *src, uint64_t src_offset,
4063 BdrvChild *dst, uint64_t dst_offset,
4064 uint64_t bytes, BdrvRequestFlags read_flags,
4065 BdrvRequestFlags write_flags)
4066 {
4067 BDRVQcow2State *s = bs->opaque;
4068 int ret;
4069 unsigned int cur_bytes; /* number of bytes in current iteration */
4070 uint64_t host_offset;
4071 QCowL2Meta *l2meta = NULL;
4072
4073 assert(!bs->encrypted);
4074
4075 qemu_co_mutex_lock(&s->lock);
4076
4077 while (bytes != 0) {
4078
4079 l2meta = NULL;
4080
4081 cur_bytes = MIN(bytes, INT_MAX);
4082
4083 /* TODO:
4084 * If src->bs == dst->bs, we could simply copy by incrementing
4085 * the refcnt, without copying user data.
4086 * Or if src->bs == dst->bs->backing->bs, we could copy by discarding. */
4087 ret = qcow2_alloc_host_offset(bs, dst_offset, &cur_bytes,
4088 &host_offset, &l2meta);
4089 if (ret < 0) {
4090 goto fail;
4091 }
4092
4093 ret = qcow2_pre_write_overlap_check(bs, 0, host_offset, cur_bytes,
4094 true);
4095 if (ret < 0) {
4096 goto fail;
4097 }
4098
4099 qemu_co_mutex_unlock(&s->lock);
4100 ret = bdrv_co_copy_range_to(src, src_offset, s->data_file, host_offset,
4101 cur_bytes, read_flags, write_flags);
4102 qemu_co_mutex_lock(&s->lock);
4103 if (ret < 0) {
4104 goto fail;
4105 }
4106
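/*
 * Commit the allocation: link the newly written clusters into the L2
 * tables and wake up any requests that were waiting on this allocation.
 */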
4107 ret = qcow2_handle_l2meta(bs, &l2meta, true);
4108 if (ret) {
4109 goto fail;
4110 }
4111
4112 bytes -= cur_bytes;
4113 src_offset += cur_bytes;
4114 dst_offset += cur_bytes;
4115 }
4116 ret = 0;
4117
4118 fail:
4119 qcow2_handle_l2meta(bs, &l2meta, false);
4120
4121 qemu_co_mutex_unlock(&s->lock);
4122
4123 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret);
4124
4125 return ret;
4126 }
4127
4128 static int coroutine_fn qcow2_co_truncate(BlockDriverState *bs, int64_t offset,
4129 bool exact, PreallocMode prealloc,
4130 BdrvRequestFlags flags, Error **errp)
4131 {
4132 BDRVQcow2State *s = bs->opaque;
4133 uint64_t old_length;
4134 int64_t new_l1_size;
4135 int ret;
4136 QDict *options;
4137
4138 if (prealloc != PREALLOC_MODE_OFF && prealloc != PREALLOC_MODE_METADATA &&
4139 prealloc != PREALLOC_MODE_FALLOC && prealloc != PREALLOC_MODE_FULL)
4140 {
4141 error_setg(errp, "Unsupported preallocation mode '%s'",
4142 PreallocMode_str(prealloc));
4143 return -ENOTSUP;
4144 }
4145
4146 if (!QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)) {
4147 error_setg(errp, "The new size must be a multiple of %u",
4148 (unsigned) BDRV_SECTOR_SIZE);
4149 return -EINVAL;
4150 }
4151
4152 qemu_co_mutex_lock(&s->lock);
4153
4154 /*
4155 * Even though we store the snapshot size for all images, it was not
4156 * required before v3, so it is not safe to proceed for v2.
4157 */
4158 if (s->nb_snapshots && s->qcow_version < 3) {
4159 error_setg(errp, "Can't resize a v2 image which has snapshots");
4160 ret = -ENOTSUP;
4161 goto fail;
4162 }
4163
4164 /* See qcow2-bitmap.c for which bitmap scenarios prevent a resize. */
4165 if (qcow2_truncate_bitmaps_check(bs, errp)) {
4166 ret = -ENOTSUP;
4167 goto fail;
4168 }
4169
4170 old_length = bs->total_sectors * BDRV_SECTOR_SIZE;
4171 new_l1_size = size_to_l1(s, offset);
4172
4173 if (offset < old_length) {
4174 int64_t last_cluster, old_file_size;
4175 if (prealloc != PREALLOC_MODE_OFF) {
4176 error_setg(errp,
4177 "Preallocation can't be used for shrinking an image");
4178 ret = -EINVAL;
4179 goto fail;
4180 }
4181
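/*
 * Discard all clusters that lie beyond the new image size so that their
 * refcounts are released before the L1 table and the reftable are shrunk
 * below.
 */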
4182 ret = qcow2_cluster_discard(bs, ROUND_UP(offset, s->cluster_size),
4183 old_length - ROUND_UP(offset,
4184 s->cluster_size),
4185 QCOW2_DISCARD_ALWAYS, true);
4186 if (ret < 0) {
4187 error_setg_errno(errp, -ret, "Failed to discard cropped clusters");
4188 goto fail;
4189 }
4190
4191 ret = qcow2_shrink_l1_table(bs, new_l1_size);
4192 if (ret < 0) {
4193 error_setg_errno(errp, -ret,
4194 "Failed to reduce the number of L2 tables");
4195 goto fail;
4196 }
4197
4198 ret = qcow2_shrink_reftable(bs);
4199 if (ret < 0) {
4200 error_setg_errno(errp, -ret,
4201 "Failed to discard unused refblocks");
4202 goto fail;
4203 }
4204
4205 old_file_size = bdrv_getlength(bs->file->bs);
4206 if (old_file_size < 0) {
4207 error_setg_errno(errp, -old_file_size,
4208 "Failed to inquire current file length");
4209 ret = old_file_size;
4210 goto fail;
4211 }
4212 last_cluster = qcow2_get_last_cluster(bs, old_file_size);
4213 if (last_cluster < 0) {
4214 error_setg_errno(errp, -last_cluster,
4215 "Failed to find the last cluster");
4216 ret = last_cluster;
4217 goto fail;
4218 }
4219 if ((last_cluster + 1) * s->cluster_size < old_file_size) {
4220 Error *local_err = NULL;
4221
4222 /*
4223 * Do not pass @exact here: It will not help the user if
4224 * we get an error here just because they wanted to shrink
4225 * their qcow2 image (on a block device) with qemu-img.
4226 * (And on the qcow2 layer, the @exact requirement is
4227 * always fulfilled, so there is no need to pass it on.)
4228 */
4229 bdrv_co_truncate(bs->file, (last_cluster + 1) * s->cluster_size,
4230 false, PREALLOC_MODE_OFF, 0, &local_err);
4231 if (local_err) {
4232 warn_reportf_err(local_err,
4233 "Failed to truncate the tail of the image: ");
4234 }
4235 }
4236 } else {
4237 ret = qcow2_grow_l1_table(bs, new_l1_size, true);
4238 if (ret < 0) {
4239 error_setg_errno(errp, -ret, "Failed to grow the L1 table");
4240 goto fail;
4241 }
4242 }
4243
4244 switch (prealloc) {
4245 case PREALLOC_MODE_OFF:
4246 if (has_data_file(bs)) {
4247 /*
4248 * If the caller wants an exact resize, the external data
4249 * file should be resized to the exact target size, too,
4250 * so we pass @exact here.
4251 */
4252 ret = bdrv_co_truncate(s->data_file, offset, exact, prealloc, 0,
4253 errp);
4254 if (ret < 0) {
4255 goto fail;
4256 }
4257 }
4258 break;
4259
4260 case PREALLOC_MODE_METADATA:
4261 ret = preallocate_co(bs, old_length, offset, prealloc, errp);
4262 if (ret < 0) {
4263 goto fail;
4264 }
4265 break;
4266
4267 case PREALLOC_MODE_FALLOC:
4268 case PREALLOC_MODE_FULL:
4269 {
4270 int64_t allocation_start, host_offset, guest_offset;
4271 int64_t clusters_allocated;
4272 int64_t old_file_size, last_cluster, new_file_size;
4273 uint64_t nb_new_data_clusters, nb_new_l2_tables;
4274 bool subclusters_need_allocation = false;
4275
4276 /* With a data file, preallocation means just allocating the metadata
4277 * and forwarding the truncate request to the data file */
4278 if (has_data_file(bs)) {
4279 ret = preallocate_co(bs, old_length, offset, prealloc, errp);
4280 if (ret < 0) {
4281 goto fail;
4282 }
4283 break;
4284 }
4285
4286 old_file_size = bdrv_getlength(bs->file->bs);
4287 if (old_file_size < 0) {
4288 error_setg_errno(errp, -old_file_size,
4289 "Failed to inquire current file length");
4290 ret = old_file_size;
4291 goto fail;
4292 }
4293
4294 last_cluster = qcow2_get_last_cluster(bs, old_file_size);
4295 if (last_cluster >= 0) {
4296 old_file_size = (last_cluster + 1) * s->cluster_size;
4297 } else {
4298 old_file_size = ROUND_UP(old_file_size, s->cluster_size);
4299 }
4300
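/*
 * Number of data clusters needed to back the newly visible guest range;
 * both ends are rounded out to cluster boundaries (the new size up, the
 * old size down).
 */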
4301 nb_new_data_clusters = (ROUND_UP(offset, s->cluster_size) -
4302 start_of_cluster(s, old_length)) >> s->cluster_bits;
4303
4304 /* This is an overestimation; we will not actually allocate space for
4305 * these in the file but just make sure the new refcount structures are
4306 * able to cover them so we will not have to allocate new refblocks
4307 * while entering the data blocks in the potentially new L2 tables.
4308 * (We do not actually care where the L2 tables are placed. Maybe they
4309 * are already allocated or they can be placed somewhere before
4310 * @old_file_size. It does not matter because they will be fully
4311 * allocated automatically, so they do not need to be covered by the
4312 * preallocation. All that matters is that we will not have to allocate
4313 * new refcount structures for them.) */
4314 nb_new_l2_tables = DIV_ROUND_UP(nb_new_data_clusters,
4315 s->cluster_size / l2_entry_size(s));
4316 /* The cluster range may not be aligned to L2 boundaries, so add one L2
4317 * table for a potential head/tail */
4318 nb_new_l2_tables++;
4319
4320 allocation_start = qcow2_refcount_area(bs, old_file_size,
4321 nb_new_data_clusters +
4322 nb_new_l2_tables,
4323 true, 0, 0);
4324 if (allocation_start < 0) {
4325 error_setg_errno(errp, -allocation_start,
4326 "Failed to resize refcount structures");
4327 ret = allocation_start;
4328 goto fail;
4329 }
4330
4331 clusters_allocated = qcow2_alloc_clusters_at(bs, allocation_start,
4332 nb_new_data_clusters);
4333 if (clusters_allocated < 0) {
4334 error_setg_errno(errp, -clusters_allocated,
4335 "Failed to allocate data clusters");
4336 ret = clusters_allocated;
4337 goto fail;
4338 }
4339
4340 assert(clusters_allocated == nb_new_data_clusters);
4341
4342 /* Allocate the data area */
4343 new_file_size = allocation_start +
4344 nb_new_data_clusters * s->cluster_size;
4345 /*
4346 * Image file grows, so @exact does not matter.
4347 *
4348 * If we need to zero out the new area, try first whether the protocol
4349 * driver can already take care of this.
4350 */
4351 if (flags & BDRV_REQ_ZERO_WRITE) {
4352 ret = bdrv_co_truncate(bs->file, new_file_size, false, prealloc,
4353 BDRV_REQ_ZERO_WRITE, NULL);
4354 if (ret >= 0) {
4355 flags &= ~BDRV_REQ_ZERO_WRITE;
4356 /* Ensure that we read zeroes and not backing file data */
4357 subclusters_need_allocation = true;
4358 }
4359 } else {
4360 ret = -1;
4361 }
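/*
 * If ret is still negative at this point, either zeroing was not
 * requested or the zero-writing truncate above failed, so fall back to a
 * plain truncate without BDRV_REQ_ZERO_WRITE.
 */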
4362 if (ret < 0) {
4363 ret = bdrv_co_truncate(bs->file, new_file_size, false, prealloc, 0,
4364 errp);
4365 }
4366 if (ret < 0) {
4367 error_prepend(errp, "Failed to resize underlying file: ");
4368 qcow2_free_clusters(bs, allocation_start,
4369 nb_new_data_clusters * s->cluster_size,
4370 QCOW2_DISCARD_OTHER);
4371 goto fail;
4372 }
4373
4374 /* Create the necessary L2 entries */
4375 host_offset = allocation_start;
4376 guest_offset = old_length;
4377 while (nb_new_data_clusters) {
4378 int64_t nb_clusters = MIN(
4379 nb_new_data_clusters,
4380 s->l2_slice_size - offset_to_l2_slice_index(s, guest_offset));
4381 unsigned cow_start_length = offset_into_cluster(s, guest_offset);
4382 QCowL2Meta allocation;
4383 guest_offset = start_of_cluster(s, guest_offset);
4384 allocation = (QCowL2Meta) {
4385 .offset = guest_offset,
4386 .alloc_offset = host_offset,
4387 .nb_clusters = nb_clusters,
4388 .cow_start = {
4389 .offset = 0,
4390 .nb_bytes = cow_start_length,
4391 },
4392 .cow_end = {
4393 .offset = nb_clusters << s->cluster_bits,
4394 .nb_bytes = 0,
4395 },
4396 .prealloc = !subclusters_need_allocation,
4397 };
4398 qemu_co_queue_init(&allocation.dependent_requests);
4399
4400 ret = qcow2_alloc_cluster_link_l2(bs, &allocation);
4401 if (ret < 0) {
4402 error_setg_errno(errp, -ret, "Failed to update L2 tables");
4403 qcow2_free_clusters(bs, host_offset,
4404 nb_new_data_clusters * s->cluster_size,
4405 QCOW2_DISCARD_OTHER);
4406 goto fail;
4407 }
4408
4409 guest_offset += nb_clusters * s->cluster_size;
4410 host_offset += nb_clusters * s->cluster_size;
4411 nb_new_data_clusters -= nb_clusters;
4412 }
4413 break;
4414 }
4415
4416 default:
4417 g_assert_not_reached();
4418 }
4419
4420 if ((flags & BDRV_REQ_ZERO_WRITE) && offset > old_length) {
4421 uint64_t zero_start = QEMU_ALIGN_UP(old_length, s->subcluster_size);
4422
4423 /*
4424 * Use zero clusters as much as we can. qcow2_subcluster_zeroize()
4425 * requires a subcluster-aligned start. The end may be unaligned if
4426 * it is at the end of the image (which it is here).
4427 */
4428 if (offset > zero_start) {
4429 ret = qcow2_subcluster_zeroize(bs, zero_start, offset - zero_start,
4430 0);
4431 if (ret < 0) {
4432 error_setg_errno(errp, -ret, "Failed to zero out new clusters");
4433 goto fail;
4434 }
4435 }
4436
4437 /* Write explicit zeros for the unaligned head */
4438 if (zero_start > old_length) {
4439 uint64_t len = MIN(zero_start, offset) - old_length;
4440 uint8_t *buf = qemu_blockalign0(bs, len);
4441 QEMUIOVector qiov;
4442 qemu_iovec_init_buf(&qiov, buf, len);
4443
4444 qemu_co_mutex_unlock(&s->lock);
4445 ret = qcow2_co_pwritev_part(bs, old_length, len, &qiov, 0, 0);
4446 qemu_co_mutex_lock(&s->lock);
4447
4448 qemu_vfree(buf);
4449 if (ret < 0) {
4450 error_setg_errno(errp, -ret, "Failed to zero out the new area");
4451 goto fail;
4452 }
4453 }
4454 }
4455
4456 if (prealloc != PREALLOC_MODE_OFF) {
4457 /* Flush metadata before actually changing the image size */
4458 ret = qcow2_write_caches(bs);
4459 if (ret < 0) {
4460 error_setg_errno(errp, -ret,
4461 "Failed to flush the preallocated area to disk");
4462 goto fail;
4463 }
4464 }
4465
4466 bs->total_sectors = offset / BDRV_SECTOR_SIZE;
4467
4468 /* write updated header.size */
4469 offset = cpu_to_be64(offset);
4470 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size),
4471 &offset, sizeof(offset));
4472 if (ret < 0) {
4473 error_setg_errno(errp, -ret, "Failed to update the image size");
4474 goto fail;
4475 }
4476
4477 s->l1_vm_state_index = new_l1_size;
4478
4479 /* Update cache sizes */
4480 options = qdict_clone_shallow(bs->options);
4481 ret = qcow2_update_options(bs, options, s->flags, errp);
4482 qobject_unref(options);
4483 if (ret < 0) {
4484 goto fail;
4485 }
4486 ret = 0;
4487 fail:
4488 qemu_co_mutex_unlock(&s->lock);
4489 return ret;
4490 }
4491
4492 static coroutine_fn int
4493 qcow2_co_pwritev_compressed_task(BlockDriverState *bs,
4494 uint64_t offset, uint64_t bytes,
4495 QEMUIOVector *qiov, size_t qiov_offset)
4496 {
4497 BDRVQcow2State *s = bs->opaque;
4498 int ret;
4499 ssize_t out_len;
4500 uint8_t *buf, *out_buf;
4501 uint64_t cluster_offset;
4502
4503 assert(bytes == s->cluster_size || (bytes < s->cluster_size &&
4504 (offset + bytes == bs->total_sectors << BDRV_SECTOR_BITS)));
4505
4506 buf = qemu_blockalign(bs, s->cluster_size);
4507 if (bytes < s->cluster_size) {
4508 /* Zero-pad the last write if the image size is not cluster-aligned */
4509 memset(buf + bytes, 0, s->cluster_size - bytes);
4510 }
4511 qemu_iovec_to_buf(qiov, qiov_offset, buf, bytes);
4512
4513 out_buf = g_malloc(s->cluster_size);
4514
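/*
 * Compress into a buffer that is one byte smaller than a cluster: if the
 * data does not actually shrink, compressing it is pointless and the
 * cluster is written uncompressed instead (see the -ENOMEM case below).
 */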
4515 out_len = qcow2_co_compress(bs, out_buf, s->cluster_size - 1,
4516 buf, s->cluster_size);
4517 if (out_len == -ENOMEM) {
4518 /* could not compress: write normal cluster */
4519 ret = qcow2_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset, 0);
4520 if (ret < 0) {
4521 goto fail;
4522 }
4523 goto success;
4524 } else if (out_len < 0) {
4525 ret = -EINVAL;
4526 goto fail;
4527 }
4528
4529 qemu_co_mutex_lock(&s->lock);
4530 ret = qcow2_alloc_compressed_cluster_offset(bs, offset, out_len,
4531 &cluster_offset);
4532 if (ret < 0) {
4533 qemu_co_mutex_unlock(&s->lock);
4534 goto fail;
4535 }
4536
4537 ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset, out_len, true);
4538 qemu_co_mutex_unlock(&s->lock);
4539 if (ret < 0) {
4540 goto fail;
4541 }
4542
4543 BLKDBG_EVENT(s->data_file, BLKDBG_WRITE_COMPRESSED);
4544 ret = bdrv_co_pwrite(s->data_file, cluster_offset, out_len, out_buf, 0);
4545 if (ret < 0) {
4546 goto fail;
4547 }
4548 success:
4549 ret = 0;
4550 fail:
4551 qemu_vfree(buf);
4552 g_free(out_buf);
4553 return ret;
4554 }
4555
4556 static coroutine_fn int qcow2_co_pwritev_compressed_task_entry(AioTask *task)
4557 {
4558 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task);
4559
4560 assert(!t->subcluster_type && !t->l2meta);
4561
4562 return qcow2_co_pwritev_compressed_task(t->bs, t->offset, t->bytes, t->qiov,
4563 t->qiov_offset);
4564 }
4565
4566 /*
4567 * XXX: put compressed sectors first, then all the cluster-aligned
4568 * tables to avoid losing bytes in alignment
4569 */
4570 static coroutine_fn int
4571 qcow2_co_pwritev_compressed_part(BlockDriverState *bs,
4572 uint64_t offset, uint64_t bytes,
4573 QEMUIOVector *qiov, size_t qiov_offset)
4574 {
4575 BDRVQcow2State *s = bs->opaque;
4576 AioTaskPool *aio = NULL;
4577 int ret = 0;
4578
4579 if (has_data_file(bs)) {
4580 return -ENOTSUP;
4581 }
4582
4583 if (bytes == 0) {
4584 /*
4585 * Align the end of the file to a sector boundary to ease reading with
4586 * sector-based I/Os
4587 */
4588 int64_t len = bdrv_getlength(bs->file->bs);
4589 if (len < 0) {
4590 return len;
4591 }
4592 return bdrv_co_truncate(bs->file, len, false, PREALLOC_MODE_OFF, 0,
4593 NULL);
4594 }
4595
4596 if (offset_into_cluster