block/io: refactor save/load vmstate
[qemu.git] include/block/block.h
#ifndef BLOCK_H
#define BLOCK_H

#include "block/aio.h"
#include "block/aio-wait.h"
#include "qemu/iov.h"
#include "qemu/coroutine.h"
#include "block/accounting.h"
#include "block/dirty-bitmap.h"
#include "block/blockjob.h"
#include "qemu/hbitmap.h"

/*
 * generated_co_wrapper
 *
 * Function specifier, which does nothing but mark functions to be
 * generated by scripts/block-coroutine-wrapper.py
 *
 * Read more in docs/devel/block-coroutine-wrapper.rst
 */
#define generated_co_wrapper

/* block.c */
typedef struct BlockDriver BlockDriver;
typedef struct BdrvChild BdrvChild;
typedef struct BdrvChildClass BdrvChildClass;

typedef struct BlockDriverInfo {
    /* in bytes, 0 if irrelevant */
    int cluster_size;
    /* offset at which the VM state can be saved (0 if not possible) */
    int64_t vm_state_offset;
    bool is_dirty;
    /*
     * True if this block driver only supports compressed writes
     */
    bool needs_compressed_writes;
} BlockDriverInfo;

typedef struct BlockFragInfo {
    uint64_t allocated_clusters;
    uint64_t total_clusters;
    uint64_t fragmented_clusters;
    uint64_t compressed_clusters;
} BlockFragInfo;

typedef enum {
    BDRV_REQ_COPY_ON_READ       = 0x1,
    BDRV_REQ_ZERO_WRITE         = 0x2,

    /*
     * The BDRV_REQ_MAY_UNMAP flag is used in write_zeroes requests to indicate
     * that the block driver should unmap (discard) blocks if it is guaranteed
     * that the result will read back as zeroes. The flag is only passed to the
     * driver if the block device is opened with BDRV_O_UNMAP.
     */
    BDRV_REQ_MAY_UNMAP          = 0x4,

    BDRV_REQ_FUA                = 0x10,
    BDRV_REQ_WRITE_COMPRESSED   = 0x20,

    /* Signifies that this write request will not change the visible disk
     * content. */
    BDRV_REQ_WRITE_UNCHANGED    = 0x40,

    /*
     * BDRV_REQ_SERIALISING forces request serialisation for writes.
     * It is used to ensure that writes to the backing file of a backup process
     * target cannot race with a read of the backup target that defers to the
     * backing file.
     *
     * Note that BDRV_REQ_SERIALISING is _not_ the opposite in meaning of
     * BDRV_REQ_NO_SERIALISING. A more descriptive name for the latter might be
     * _DO_NOT_WAIT_FOR_SERIALISING, except that is too long.
     */
    BDRV_REQ_SERIALISING        = 0x80,

    /* Execute the request only if the operation can be offloaded or otherwise
     * be executed efficiently, but return an error instead of using a slow
     * fallback. */
    BDRV_REQ_NO_FALLBACK        = 0x100,

    /*
     * BDRV_REQ_PREFETCH may be used only together with BDRV_REQ_COPY_ON_READ
     * on read requests, and means that the caller does not really need the
     * data to be written to the qiov parameter, which may be NULL.
     */
    BDRV_REQ_PREFETCH           = 0x200,
    /* Mask of valid flags */
    BDRV_REQ_MASK               = 0x3ff,
} BdrvRequestFlags;

typedef struct BlockSizes {
    uint32_t phys;
    uint32_t log;
} BlockSizes;

typedef struct HDGeometry {
    uint32_t heads;
    uint32_t sectors;
    uint32_t cylinders;
} HDGeometry;

#define BDRV_O_RDWR         0x0002
#define BDRV_O_RESIZE       0x0004 /* request permission for resizing the node */
#define BDRV_O_SNAPSHOT     0x0008 /* open the file read only and save writes in a snapshot */
#define BDRV_O_TEMPORARY    0x0010 /* delete the file after use */
#define BDRV_O_NOCACHE      0x0020 /* do not use the host page cache */
#define BDRV_O_NATIVE_AIO   0x0080 /* use native AIO instead of the thread pool */
#define BDRV_O_NO_BACKING   0x0100 /* don't open the backing file */
#define BDRV_O_NO_FLUSH     0x0200 /* disable flushing on this disk */
#define BDRV_O_COPY_ON_READ 0x0400 /* copy read backing sectors into image */
#define BDRV_O_INACTIVE     0x0800 /* consistency hint for migration handoff */
#define BDRV_O_CHECK        0x1000 /* open solely for consistency check */
#define BDRV_O_ALLOW_RDWR   0x2000 /* allow reopen to change from r/o to r/w */
#define BDRV_O_UNMAP        0x4000 /* execute guest UNMAP/TRIM operations */
#define BDRV_O_PROTOCOL     0x8000 /* if no block driver is explicitly given:
                                      select an appropriate protocol driver,
                                      ignoring the format layer */
#define BDRV_O_NO_IO        0x10000 /* don't initialize for I/O */
#define BDRV_O_AUTO_RDONLY  0x20000 /* degrade to read-only if opening read-write fails */
#define BDRV_O_IO_URING     0x40000 /* use io_uring instead of the thread pool */

#define BDRV_O_CACHE_MASK   (BDRV_O_NOCACHE | BDRV_O_NO_FLUSH)


/* Option names of options parsed by the block layer */

#define BDRV_OPT_CACHE_WB       "cache.writeback"
#define BDRV_OPT_CACHE_DIRECT   "cache.direct"
#define BDRV_OPT_CACHE_NO_FLUSH "cache.no-flush"
#define BDRV_OPT_READ_ONLY      "read-only"
#define BDRV_OPT_AUTO_READ_ONLY "auto-read-only"
#define BDRV_OPT_DISCARD        "discard"
#define BDRV_OPT_FORCE_SHARE    "force-share"


#define BDRV_SECTOR_BITS   9
#define BDRV_SECTOR_SIZE   (1ULL << BDRV_SECTOR_BITS)

#define BDRV_REQUEST_MAX_SECTORS MIN_CONST(SIZE_MAX >> BDRV_SECTOR_BITS, \
                                           INT_MAX >> BDRV_SECTOR_BITS)
#define BDRV_REQUEST_MAX_BYTES (BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS)

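/*
 * Worked example (illustrative, assuming a typical 64-bit host where
 * SIZE_MAX >> 9 exceeds INT_MAX >> 9): BDRV_SECTOR_SIZE is 512,
 * BDRV_REQUEST_MAX_SECTORS evaluates to INT_MAX >> 9 = 4194303, and
 * BDRV_REQUEST_MAX_BYTES to 4194303 << 9 = 2147483136 bytes, i.e.
 * INT_MAX rounded down to a whole number of sectors.
 */
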
/*
 * Allocation status flags for bdrv_block_status() and friends.
 *
 * Public flags:
 * BDRV_BLOCK_DATA: allocation for data at offset is tied to this layer
 * BDRV_BLOCK_ZERO: offset reads as zero
 * BDRV_BLOCK_OFFSET_VALID: an associated offset exists for accessing raw data
 * BDRV_BLOCK_ALLOCATED: the content of the block is determined by this
 *                       layer rather than any backing, set by block layer
 * BDRV_BLOCK_EOF: the returned pnum covers through end of file for this
 *                 layer, set by block layer
 *
 * Internal flags:
 * BDRV_BLOCK_RAW: for use by passthrough drivers, such as raw, to request
 *                 that the block layer recompute the answer from the returned
 *                 BDS; must be accompanied by just BDRV_BLOCK_OFFSET_VALID.
 * BDRV_BLOCK_RECURSE: request that the block layer recursively search for
 *                     zeroes in the file child of the current block node
 *                     within the returned region. Only valid together with
 *                     both BDRV_BLOCK_DATA and BDRV_BLOCK_OFFSET_VALID.
 *                     Should not appear with BDRV_BLOCK_ZERO.
 *
 * If BDRV_BLOCK_OFFSET_VALID is set, the map parameter represents the
 * host offset within the returned BDS that is allocated for the
 * corresponding raw guest data. However, whether that offset
 * actually contains data also depends on BDRV_BLOCK_DATA, as follows:
 *
 * DATA ZERO OFFSET_VALID
 *  t    t        t       sectors read as zero, returned file is zero at offset
 *  t    f        t       sectors read as valid from file at offset
 *  f    t        t       sectors preallocated, read as zero, returned file not
 *                        necessarily zero at offset
 *  f    f        t       sectors preallocated but read from backing_hd,
 *                        returned file contains garbage at offset
 *  t    t        f       sectors preallocated, read as zero, unknown offset
 *  t    f        f       sectors read from unknown file or offset
 *  f    t        f       not allocated or unknown offset, read as zero
 *  f    f        f       not allocated or unknown offset, read from backing_hd
 */
#define BDRV_BLOCK_DATA         0x01
#define BDRV_BLOCK_ZERO         0x02
#define BDRV_BLOCK_OFFSET_VALID 0x04
#define BDRV_BLOCK_RAW          0x08
#define BDRV_BLOCK_ALLOCATED    0x10
#define BDRV_BLOCK_EOF          0x20
#define BDRV_BLOCK_RECURSE      0x40

typedef QTAILQ_HEAD(BlockReopenQueue, BlockReopenQueueEntry) BlockReopenQueue;

typedef struct BDRVReopenState {
    BlockDriverState *bs;
    int flags;
    BlockdevDetectZeroesOptions detect_zeroes;
    bool backing_missing;
    bool replace_backing_bs; /* new_backing_bs is ignored if this is false */
    BlockDriverState *new_backing_bs; /* If NULL then detach the current bs */
    uint64_t perm, shared_perm;
    QDict *options;
    QDict *explicit_options;
    void *opaque;
} BDRVReopenState;

/*
 * Block operation types
 */
typedef enum BlockOpType {
    BLOCK_OP_TYPE_BACKUP_SOURCE,
    BLOCK_OP_TYPE_BACKUP_TARGET,
    BLOCK_OP_TYPE_CHANGE,
    BLOCK_OP_TYPE_COMMIT_SOURCE,
    BLOCK_OP_TYPE_COMMIT_TARGET,
    BLOCK_OP_TYPE_DATAPLANE,
    BLOCK_OP_TYPE_DRIVE_DEL,
    BLOCK_OP_TYPE_EJECT,
    BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT,
    BLOCK_OP_TYPE_INTERNAL_SNAPSHOT,
    BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE,
    BLOCK_OP_TYPE_MIRROR_SOURCE,
    BLOCK_OP_TYPE_MIRROR_TARGET,
    BLOCK_OP_TYPE_RESIZE,
    BLOCK_OP_TYPE_STREAM,
    BLOCK_OP_TYPE_REPLACE,
    BLOCK_OP_TYPE_MAX,
} BlockOpType;

/* Block node permission constants */
enum {
    /**
     * A user that has the "permission" of consistent reads is guaranteed that
     * their view of the contents of the block device is complete and
     * self-consistent, representing the contents of a disk at a specific
     * point.
     *
     * For most block devices (including their backing files) this is true, but
     * the property cannot be maintained in a few situations like for
     * intermediate nodes of a commit block job.
     */
    BLK_PERM_CONSISTENT_READ    = 0x01,

    /** This permission is required to change the visible disk contents. */
    BLK_PERM_WRITE              = 0x02,

    /**
     * This permission (which is weaker than BLK_PERM_WRITE) is both enough and
     * required for writes to the block node when the caller promises that
     * the visible disk content doesn't change.
     *
     * As the BLK_PERM_WRITE permission is strictly stronger, either is
     * sufficient to perform an unchanging write.
     */
    BLK_PERM_WRITE_UNCHANGED    = 0x04,

    /** This permission is required to change the size of a block node. */
    BLK_PERM_RESIZE             = 0x08,

    /**
     * This permission is required to change the node that this BdrvChild
     * points to.
     */
    BLK_PERM_GRAPH_MOD          = 0x10,

    BLK_PERM_ALL                = 0x1f,

    DEFAULT_PERM_PASSTHROUGH    = BLK_PERM_CONSISTENT_READ
                                  | BLK_PERM_WRITE
                                  | BLK_PERM_WRITE_UNCHANGED
                                  | BLK_PERM_RESIZE,

    DEFAULT_PERM_UNCHANGED      = BLK_PERM_ALL & ~DEFAULT_PERM_PASSTHROUGH,
};

/*
 * Flags that parent nodes assign to child nodes to specify what kind of
 * role(s) they take.
 *
 * At least one of DATA, METADATA, FILTERED, or COW must be set for
 * every child.
 */
enum BdrvChildRoleBits {
    /*
     * This child stores data.
     * Any node may have an arbitrary number of such children.
     */
    BDRV_CHILD_DATA         = (1 << 0),

    /*
     * This child stores metadata.
     * Any node may have an arbitrary number of metadata-storing
     * children.
     */
    BDRV_CHILD_METADATA     = (1 << 1),

    /*
     * A child that always presents exactly the same visible data as
     * the parent, e.g. by virtue of the parent forwarding all reads
     * and writes.
     * This flag is mutually exclusive with DATA, METADATA, and COW.
     * Any node may have at most one filtered child at a time.
     */
    BDRV_CHILD_FILTERED     = (1 << 2),

    /*
     * Child from which to read all data that isn't allocated in the
     * parent (i.e., the backing child); such data is copied to the
     * parent through COW (and optionally COR).
     * This field is mutually exclusive with DATA, METADATA, and
     * FILTERED.
     * Any node may have at most one such backing child at a time.
     */
    BDRV_CHILD_COW          = (1 << 3),

    /*
     * The primary child. For most drivers, this is the child whose
     * filename applies best to the parent node.
     * Any node may have at most one primary child at a time.
     */
    BDRV_CHILD_PRIMARY      = (1 << 4),

    /* Useful combination of flags */
    BDRV_CHILD_IMAGE        = BDRV_CHILD_DATA
                              | BDRV_CHILD_METADATA
                              | BDRV_CHILD_PRIMARY,
};

/* Mask of BdrvChildRoleBits values */
typedef unsigned int BdrvChildRole;

char *bdrv_perm_names(uint64_t perm);
uint64_t bdrv_qapi_perm_to_blk_perm(BlockPermission qapi_perm);
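
/*
 * Hedged usage sketch for bdrv_perm_names(): this example assumes the
 * returned string is heap-allocated and owned by the caller, who frees
 * it with g_free():
 *
 *     char *names = bdrv_perm_names(BLK_PERM_WRITE | BLK_PERM_RESIZE);
 *     error_report("conflicting permissions: %s", names);
 *     g_free(names);
 */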

/* disk I/O throttling */
void bdrv_init(void);
void bdrv_init_with_whitelist(void);
bool bdrv_uses_whitelist(void);
int bdrv_is_whitelisted(BlockDriver *drv, bool read_only);
BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix,
                                Error **errp);
BlockDriver *bdrv_find_format(const char *format_name);
int bdrv_create(BlockDriver *drv, const char *filename,
                QemuOpts *opts, Error **errp);
int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp);

BlockDriverState *bdrv_new(void);
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
                 Error **errp);
void bdrv_replace_node(BlockDriverState *from, BlockDriverState *to,
                       Error **errp);

int bdrv_parse_aio(const char *mode, int *flags);
int bdrv_parse_cache_mode(const char *mode, int *flags, bool *writethrough);
int bdrv_parse_discard_flags(const char *mode, int *flags);
BdrvChild *bdrv_open_child(const char *filename,
                           QDict *options, const char *bdref_key,
                           BlockDriverState *parent,
                           const BdrvChildClass *child_class,
                           BdrvChildRole child_role,
                           bool allow_none, Error **errp);
BlockDriverState *bdrv_open_blockdev_ref(BlockdevRef *ref, Error **errp);
void bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
                         Error **errp);
int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
                           const char *bdref_key, Error **errp);
BlockDriverState *bdrv_open(const char *filename, const char *reference,
                            QDict *options, int flags, Error **errp);
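
/*
 * Illustrative sketch (not from this header) of opening a node with
 * bdrv_open(); it assumes the QDict helpers from qapi/qmp/qdict.h and
 * that bdrv_open() consumes the options dict. On failure, NULL is
 * returned and @errp is set:
 *
 *     QDict *opts = qdict_new();
 *     qdict_put_str(opts, "driver", "qcow2");
 *     bs = bdrv_open("test.qcow2", NULL, opts, BDRV_O_RDWR, &local_err);
 *     if (!bs) {
 *         error_report_err(local_err);
 *     }
 */
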
BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name,
                                       int flags, Error **errp);
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, QDict *options,
                                    bool keep_old_opts);
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp);
int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
                              Error **errp);
int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
                        BlockReopenQueue *queue, Error **errp);
void bdrv_reopen_commit(BDRVReopenState *reopen_state);
void bdrv_reopen_abort(BDRVReopenState *reopen_state);
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int bytes, BdrvRequestFlags flags);
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags);
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes);
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes);
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int count);
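
/*
 * Example sketch of a byte-granularity read-modify-write with the
 * synchronous helpers above (locals are hypothetical; a negative return
 * value is an -errno):
 *
 *     uint8_t buf[512];
 *     ret = bdrv_pread(child, 0, buf, sizeof(buf));
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     buf[0] = 0xff;
 *     ret = bdrv_pwrite_sync(child, 0, buf, sizeof(buf));
 */
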
/*
 * Efficiently zero a region of the disk image. Note that this is a regular
 * I/O request like read or write and should have a reasonable size. This
 * function is not suitable for zeroing the entire image in a single request
 * because it may allocate memory for the entire region.
 */
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int bytes, BdrvRequestFlags flags);
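
/*
 * For instance, a caller that wants a region to read back as zeroes, and
 * is happy for blocks to be discarded when that still guarantees zeroes,
 * might issue (sketch):
 *
 *     ret = bdrv_co_pwrite_zeroes(child, offset, bytes,
 *                                 BDRV_REQ_MAY_UNMAP);
 *
 * Per the flag documentation above, BDRV_REQ_MAY_UNMAP is passed to the
 * driver only if the node was opened with BDRV_O_UNMAP.
 */
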
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
                                          const char *backing_file);
void bdrv_refresh_filename(BlockDriverState *bs);

int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
                                  PreallocMode prealloc, BdrvRequestFlags flags,
                                  Error **errp);
int generated_co_wrapper
bdrv_truncate(BdrvChild *child, int64_t offset, bool exact,
              PreallocMode prealloc, BdrvRequestFlags flags, Error **errp);

int64_t bdrv_nb_sectors(BlockDriverState *bs);
int64_t bdrv_getlength(BlockDriverState *bs);
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs);
BlockMeasureInfo *bdrv_measure(BlockDriver *drv, QemuOpts *opts,
                               BlockDriverState *in_bs, Error **errp);
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr);
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp);
int bdrv_commit(BlockDriverState *bs);
int bdrv_make_empty(BdrvChild *c, Error **errp);
int bdrv_change_backing_file(BlockDriverState *bs, const char *backing_file,
                             const char *backing_fmt, bool warn);
void bdrv_register(BlockDriver *bdrv);
int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
                           const char *backing_file_str);
BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
                                    BlockDriverState *bs);
BlockDriverState *bdrv_find_base(BlockDriverState *bs);
bool bdrv_is_backing_chain_frozen(BlockDriverState *bs, BlockDriverState *base,
                                  Error **errp);
int bdrv_freeze_backing_chain(BlockDriverState *bs, BlockDriverState *base,
                              Error **errp);
void bdrv_unfreeze_backing_chain(BlockDriverState *bs, BlockDriverState *base);
int coroutine_fn bdrv_co_delete_file(BlockDriverState *bs, Error **errp);


typedef struct BdrvCheckResult {
    int corruptions;
    int leaks;
    int check_errors;
    int corruptions_fixed;
    int leaks_fixed;
    int64_t image_end_offset;
    BlockFragInfo bfi;
} BdrvCheckResult;

typedef enum {
    BDRV_FIX_LEAKS    = 1,
    BDRV_FIX_ERRORS   = 2,
} BdrvCheckMode;

int generated_co_wrapper bdrv_check(BlockDriverState *bs, BdrvCheckResult *res,
                                    BdrvCheckMode fix);

/* The units of offset and total_work_size may be chosen arbitrarily by the
 * block driver; total_work_size may change during the course of the amendment
 * operation. */
typedef void BlockDriverAmendStatusCB(BlockDriverState *bs, int64_t offset,
                                      int64_t total_work_size, void *opaque);
int bdrv_amend_options(BlockDriverState *bs_new, QemuOpts *opts,
                       BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
                       bool force,
                       Error **errp);

/* check if a named node can be replaced when doing drive-mirror */
BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs,
                                        const char *node_name, Error **errp);

/* async block I/O */
void bdrv_aio_cancel(BlockAIOCB *acb);
void bdrv_aio_cancel_async(BlockAIOCB *acb);

/* sg packet commands */
int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf);

/* Invalidate any cached metadata used by image formats */
int generated_co_wrapper bdrv_invalidate_cache(BlockDriverState *bs,
                                               Error **errp);
void bdrv_invalidate_cache_all(Error **errp);
int bdrv_inactivate_all(void);

/* Ensure contents are flushed to disk. */
int generated_co_wrapper bdrv_flush(BlockDriverState *bs);
int coroutine_fn bdrv_co_flush(BlockDriverState *bs);
int bdrv_flush_all(void);
void bdrv_close_all(void);
void bdrv_drain(BlockDriverState *bs);
void coroutine_fn bdrv_co_drain(BlockDriverState *bs);
void bdrv_drain_all_begin(void);
void bdrv_drain_all_end(void);
void bdrv_drain_all(void);

#define BDRV_POLL_WHILE(bs, cond) ({                 \
    BlockDriverState *bs_ = (bs);                    \
    AIO_WAIT_WHILE(bdrv_get_aio_context(bs_),        \
                   cond); })
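
/*
 * Typical usage sketch: wait until an asynchronous operation on @bs flips
 * a completion flag (the "done" variable here is hypothetical):
 *
 *     bool done = false;
 *     ... kick off a coroutine or AIO request that sets done = true ...
 *     BDRV_POLL_WHILE(bs, !done);
 */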

int generated_co_wrapper bdrv_pdiscard(BdrvChild *child, int64_t offset,
                                       int64_t bytes);
int bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes);
int bdrv_has_zero_init_1(BlockDriverState *bs);
int bdrv_has_zero_init(BlockDriverState *bs);
bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs);
int bdrv_block_status(BlockDriverState *bs, int64_t offset,
                      int64_t bytes, int64_t *pnum, int64_t *map,
                      BlockDriverState **file);
int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
                            int64_t offset, int64_t bytes, int64_t *pnum,
                            int64_t *map, BlockDriverState **file);
int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      int64_t *pnum);
int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
                            bool include_base, int64_t offset, int64_t bytes,
                            int64_t *pnum);
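
/*
 * Sketch of walking an image's allocation map with bdrv_block_status():
 * on success the return value is a mask of the BDRV_BLOCK_* flags above
 * and *pnum is the number of bytes the result is valid for:
 *
 *     int64_t offset = 0, pnum, map;
 *     BlockDriverState *file;
 *
 *     while (offset < bdrv_getlength(bs)) {
 *         int ret = bdrv_block_status(bs, offset, INT_MAX, &pnum,
 *                                     &map, &file);
 *         if (ret < 0) {
 *             break;
 *         }
 *         if (ret & BDRV_BLOCK_DATA) {
 *             ... data for this range comes from this layer ...
 *         }
 *         offset += pnum;
 *     }
 */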

bool bdrv_is_read_only(BlockDriverState *bs);
int bdrv_can_set_read_only(BlockDriverState *bs, bool read_only,
                           bool ignore_allow_rdw, Error **errp);
int bdrv_apply_auto_read_only(BlockDriverState *bs, const char *errmsg,
                              Error **errp);
bool bdrv_is_writable(BlockDriverState *bs);
bool bdrv_is_sg(BlockDriverState *bs);
bool bdrv_is_inserted(BlockDriverState *bs);
void bdrv_lock_medium(BlockDriverState *bs, bool locked);
void bdrv_eject(BlockDriverState *bs, bool eject_flag);
const char *bdrv_get_format_name(BlockDriverState *bs);
BlockDriverState *bdrv_find_node(const char *node_name);
BlockDeviceInfoList *bdrv_named_nodes_list(bool flat, Error **errp);
XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp);
BlockDriverState *bdrv_lookup_bs(const char *device,
                                 const char *node_name,
                                 Error **errp);
bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base);
BlockDriverState *bdrv_next_node(BlockDriverState *bs);
BlockDriverState *bdrv_next_all_states(BlockDriverState *bs);

typedef struct BdrvNextIterator {
    enum {
        BDRV_NEXT_BACKEND_ROOTS,
        BDRV_NEXT_MONITOR_OWNED,
    } phase;
    BlockBackend *blk;
    BlockDriverState *bs;
} BdrvNextIterator;

BlockDriverState *bdrv_first(BdrvNextIterator *it);
BlockDriverState *bdrv_next(BdrvNextIterator *it);
void bdrv_next_cleanup(BdrvNextIterator *it);
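
/*
 * Canonical iteration pattern over the graph with this iterator (sketch):
 *
 *     BdrvNextIterator it;
 *     BlockDriverState *bs;
 *
 *     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 *         ... inspect bs ...
 *     }
 *
 * bdrv_next_cleanup() is needed when breaking out of the loop early, so
 * that references held by the iterator are dropped.
 */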

BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs);
bool bdrv_supports_compressed_writes(BlockDriverState *bs);
void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
                         void *opaque, bool read_only);
const char *bdrv_get_node_name(const BlockDriverState *bs);
const char *bdrv_get_device_name(const BlockDriverState *bs);
const char *bdrv_get_device_or_node_name(const BlockDriverState *bs);
int bdrv_get_flags(BlockDriverState *bs);
int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs,
                                          Error **errp);
BlockStatsSpecific *bdrv_get_specific_stats(BlockDriverState *bs);
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes);

void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size);
char *bdrv_get_full_backing_filename(BlockDriverState *bs, Error **errp);
char *bdrv_get_full_backing_filename_from_filename(const char *backed,
                                                   const char *backing,
                                                   Error **errp);
char *bdrv_dirname(BlockDriverState *bs, Error **errp);

int path_has_protocol(const char *path);
int path_is_absolute(const char *path);
char *path_combine(const char *base_path, const char *filename);

int generated_co_wrapper
bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
int generated_co_wrapper
bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size);

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size);
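
/*
 * Sketch of saving and re-loading a VM state blob through an image's
 * VM state area (buffer size and position are illustrative):
 *
 *     uint8_t state[1024];
 *     ret = bdrv_save_vmstate(bs, state, 0, sizeof(state));
 *     ...
 *     ret = bdrv_load_vmstate(bs, state, 0, sizeof(state));
 *
 * Both helpers return a negative errno on failure; the image format must
 * support a VM state area (cf. BlockDriverInfo.vm_state_offset).
 */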

void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags,
                     bool quiet, Error **errp);

/* Returns the alignment in bytes that is needed so that no bounce buffer
 * is required throughout the stack */
size_t bdrv_min_mem_align(BlockDriverState *bs);
/* Returns optimal alignment in bytes for bounce buffer */
size_t bdrv_opt_mem_align(BlockDriverState *bs);
void *qemu_blockalign(BlockDriverState *bs, size_t size);
void *qemu_blockalign0(BlockDriverState *bs, size_t size);
void *qemu_try_blockalign(BlockDriverState *bs, size_t size);
void *qemu_try_blockalign0(BlockDriverState *bs, size_t size);
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov);
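
/*
 * Bounce-buffer sketch: allocate memory aligned for @bs, handling
 * allocation failure gracefully (this example assumes qemu_vfree() is
 * the matching release function for these allocators):
 *
 *     void *bounce = qemu_try_blockalign(bs, len);
 *     if (!bounce) {
 *         return -ENOMEM;
 *     }
 *     ... fill bounce, issue I/O ...
 *     qemu_vfree(bounce);
 */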

void bdrv_enable_copy_on_read(BlockDriverState *bs);
void bdrv_disable_copy_on_read(BlockDriverState *bs);

void bdrv_ref(BlockDriverState *bs);
void bdrv_unref(BlockDriverState *bs);
void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child);
BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs,
                             BlockDriverState *child_bs,
                             const char *child_name,
                             const BdrvChildClass *child_class,
                             BdrvChildRole child_role,
                             Error **errp);

bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp);
void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason);
void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason);
void bdrv_op_block_all(BlockDriverState *bs, Error *reason);
void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason);
bool bdrv_op_blocker_is_empty(BlockDriverState *bs);

#define BLKDBG_EVENT(child, evt) \
    do { \
        if (child) { \
            bdrv_debug_event(child->bs, evt); \
        } \
    } while (0)

void bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event);

int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
                          const char *tag);
int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag);
int bdrv_debug_resume(BlockDriverState *bs, const char *tag);
bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag);

/**
 * bdrv_get_aio_context:
 *
 * Returns: the currently bound #AioContext
 */
AioContext *bdrv_get_aio_context(BlockDriverState *bs);

/**
 * Transfer control to @co in the aio context of @bs
 */
void bdrv_coroutine_enter(BlockDriverState *bs, Coroutine *co);

void bdrv_set_aio_context_ignore(BlockDriverState *bs,
                                 AioContext *new_context, GSList **ignore);
int bdrv_try_set_aio_context(BlockDriverState *bs, AioContext *ctx,
                             Error **errp);
int bdrv_child_try_set_aio_context(BlockDriverState *bs, AioContext *ctx,
                                   BdrvChild *ignore_child, Error **errp);
bool bdrv_child_can_set_aio_context(BdrvChild *c, AioContext *ctx,
                                    GSList **ignore, Error **errp);
bool bdrv_can_set_aio_context(BlockDriverState *bs, AioContext *ctx,
                              GSList **ignore, Error **errp);
int bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz);
int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo);

void bdrv_io_plug(BlockDriverState *bs);
void bdrv_io_unplug(BlockDriverState *bs);

/**
 * bdrv_parent_drained_begin_single:
 *
 * Begin a quiesced section for the parent of @c. If @poll is true, wait for
 * any pending activity to cease.
 */
void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll);

/**
 * bdrv_parent_drained_end_single:
 *
 * End a quiesced section for the parent of @c.
 *
 * This polls @bs's AioContext until all scheduled sub-drained_ends
 * have settled, which may result in graph changes.
 */
void bdrv_parent_drained_end_single(BdrvChild *c);

/**
 * bdrv_drain_poll:
 *
 * Poll for pending requests in @bs, its parents (except for @ignore_parent),
 * and if @recursive is true its children as well (used for subtree drain).
 *
 * If @ignore_bds_parents is true, parents that are BlockDriverStates must
 * ignore the drain request because they will be drained separately (used for
 * drain_all).
 *
 * This is part of bdrv_drained_begin.
 */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents);

/**
 * bdrv_drained_begin:
 *
 * Begin a quiesced section for exclusive access to the BDS, by disabling
 * external request sources including NBD server and device model. Note that
 * this doesn't block timers or coroutines from submitting more requests, which
 * means block_job_pause is still necessary.
 *
 * This function can be recursive.
 */
void bdrv_drained_begin(BlockDriverState *bs);

/**
 * bdrv_do_drained_begin_quiesce:
 *
 * Quiesces a BDS like bdrv_drained_begin(), but does not wait for already
 * running requests to complete.
 */
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents);

/**
 * Like bdrv_drained_begin, but recursively begins a quiesced section for
 * exclusive access to all child nodes as well.
 */
void bdrv_subtree_drained_begin(BlockDriverState *bs);

/**
 * bdrv_drained_end:
 *
 * End a quiescent section started by bdrv_drained_begin().
 *
 * This polls @bs's AioContext until all scheduled sub-drained_ends
 * have settled. On one hand, that may result in graph changes. On
 * the other, this requires that the caller either runs in the main
 * loop; or that all involved nodes (@bs and all of its parents) are
 * in the caller's AioContext.
 */
void bdrv_drained_end(BlockDriverState *bs);
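
/*
 * The usual drained-section pattern around graph or state manipulation
 * (sketch):
 *
 *     bdrv_drained_begin(bs);
 *     ... modify bs while no new external requests can be submitted ...
 *     bdrv_drained_end(bs);
 */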

/**
 * bdrv_drained_end_no_poll:
 *
 * Same as bdrv_drained_end(), but do not poll for the subgraph to
 * actually become unquiesced. Therefore, no graph changes will occur
 * with this function.
 *
 * *drained_end_counter is incremented for every background operation
 * that is scheduled, and will be decremented for every operation once
 * it settles. The caller must poll until it reaches 0. The counter
 * should be accessed using atomic operations only.
 */
void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter);

/**
 * End a quiescent section started by bdrv_subtree_drained_begin().
 */
void bdrv_subtree_drained_end(BlockDriverState *bs);

void bdrv_add_child(BlockDriverState *parent, BlockDriverState *child,
                    Error **errp);
void bdrv_del_child(BlockDriverState *parent, BdrvChild *child, Error **errp);

bool bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
                                     uint32_t granularity, Error **errp);
/**
 * bdrv_register_buf/bdrv_unregister_buf:
 *
 * Register/unregister a buffer for I/O. For example, VFIO drivers want to
 * know in advance which memory areas will later be used for I/O, so that
 * they can prepare IOMMU mappings etc. for better performance.
 */
void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size);
void bdrv_unregister_buf(BlockDriverState *bs, void *host);

/**
 * bdrv_co_copy_range:
 *
 * Do offloaded copy between two children. If the operation is not implemented
 * by the driver, or if the backend storage doesn't support it, a negative
 * error code will be returned.
 *
 * Note: the block layer doesn't emulate or fall back to a bounce buffer
 * approach because usually the caller shouldn't attempt offloaded copy any
 * more (e.g. by calling copy_file_range(2)) after the first error, and should
 * instead fall back to a read+write path at the caller level.
 *
 * @src: Source child to copy data from
 * @src_offset: offset in @src image to read data
 * @dst: Destination child to copy data to
 * @dst_offset: offset in @dst image to write data
 * @bytes: number of bytes to copy
 * @flags: request flags. Supported flags:
 *         BDRV_REQ_ZERO_WRITE - treat the @src range as zero data and do zero
 *                               write on @dst as if bdrv_co_pwrite_zeroes is
 *                               called. Used to simplify caller code, or
 *                               during BlockDriver.bdrv_co_copy_range_from()
 *                               recursion.
 *         BDRV_REQ_NO_SERIALISING - do not serialize with other overlapping
 *                                   requests currently in flight.
 *
 * Returns: 0 if succeeded; negative error code if failed.
 **/
int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset,
                                    BdrvChild *dst, uint64_t dst_offset,
                                    uint64_t bytes, BdrvRequestFlags read_flags,
                                    BdrvRequestFlags write_flags);
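
/*
 * Sketch of an offloaded copy between two children from coroutine
 * context; on failure the offload is not retried, and the caller falls
 * back to its own read+write path as the note above recommends:
 *
 *     ret = bdrv_co_copy_range(src, src_offset, dst, dst_offset,
 *                              bytes, 0, 0);
 *     if (ret < 0) {
 *         ... fall back to a plain read+write loop ...
 *     }
 */
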
#endif