/*
 * qemu.git: tests/unit/test-block-iothread.c
 * (from merge tag 'pull-for-6.2-291121-1' of https://github.com/stsquad/qemu)
 */
1 /*
2 * Block tests for iothreads
3 *
4 * Copyright (c) 2018 Kevin Wolf <kwolf@redhat.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #include "qemu/osdep.h"
26 #include "block/block.h"
27 #include "block/blockjob_int.h"
28 #include "sysemu/block-backend.h"
29 #include "qapi/error.h"
30 #include "qapi/qmp/qdict.h"
31 #include "qemu/main-loop.h"
32 #include "iothread.h"
33
/*
 * No-op read/write callback shared by .bdrv_co_preadv and .bdrv_co_pwritev:
 * accepts any request and reports immediate success without touching data.
 */
static int coroutine_fn bdrv_test_co_prwv(BlockDriverState *bs,
                                          uint64_t offset, uint64_t bytes,
                                          QEMUIOVector *qiov, int flags)
{
    return 0;
}
40
/* No-op discard callback: reports success for any request. */
static int coroutine_fn bdrv_test_co_pdiscard(BlockDriverState *bs,
                                              int64_t offset, int bytes)
{
    return 0;
}
46
/* No-op truncate callback: reports success for any requested size. */
static int coroutine_fn
bdrv_test_co_truncate(BlockDriverState *bs, int64_t offset, bool exact,
                      PreallocMode prealloc, BdrvRequestFlags flags,
                      Error **errp)
{
    return 0;
}
54
/*
 * Block status callback: answers for the whole queried range in one go
 * (*pnum = count) and returns 0, i.e. no status flags set.
 */
static int coroutine_fn bdrv_test_co_block_status(BlockDriverState *bs,
                                                  bool want_zero,
                                                  int64_t offset, int64_t count,
                                                  int64_t *pnum, int64_t *map,
                                                  BlockDriverState **file)
{
    *pnum = count;
    return 0;
}
64
/*
 * Minimal test block driver: every callback is a stub that succeeds
 * immediately, so the tests below exercise only the generic block layer
 * and its AioContext handling.
 */
static BlockDriver bdrv_test = {
    .format_name            = "test",
    .instance_size          = 1,

    .bdrv_co_preadv         = bdrv_test_co_prwv,
    .bdrv_co_pwritev        = bdrv_test_co_prwv,
    .bdrv_co_pdiscard       = bdrv_test_co_pdiscard,
    .bdrv_co_truncate       = bdrv_test_co_truncate,
    .bdrv_co_block_status   = bdrv_test_co_block_status,
};
75
76 static void test_sync_op_pread(BdrvChild *c)
77 {
78 uint8_t buf[512];
79 int ret;
80
81 /* Success */
82 ret = bdrv_pread(c, 0, buf, sizeof(buf));
83 g_assert_cmpint(ret, ==, 512);
84
85 /* Early error: Negative offset */
86 ret = bdrv_pread(c, -2, buf, sizeof(buf));
87 g_assert_cmpint(ret, ==, -EIO);
88 }
89
90 static void test_sync_op_pwrite(BdrvChild *c)
91 {
92 uint8_t buf[512] = { 0 };
93 int ret;
94
95 /* Success */
96 ret = bdrv_pwrite(c, 0, buf, sizeof(buf));
97 g_assert_cmpint(ret, ==, 512);
98
99 /* Early error: Negative offset */
100 ret = bdrv_pwrite(c, -2, buf, sizeof(buf));
101 g_assert_cmpint(ret, ==, -EIO);
102 }
103
104 static void test_sync_op_blk_pread(BlockBackend *blk)
105 {
106 uint8_t buf[512];
107 int ret;
108
109 /* Success */
110 ret = blk_pread(blk, 0, buf, sizeof(buf));
111 g_assert_cmpint(ret, ==, 512);
112
113 /* Early error: Negative offset */
114 ret = blk_pread(blk, -2, buf, sizeof(buf));
115 g_assert_cmpint(ret, ==, -EIO);
116 }
117
118 static void test_sync_op_blk_pwrite(BlockBackend *blk)
119 {
120 uint8_t buf[512] = { 0 };
121 int ret;
122
123 /* Success */
124 ret = blk_pwrite(blk, 0, buf, sizeof(buf), 0);
125 g_assert_cmpint(ret, ==, 512);
126
127 /* Early error: Negative offset */
128 ret = blk_pwrite(blk, -2, buf, sizeof(buf), 0);
129 g_assert_cmpint(ret, ==, -EIO);
130 }
131
/* Loading vmstate from a driver without snapshot support must fail cleanly */
static void test_sync_op_load_vmstate(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Error: Driver does not support snapshots */
    ret = bdrv_load_vmstate(c->bs, buf, 0, sizeof(buf));
    g_assert_cmpint(ret, ==, -ENOTSUP);
}
141
/* Saving vmstate to a driver without snapshot support must fail cleanly */
static void test_sync_op_save_vmstate(BdrvChild *c)
{
    uint8_t buf[512] = { 0 };
    int ret;

    /* Error: Driver does not support snapshots */
    ret = bdrv_save_vmstate(c->bs, buf, 0, sizeof(buf));
    g_assert_cmpint(ret, ==, -ENOTSUP);
}
151
/*
 * Synchronous bdrv_pdiscard(): success with and without BDRV_O_UNMAP,
 * early failure for a negative offset.
 * NOTE(review): toggles c->bs->open_flags in place, but leaves
 * BDRV_O_UNMAP cleared again afterwards, matching the initial state.
 */
static void test_sync_op_pdiscard(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    c->bs->open_flags |= BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: UNMAP not supported */
    c->bs->open_flags &= ~BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_pdiscard(c, -2, 512);
    g_assert_cmpint(ret, ==, -EIO);
}
170
171 static void test_sync_op_blk_pdiscard(BlockBackend *blk)
172 {
173 int ret;
174
175 /* Early success: UNMAP not supported */
176 ret = blk_pdiscard(blk, 0, 512);
177 g_assert_cmpint(ret, ==, 0);
178
179 /* Early error: Negative offset */
180 ret = blk_pdiscard(blk, -2, 512);
181 g_assert_cmpint(ret, ==, -EIO);
182 }
183
/*
 * Synchronous bdrv_truncate(): success path, negative-offset rejection,
 * and failure on a read-only node (BDRV_O_RDWR is restored afterwards).
 */
static void test_sync_op_truncate(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_truncate(c, -2, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, -EINVAL);

    /* Error: Read-only image */
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, -EACCES);

    c->bs->open_flags |= BDRV_O_RDWR;
}
204
205 static void test_sync_op_block_status(BdrvChild *c)
206 {
207 int ret;
208 int64_t n;
209
210 /* Normal success path */
211 ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
212 g_assert_cmpint(ret, ==, 0);
213
214 /* Early success: No driver support */
215 bdrv_test.bdrv_co_block_status = NULL;
216 ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
217 g_assert_cmpint(ret, ==, 1);
218
219 /* Early success: bytes = 0 */
220 ret = bdrv_is_allocated(c->bs, 0, 0, &n);
221 g_assert_cmpint(ret, ==, 0);
222
223 /* Early success: Offset > image size*/
224 ret = bdrv_is_allocated(c->bs, 0x1000000, 0x1000000, &n);
225 g_assert_cmpint(ret, ==, 0);
226 }
227
/*
 * Synchronous bdrv_flush(): succeeds normally and returns early success
 * on a read-only node (BDRV_O_RDWR is restored afterwards).
 */
static void test_sync_op_flush(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    c->bs->open_flags |= BDRV_O_RDWR;
}
244
/* Same as test_sync_op_flush(), but through the BlockBackend API */
static void test_sync_op_blk_flush(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    int ret;

    /* Normal success path */
    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    bs->open_flags &= ~BDRV_O_RDWR;

    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    bs->open_flags |= BDRV_O_RDWR;
}
262
/* bdrv_check() on a driver without a check callback must return -ENOTSUP */
static void test_sync_op_check(BdrvChild *c)
{
    BdrvCheckResult result;
    int ret;

    /* Error: Driver does not implement check */
    ret = bdrv_check(c->bs, &result, 0);
    g_assert_cmpint(ret, ==, -ENOTSUP);
}
272
/*
 * bdrv_invalidate_cache() on an active node: the call must simply
 * return without error (no assertion needed beyond not aborting).
 */
static void test_sync_op_invalidate_cache(BdrvChild *c)
{
    /* Early success: Image is not inactive */
    bdrv_invalidate_cache(c->bs, NULL);
}
278
279
/* One entry per synchronous operation exercised by test_sync_op() */
typedef struct SyncOpTest {
    const char *name;                  /* glib test case path */
    void (*fn)(BdrvChild *c);          /* node-level variant (required) */
    void (*blkfn)(BlockBackend *blk);  /* BlockBackend variant (optional) */
} SyncOpTest;
285
286 const SyncOpTest sync_op_tests[] = {
287 {
288 .name = "/sync-op/pread",
289 .fn = test_sync_op_pread,
290 .blkfn = test_sync_op_blk_pread,
291 }, {
292 .name = "/sync-op/pwrite",
293 .fn = test_sync_op_pwrite,
294 .blkfn = test_sync_op_blk_pwrite,
295 }, {
296 .name = "/sync-op/load_vmstate",
297 .fn = test_sync_op_load_vmstate,
298 }, {
299 .name = "/sync-op/save_vmstate",
300 .fn = test_sync_op_save_vmstate,
301 }, {
302 .name = "/sync-op/pdiscard",
303 .fn = test_sync_op_pdiscard,
304 .blkfn = test_sync_op_blk_pdiscard,
305 }, {
306 .name = "/sync-op/truncate",
307 .fn = test_sync_op_truncate,
308 }, {
309 .name = "/sync-op/block_status",
310 .fn = test_sync_op_block_status,
311 }, {
312 .name = "/sync-op/flush",
313 .fn = test_sync_op_flush,
314 .blkfn = test_sync_op_blk_flush,
315 }, {
316 .name = "/sync-op/check",
317 .fn = test_sync_op_check,
318 }, {
319 .name = "/sync-op/invalidate_cache",
320 .fn = test_sync_op_invalidate_cache,
321 },
322 };
323
/*
 * Test synchronous operations that run in a different iothread, so we have to
 * poll for the coroutine there to return.
 *
 * opaque points at one SyncOpTest entry: the node-level variant t->fn is
 * always run; the BlockBackend variant t->blkfn only if present.
 */
static void test_sync_op(const void *opaque)
{
    const SyncOpTest *t = opaque;
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    BdrvChild *c;

    /* Build blk -> bs with a 64 KiB test image (128 sectors of 512 bytes) */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;
    blk_insert_bs(blk, bs, &error_abort);
    c = QLIST_FIRST(&bs->parents);

    /* Move the tree into the iothread, run the test there, move it back */
    blk_set_aio_context(blk, ctx, &error_abort);
    aio_context_acquire(ctx);
    t->fn(c);
    if (t->blkfn) {
        t->blkfn(blk);
    }
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    bdrv_unref(bs);
    blk_unref(blk);
}
353
/* Dummy block job state for test_attach_blockjob() */
typedef struct TestBlockJob {
    BlockJob common;
    bool should_complete;  /* set by .complete; terminates the run loop */
    int n;                 /* bumped each loop iteration, so the test can
                            * observe that the job is making progress */
} TestBlockJob;
359
/* .prepare callback: must always be invoked in the main AioContext */
static int test_job_prepare(Job *job)
{
    g_assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    return 0;
}
365
/*
 * Job coroutine: loops until .complete sets should_complete, bumping s->n
 * on every iteration and asserting it always runs in the job's AioContext.
 */
static int coroutine_fn test_job_run(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    job_transition_to_ready(&s->common.job);
    while (!s->should_complete) {
        s->n++;
        g_assert(qemu_get_current_aio_context() == job->aio_context);

        /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
         * emulate some actual activity (probably some I/O) here so that the
         * drain involved in AioContext switches has to wait for this activity
         * to stop. */
        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);

        /* Honour pause requests (issued e.g. during AioContext switches) */
        job_pause_point(&s->common.job);
    }

    g_assert(qemu_get_current_aio_context() == job->aio_context);
    return 0;
}
387
/* .complete callback: flags the run loop in test_job_run() to finish */
static void test_job_complete(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);
    s->should_complete = true;
}
393
394 BlockJobDriver test_job_driver = {
395 .job_driver = {
396 .instance_size = sizeof(TestBlockJob),
397 .free = block_job_free,
398 .user_resume = block_job_user_resume,
399 .run = test_job_run,
400 .complete = test_job_complete,
401 .prepare = test_job_prepare,
402 },
403 };
404
/*
 * Test that a running block job follows its node when the node is moved
 * between the main AioContext and an iothread, and that it keeps making
 * progress (tjob->n advances) in each context.
 */
static void test_attach_blockjob(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    TestBlockJob *tjob;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    tjob = block_job_create("job0", &test_job_driver, NULL, bs,
                            0, BLK_PERM_ALL,
                            0, 0, NULL, NULL, &error_abort);
    job_start(&tjob->common.job);

    /* Wait until the job has made progress in the main context */
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* Move the tree into the iothread; the job must keep running */
    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* Move back to the main context (the iothread ctx must be held here) */
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* Into the iothread once more, so completion happens there */
    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* Complete the job and return everything to the main context */
    aio_context_acquire(ctx);
    job_complete_sync(&tjob->common.job, &error_abort);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    bdrv_unref(bs);
    blk_unref(blk);
}
457
/*
 * Test that changing the AioContext for one node in a tree (here through blk)
 * changes all other nodes as well:
 *
 *     blk
 *      |
 *      |           bs_verify [blkverify]
 *      |          /                     \
 *      |         /                       \
 *     bs_a [bdrv_test]               bs_b [bdrv_test]
 */
static void test_propagate_basic(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx;
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_verify;
    QDict *options;

    /*
     * Create bs_a and its BlockBackend. We cannot take the RESIZE
     * permission because blkverify will not share it on the test
     * image.
     */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL & ~BLK_PERM_RESIZE,
                  BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs_a, &error_abort);

    /* Create bs_b */
    bs_b = bdrv_new_open_driver(&bdrv_test, "bs_b", BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_a and bs_b */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_a");
    qdict_put_str(options, "raw", "bs_b");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Switch the AioContext; every node in the tree must follow */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);

    /* Switch the AioContext back */
    main_ctx = qemu_get_aio_context();
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_b) == main_ctx);

    bdrv_unref(bs_verify);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
    blk_unref(blk);
}
522
/*
 * Test that diamonds in the graph don't lead to endless recursion:
 *
 *             blk
 *              |
 *      bs_verify [blkverify]
 *          /         \
 *         /           \
 *   bs_b [raw]     bs_c [raw]
 *         \           /
 *          \         /
 *       bs_a [bdrv_test]
 */
static void test_propagate_diamond(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx;
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_c, *bs_verify;
    QDict *options;

    /* Create bs_a */
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);

    /* Create bs_b and bs_c, both raw filters on top of bs_a */
    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_b");
    bs_b = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_c");
    bs_c = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_b and bs_c */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_b");
    qdict_put_str(options, "raw", "bs_c");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
    /*
     * Do not take the RESIZE permission: This would require the same
     * from bs_c and thus from bs_a; however, blkverify will not share
     * it on bs_b, and thus it will not be available for bs_a.
     */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL & ~BLK_PERM_RESIZE,
                  BLK_PERM_ALL);
    blk_insert_bs(blk, bs_verify, &error_abort);

    /* Switch the AioContext; all five nodes must follow, exactly once each */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);
    g_assert(bdrv_get_aio_context(bs_c) == ctx);

    /* Switch the AioContext back */
    main_ctx = qemu_get_aio_context();
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_b) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_c) == main_ctx);

    blk_unref(blk);
    bdrv_unref(bs_verify);
    bdrv_unref(bs_c);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
}
602
/*
 * Test AioContext propagation across a mirror job: source, target, the
 * job's filter node and the job itself must all move together, and a
 * BlockBackend that does not allow context changes must block the move
 * until explicitly permitted.
 */
static void test_propagate_mirror(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockDriverState *src, *target, *filter;
    BlockBackend *blk;
    Job *job;
    Error *local_err = NULL;

    /* Create src and target */
    src = bdrv_new_open_driver(&bdrv_test, "src", BDRV_O_RDWR, &error_abort);
    target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
                                  &error_abort);

    /* Start a mirror job */
    mirror_start("job0", src, target, NULL, JOB_DEFAULT, 0, 0, 0,
                 MIRROR_SYNC_MODE_NONE, MIRROR_OPEN_BACKING_CHAIN, false,
                 BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
                 false, "filter_node", MIRROR_COPY_MODE_BACKGROUND,
                 &error_abort);
    job = job_get("job0");
    filter = bdrv_find_node("filter_node");

    /* Change the AioContext of src; everything connected must follow */
    bdrv_try_set_aio_context(src, ctx, &error_abort);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);
    g_assert(job->aio_context == ctx);

    /* Change the AioContext of target; src and filter must follow back */
    aio_context_acquire(ctx);
    bdrv_try_set_aio_context(target, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* With a BlockBackend on src, changing target must fail */
    blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
    blk_insert_bs(blk, src, &error_abort);

    bdrv_try_set_aio_context(target, ctx, &local_err);
    error_free_or_abort(&local_err);

    /* ... and nothing may have moved */
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* ...unless we explicitly allow it */
    aio_context_acquire(ctx);
    blk_set_allow_aio_context_change(blk, true);
    bdrv_try_set_aio_context(target, ctx, &error_abort);
    aio_context_release(ctx);

    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    job_cancel_sync_all();

    /* Return everything to the main context before cleanup */
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    bdrv_try_set_aio_context(target, main_ctx, &error_abort);
    aio_context_release(ctx);

    blk_unref(blk);
    bdrv_unref(src);
    bdrv_unref(target);
}
676
/*
 * Test that a node opened while its parent tree already lives in an
 * iothread inherits that AioContext, and follows the tree back to the
 * main context afterwards.
 */
static void test_attach_second_node(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockBackend *blk;
    BlockDriverState *bs, *filter;
    QDict *options;

    /* blk -> bs, created directly in the iothread context */
    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    /* Open a raw filter on top of "base" while the tree is in the iothread */
    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "base");

    filter = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    /* Moving back to the main context must drag the filter along */
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    bdrv_unref(filter);
    bdrv_unref(bs);
    blk_unref(blk);
}
710
/*
 * Test that removing the root node from a BlockBackend living in an
 * iothread leaves the BlockBackend's own AioContext unchanged (while the
 * detached node falls back to the main context), and that a re-attached
 * node picks the iothread context up again.
 */
static void test_attach_preserve_blk_ctx(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;

    /* Add node to BlockBackend that has an iothread context assigned */
    blk_insert_bs(blk, bs, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);

    /* Remove the node again */
    aio_context_acquire(ctx);
    blk_remove_bs(blk);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == qemu_get_aio_context());

    /* Re-attach the node */
    blk_insert_bs(blk, bs, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);

    /* Return to the main context before cleanup */
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);
    bdrv_unref(bs);
    blk_unref(blk);
}
745
/* Entry point: initialize the block layer and register all test cases */
int main(int argc, char **argv)
{
    int i;

    bdrv_init();
    qemu_init_main_loop(&error_abort);

    g_test_init(&argc, &argv, NULL);

    /* One glib test case per entry in sync_op_tests */
    for (i = 0; i < ARRAY_SIZE(sync_op_tests); i++) {
        const SyncOpTest *t = &sync_op_tests[i];
        g_test_add_data_func(t->name, t, test_sync_op);
    }

    g_test_add_func("/attach/blockjob", test_attach_blockjob);
    g_test_add_func("/attach/second_node", test_attach_second_node);
    g_test_add_func("/attach/preserve_blk_ctx", test_attach_preserve_blk_ctx);
    g_test_add_func("/propagate/basic", test_propagate_basic);
    g_test_add_func("/propagate/diamond", test_propagate_diamond);
    g_test_add_func("/propagate/mirror", test_propagate_mirror);

    return g_test_run();
}