xen-disk: add support for multi-page shared rings
[qemu.git] hw/block/xen_disk.c
1 /*
2 * xen paravirt block device backend
3 *
4 * (c) Gerd Hoffmann <kraxel@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; under version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, see <http://www.gnu.org/licenses/>.
17 *
18 * Contributions after 2012-01-13 are licensed under the terms of the
19 * GNU GPL, version 2 or (at your option) any later version.
20 */
21
22 #include "qemu/osdep.h"
23 #include <sys/ioctl.h>
24 #include <sys/uio.h>
25
26 #include "hw/hw.h"
27 #include "hw/xen/xen_backend.h"
28 #include "xen_blkif.h"
29 #include "sysemu/blockdev.h"
30 #include "sysemu/block-backend.h"
31 #include "qapi/error.h"
32 #include "qapi/qmp/qdict.h"
33 #include "qapi/qmp/qstring.h"
34
35 /* ------------------------------------------------------------- */
36
37 static int batch_maps = 0;
38
39 /* ------------------------------------------------------------- */
40
41 #define BLOCK_SIZE 512
42 #define IOCB_COUNT (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
43
44 struct PersistentGrant {
45 void *page;
46 struct XenBlkDev *blkdev;
47 };
48
49 typedef struct PersistentGrant PersistentGrant;
50
51 struct PersistentRegion {
52 void *addr;
53 int num;
54 };
55
56 typedef struct PersistentRegion PersistentRegion;
57
58 struct ioreq {
59 blkif_request_t req;
60 int16_t status;
61
62 /* parsed request */
63 off_t start;
64 QEMUIOVector v;
65 int presync;
66 uint8_t mapped;
67
68 /* grant mapping */
69 uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
70 uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
71 int prot;
72 void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
73 void *pages;
74 int num_unmap;
75
76 /* aio status */
77 int aio_inflight;
78 int aio_errors;
79
80 struct XenBlkDev *blkdev;
81 QLIST_ENTRY(ioreq) list;
82 BlockAcctCookie acct;
83 };
84
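/*
 * Upper bound on the ring-page-order advertised to the frontend: the
 * shared ring may span up to 1 << MAX_RING_PAGE_ORDER (16) grant pages,
 * and ring_ref[] below is sized accordingly.
 */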
85 #define MAX_RING_PAGE_ORDER 4
86
87 struct XenBlkDev {
88 struct XenDevice xendev; /* must be first */
89 char *params;
90 char *mode;
91 char *type;
92 char *dev;
93 char *devtype;
94 bool directiosafe;
95 const char *fileproto;
96 const char *filename;
97 unsigned int ring_ref[1 << MAX_RING_PAGE_ORDER];
98 unsigned int nr_ring_ref;
99 void *sring;
100 int64_t file_blk;
101 int64_t file_size;
102 int protocol;
103 blkif_back_rings_t rings;
104 int more_work;
105 int cnt_map;
106
107 /* request lists */
108 QLIST_HEAD(inflight_head, ioreq) inflight;
109 QLIST_HEAD(finished_head, ioreq) finished;
110 QLIST_HEAD(freelist_head, ioreq) freelist;
111 int requests_total;
112 int requests_inflight;
113 int requests_finished;
114 unsigned int max_requests;
115
116 /* Persistent grants extension */
117 gboolean feature_discard;
118 gboolean feature_persistent;
119 GTree *persistent_gnts;
120 GSList *persistent_regions;
121 unsigned int persistent_gnt_count;
122 unsigned int max_grants;
123
124 /* Grant copy */
125 gboolean feature_grant_copy;
126
127 /* qemu block driver */
128 DriveInfo *dinfo;
129 BlockBackend *blk;
130 QEMUBH *bh;
131 };
132
133 /* ------------------------------------------------------------- */
134
135 static void ioreq_reset(struct ioreq *ioreq)
136 {
137 memset(&ioreq->req, 0, sizeof(ioreq->req));
138 ioreq->status = 0;
139 ioreq->start = 0;
140 ioreq->presync = 0;
141 ioreq->mapped = 0;
142
143 memset(ioreq->domids, 0, sizeof(ioreq->domids));
144 memset(ioreq->refs, 0, sizeof(ioreq->refs));
145 ioreq->prot = 0;
146 memset(ioreq->page, 0, sizeof(ioreq->page));
147 ioreq->pages = NULL;
148
149 ioreq->aio_inflight = 0;
150 ioreq->aio_errors = 0;
151
152 ioreq->blkdev = NULL;
153 memset(&ioreq->list, 0, sizeof(ioreq->list));
154 memset(&ioreq->acct, 0, sizeof(ioreq->acct));
155
156 qemu_iovec_reset(&ioreq->v);
157 }
158
159 static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
160 {
161 uint ua = GPOINTER_TO_UINT(a);
162 uint ub = GPOINTER_TO_UINT(b);
163 return (ua > ub) - (ua < ub);
164 }
165
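/*
 * GDestroyNotify for the persistent grant tree in the non-batch case:
 * unmap the single page backing the grant and drop the bookkeeping.
 */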
166 static void destroy_grant(gpointer pgnt)
167 {
168 PersistentGrant *grant = pgnt;
169 xengnttab_handle *gnt = grant->blkdev->xendev.gnttabdev;
170
171 if (xengnttab_unmap(gnt, grant->page, 1) != 0) {
172 xen_pv_printf(&grant->blkdev->xendev, 0,
173 "xengnttab_unmap failed: %s\n",
174 strerror(errno));
175 }
176 grant->blkdev->persistent_gnt_count--;
177 xen_pv_printf(&grant->blkdev->xendev, 3,
178 "unmapped grant %p\n", grant->page);
179 g_free(grant);
180 }
181
182 static void remove_persistent_region(gpointer data, gpointer dev)
183 {
184 PersistentRegion *region = data;
185 struct XenBlkDev *blkdev = dev;
186 xengnttab_handle *gnt = blkdev->xendev.gnttabdev;
187
188 if (xengnttab_unmap(gnt, region->addr, region->num) != 0) {
189 xen_pv_printf(&blkdev->xendev, 0,
190 "xengnttab_unmap region %p failed: %s\n",
191 region->addr, strerror(errno));
192 }
193 xen_pv_printf(&blkdev->xendev, 3,
194 "unmapped grant region %p with %d pages\n",
195 region->addr, region->num);
196 g_free(region);
197 }
198
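/*
 * Get a request slot: reuse one from the freelist if possible, otherwise
 * allocate a new one as long as fewer than max_requests (derived from the
 * ring size in blk_connect) exist.  Returns NULL when the limit is hit.
 */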
199 static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
200 {
201 struct ioreq *ioreq = NULL;
202
203 if (QLIST_EMPTY(&blkdev->freelist)) {
204 if (blkdev->requests_total >= blkdev->max_requests) {
205 goto out;
206 }
207 /* allocate new struct */
208 ioreq = g_malloc0(sizeof(*ioreq));
209 ioreq->blkdev = blkdev;
210 blkdev->requests_total++;
211 qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
212 } else {
213 /* get one from freelist */
214 ioreq = QLIST_FIRST(&blkdev->freelist);
215 QLIST_REMOVE(ioreq, list);
216 }
217 QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
218 blkdev->requests_inflight++;
219
220 out:
221 return ioreq;
222 }
223
224 static void ioreq_finish(struct ioreq *ioreq)
225 {
226 struct XenBlkDev *blkdev = ioreq->blkdev;
227
228 QLIST_REMOVE(ioreq, list);
229 QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
230 blkdev->requests_inflight--;
231 blkdev->requests_finished++;
232 }
233
234 static void ioreq_release(struct ioreq *ioreq, bool finish)
235 {
236 struct XenBlkDev *blkdev = ioreq->blkdev;
237
238 QLIST_REMOVE(ioreq, list);
239 ioreq_reset(ioreq);
240 ioreq->blkdev = blkdev;
241 QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
242 if (finish) {
243 blkdev->requests_finished--;
244 } else {
245 blkdev->requests_inflight--;
246 }
247 }
248
249 /*
250 * translate request into iovec + start offset
251 * do sanity checks along the way
252 */
253 static int ioreq_parse(struct ioreq *ioreq)
254 {
255 struct XenBlkDev *blkdev = ioreq->blkdev;
256 uintptr_t mem;
257 size_t len;
258 int i;
259
260 xen_pv_printf(&blkdev->xendev, 3,
261 "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
262 ioreq->req.operation, ioreq->req.nr_segments,
263 ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
264 switch (ioreq->req.operation) {
265 case BLKIF_OP_READ:
266 ioreq->prot = PROT_WRITE; /* to memory */
267 break;
268 case BLKIF_OP_FLUSH_DISKCACHE:
269 ioreq->presync = 1;
270 if (!ioreq->req.nr_segments) {
271 return 0;
272 }
273 /* fall through */
274 case BLKIF_OP_WRITE:
275 ioreq->prot = PROT_READ; /* from memory */
276 break;
277 case BLKIF_OP_DISCARD:
278 return 0;
279 default:
280 xen_pv_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
281 ioreq->req.operation);
282 goto err;
283 }
284
285 if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
286 xen_pv_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
287 goto err;
288 }
289
290 ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
291 for (i = 0; i < ioreq->req.nr_segments; i++) {
292 if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
293 xen_pv_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
294 goto err;
295 }
296 if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
297 xen_pv_printf(&blkdev->xendev, 0, "error: first > last sector\n");
298 goto err;
299 }
300 if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
301 xen_pv_printf(&blkdev->xendev, 0, "error: page crossing\n");
302 goto err;
303 }
304
305 ioreq->domids[i] = blkdev->xendev.dom;
306 ioreq->refs[i] = ioreq->req.seg[i].gref;
307
308 mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
309 len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
310 qemu_iovec_add(&ioreq->v, (void*)mem, len);
311 }
312 if (ioreq->start + ioreq->v.size > blkdev->file_size) {
313 xen_pv_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
314 goto err;
315 }
316 return 0;
317
318 err:
319 ioreq->status = BLKIF_RSP_ERROR;
320 return -1;
321 }
322
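/*
 * Undo the grant mappings set up by ioreq_map().  Only num_unmap pages are
 * unmapped; pages promoted to persistent grants stay mapped.  With
 * batch_maps the pages form one contiguous region and are unmapped with a
 * single call.
 */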
323 static void ioreq_unmap(struct ioreq *ioreq)
324 {
325 xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
326 int i;
327
328 if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
329 return;
330 }
331 if (batch_maps) {
332 if (!ioreq->pages) {
333 return;
334 }
335 if (xengnttab_unmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
336 xen_pv_printf(&ioreq->blkdev->xendev, 0,
337 "xengnttab_unmap failed: %s\n",
338 strerror(errno));
339 }
340 ioreq->blkdev->cnt_map -= ioreq->num_unmap;
341 ioreq->pages = NULL;
342 } else {
343 for (i = 0; i < ioreq->num_unmap; i++) {
344 if (!ioreq->page[i]) {
345 continue;
346 }
347 if (xengnttab_unmap(gnt, ioreq->page[i], 1) != 0) {
348 xen_pv_printf(&ioreq->blkdev->xendev, 0,
349 "xengnttab_unmap failed: %s\n",
350 strerror(errno));
351 }
352 ioreq->blkdev->cnt_map--;
353 ioreq->page[i] = NULL;
354 }
355 }
356 ioreq->mapped = 0;
357 }
358
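/*
 * Make the guest pages referenced by the request accessible: reuse
 * persistent grants that are already mapped, map the remaining grants
 * (in one batch when batch_maps is set), and promote as many of the new
 * mappings as possible to persistent grants.
 */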
359 static int ioreq_map(struct ioreq *ioreq)
360 {
361 xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
362 uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
363 uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
364 void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
365 int i, j, new_maps = 0;
366 PersistentGrant *grant;
367 PersistentRegion *region;
368 /* The domids and refs arrays will contain the information necessary
369 * to map the grants that are needed to fulfill this request.
370 *
371 * After mapping the needed grants, the page array will contain the
372 * memory address of each granted page in the order specified in ioreq
373 * (regardless of whether it is a persistent grant or not).
374 */
375
376 if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
377 return 0;
378 }
379 if (ioreq->blkdev->feature_persistent) {
380 for (i = 0; i < ioreq->v.niov; i++) {
381 grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
382 GUINT_TO_POINTER(ioreq->refs[i]));
383
384 if (grant != NULL) {
385 page[i] = grant->page;
386 xen_pv_printf(&ioreq->blkdev->xendev, 3,
387 "using persistent-grant %" PRIu32 "\n",
388 ioreq->refs[i]);
389 } else {
390 /* Add the grant to the list of grants that
391 * should be mapped
392 */
393 domids[new_maps] = ioreq->domids[i];
394 refs[new_maps] = ioreq->refs[i];
395 page[i] = NULL;
396 new_maps++;
397 }
398 }
399 /* Set the protection to RW, since grants may be reused later
400 * with a different protection than the one needed for this request
401 */
402 ioreq->prot = PROT_WRITE | PROT_READ;
403 } else {
404 /* All grants in the request should be mapped */
405 memcpy(refs, ioreq->refs, sizeof(refs));
406 memcpy(domids, ioreq->domids, sizeof(domids));
407 memset(page, 0, sizeof(page));
408 new_maps = ioreq->v.niov;
409 }
410
411 if (batch_maps && new_maps) {
412 ioreq->pages = xengnttab_map_grant_refs
413 (gnt, new_maps, domids, refs, ioreq->prot);
414 if (ioreq->pages == NULL) {
415 xen_pv_printf(&ioreq->blkdev->xendev, 0,
416 "can't map %d grant refs (%s, %d maps)\n",
417 new_maps, strerror(errno), ioreq->blkdev->cnt_map);
418 return -1;
419 }
420 for (i = 0, j = 0; i < ioreq->v.niov; i++) {
421 if (page[i] == NULL) {
422 page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
423 }
424 }
425 ioreq->blkdev->cnt_map += new_maps;
426 } else if (new_maps) {
427 for (i = 0; i < new_maps; i++) {
428 ioreq->page[i] = xengnttab_map_grant_ref
429 (gnt, domids[i], refs[i], ioreq->prot);
430 if (ioreq->page[i] == NULL) {
431 xen_pv_printf(&ioreq->blkdev->xendev, 0,
432 "can't map grant ref %d (%s, %d maps)\n",
433 refs[i], strerror(errno), ioreq->blkdev->cnt_map);
434 ioreq->mapped = 1;
435 ioreq_unmap(ioreq);
436 return -1;
437 }
438 ioreq->blkdev->cnt_map++;
439 }
440 for (i = 0, j = 0; i < ioreq->v.niov; i++) {
441 if (page[i] == NULL) {
442 page[i] = ioreq->page[j++];
443 }
444 }
445 }
446 if (ioreq->blkdev->feature_persistent && new_maps != 0 &&
447 (!batch_maps || (ioreq->blkdev->persistent_gnt_count + new_maps <=
448 ioreq->blkdev->max_grants))) {
449 /*
450 * If we are using persistent grants and batch mappings, only
451 * add the new maps to the list of persistent grants if the whole
452 * area can be persistently mapped.
453 */
454 if (batch_maps) {
455 region = g_malloc0(sizeof(*region));
456 region->addr = ioreq->pages;
457 region->num = new_maps;
458 ioreq->blkdev->persistent_regions = g_slist_append(
459 ioreq->blkdev->persistent_regions,
460 region);
461 }
462 while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
463 && new_maps) {
464 /* Go through the list of newly mapped grants and add as many
465 * as possible to the list of persistently mapped grants.
466 *
467 * Since we start at the end of ioreq->page(s), we only need
468 * to decrease new_maps to prevent these granted pages from
469 * being unmapped in ioreq_unmap.
470 */
471 grant = g_malloc0(sizeof(*grant));
472 new_maps--;
473 if (batch_maps) {
474 grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
475 } else {
476 grant->page = ioreq->page[new_maps];
477 }
478 grant->blkdev = ioreq->blkdev;
479 xen_pv_printf(&ioreq->blkdev->xendev, 3,
480 "adding grant %" PRIu32 " page: %p\n",
481 refs[new_maps], grant->page);
482 g_tree_insert(ioreq->blkdev->persistent_gnts,
483 GUINT_TO_POINTER(refs[new_maps]),
484 grant);
485 ioreq->blkdev->persistent_gnt_count++;
486 }
487 assert(!batch_maps || new_maps == 0);
488 }
489 for (i = 0; i < ioreq->v.niov; i++) {
490 ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
491 }
492 ioreq->mapped = 1;
493 ioreq->num_unmap = new_maps;
494 return 0;
495 }
496
497 #if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40800
498
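/*
 * Grant-copy support (Xen >= 4.8): instead of mapping guest pages, I/O
 * goes through locally allocated bounce buffers and xengnttab_grant_copy()
 * moves the data to or from the guest's granted pages.
 */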
499 static void ioreq_free_copy_buffers(struct ioreq *ioreq)
500 {
501 int i;
502
503 for (i = 0; i < ioreq->v.niov; i++) {
504 ioreq->page[i] = NULL;
505 }
506
507 qemu_vfree(ioreq->pages);
508 }
509
510 static int ioreq_init_copy_buffers(struct ioreq *ioreq)
511 {
512 int i;
513
514 if (ioreq->v.niov == 0) {
515 return 0;
516 }
517
518 ioreq->pages = qemu_memalign(XC_PAGE_SIZE, ioreq->v.niov * XC_PAGE_SIZE);
519
520 for (i = 0; i < ioreq->v.niov; i++) {
521 ioreq->page[i] = ioreq->pages + i * XC_PAGE_SIZE;
522 ioreq->v.iov[i].iov_base = ioreq->page[i];
523 }
524
525 return 0;
526 }
527
528 static int ioreq_grant_copy(struct ioreq *ioreq)
529 {
530 xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
531 xengnttab_grant_copy_segment_t segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
532 int i, count, rc;
533 int64_t file_blk = ioreq->blkdev->file_blk;
534
535 if (ioreq->v.niov == 0) {
536 return 0;
537 }
538
539 count = ioreq->v.niov;
540
541 for (i = 0; i < count; i++) {
542 if (ioreq->req.operation == BLKIF_OP_READ) {
543 segs[i].flags = GNTCOPY_dest_gref;
544 segs[i].dest.foreign.ref = ioreq->refs[i];
545 segs[i].dest.foreign.domid = ioreq->domids[i];
546 segs[i].dest.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
547 segs[i].source.virt = ioreq->v.iov[i].iov_base;
548 } else {
549 segs[i].flags = GNTCOPY_source_gref;
550 segs[i].source.foreign.ref = ioreq->refs[i];
551 segs[i].source.foreign.domid = ioreq->domids[i];
552 segs[i].source.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
553 segs[i].dest.virt = ioreq->v.iov[i].iov_base;
554 }
555 segs[i].len = (ioreq->req.seg[i].last_sect
556 - ioreq->req.seg[i].first_sect + 1) * file_blk;
557 }
558
559 rc = xengnttab_grant_copy(gnt, count, segs);
560
561 if (rc) {
562 xen_pv_printf(&ioreq->blkdev->xendev, 0,
563 "failed to copy data %d\n", rc);
564 ioreq->aio_errors++;
565 return -1;
566 }
567
568 for (i = 0; i < count; i++) {
569 if (segs[i].status != GNTST_okay) {
570 xen_pv_printf(&ioreq->blkdev->xendev, 3,
571 "failed to copy data %d for gref %d, domid %d\n",
572 segs[i].status, ioreq->refs[i], ioreq->domids[i]);
573 ioreq->aio_errors++;
574 rc = -1;
575 }
576 }
577
578 return rc;
579 }
580 #else
581 static void ioreq_free_copy_buffers(struct ioreq *ioreq)
582 {
583 abort();
584 }
585
586 static int ioreq_init_copy_buffers(struct ioreq *ioreq)
587 {
588 abort();
589 }
590
591 static int ioreq_grant_copy(struct ioreq *ioreq)
592 {
593 abort();
594 }
595 #endif
596
597 static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
598
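/*
 * Completion callback for all AIO submitted by ioreq_runio_qemu_aio():
 * accumulate errors, re-issue the request once a presync flush finishes,
 * and on the final completion copy read data back to the guest (grant-copy
 * mode), release mappings or bounce buffers, update accounting and
 * schedule the bottom half to send the response.
 */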
599 static void qemu_aio_complete(void *opaque, int ret)
600 {
601 struct ioreq *ioreq = opaque;
602
603 if (ret != 0) {
604 xen_pv_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
605 ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
606 ioreq->aio_errors++;
607 }
608
609 ioreq->aio_inflight--;
610 if (ioreq->presync) {
611 ioreq->presync = 0;
612 ioreq_runio_qemu_aio(ioreq);
613 return;
614 }
615 if (ioreq->aio_inflight > 0) {
616 return;
617 }
618
619 if (ioreq->blkdev->feature_grant_copy) {
620 switch (ioreq->req.operation) {
621 case BLKIF_OP_READ:
622 /* in case of failure ioreq->aio_errors is increased */
623 if (ret == 0) {
624 ioreq_grant_copy(ioreq);
625 }
626 ioreq_free_copy_buffers(ioreq);
627 break;
628 case BLKIF_OP_WRITE:
629 case BLKIF_OP_FLUSH_DISKCACHE:
630 if (!ioreq->req.nr_segments) {
631 break;
632 }
633 ioreq_free_copy_buffers(ioreq);
634 break;
635 default:
636 break;
637 }
638 }
639
640 ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
641 if (!ioreq->blkdev->feature_grant_copy) {
642 ioreq_unmap(ioreq);
643 }
644 ioreq_finish(ioreq);
645 switch (ioreq->req.operation) {
646 case BLKIF_OP_WRITE:
647 case BLKIF_OP_FLUSH_DISKCACHE:
648 if (!ioreq->req.nr_segments) {
649 break;
650 }
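/* fall through */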
651 case BLKIF_OP_READ:
652 if (ioreq->status == BLKIF_RSP_OKAY) {
653 block_acct_done(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
654 } else {
655 block_acct_failed(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
656 }
657 break;
658 case BLKIF_OP_DISCARD:
659 default:
660 break;
661 }
662 qemu_bh_schedule(ioreq->blkdev->bh);
663 }
664
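/*
 * A discard may be larger than a single blk_aio_pdiscard() can accept, so
 * issue it in chunks of at most BDRV_REQUEST_MAX_SECTORS worth of bytes.
 * Returns false if the sector range wraps around or exceeds the
 * byte-addressable limit.
 */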
665 static bool blk_split_discard(struct ioreq *ioreq, blkif_sector_t sector_number,
666 uint64_t nr_sectors)
667 {
668 struct XenBlkDev *blkdev = ioreq->blkdev;
669 int64_t byte_offset;
670 int byte_chunk;
671 uint64_t byte_remaining, limit;
672 uint64_t sec_start = sector_number;
673 uint64_t sec_count = nr_sectors;
674
675 /* Wrap around, or overflowing byte limit? */
676 if (sec_start + sec_count < sec_count ||
677 sec_start + sec_count > INT64_MAX >> BDRV_SECTOR_BITS) {
678 return false;
679 }
680
681 limit = BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS;
682 byte_offset = sec_start << BDRV_SECTOR_BITS;
683 byte_remaining = sec_count << BDRV_SECTOR_BITS;
684
685 do {
686 byte_chunk = byte_remaining > limit ? limit : byte_remaining;
687 ioreq->aio_inflight++;
688 blk_aio_pdiscard(blkdev->blk, byte_offset, byte_chunk,
689 qemu_aio_complete, ioreq);
690 byte_remaining -= byte_chunk;
691 byte_offset += byte_chunk;
692 } while (byte_remaining > 0);
693
694 return true;
695 }
696
697 static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
698 {
699 struct XenBlkDev *blkdev = ioreq->blkdev;
700
701 if (ioreq->blkdev->feature_grant_copy) {
702 ioreq_init_copy_buffers(ioreq);
703 if (ioreq->req.nr_segments && (ioreq->req.operation == BLKIF_OP_WRITE ||
704 ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
705 ioreq_grant_copy(ioreq)) {
706 ioreq_free_copy_buffers(ioreq);
707 goto err;
708 }
709 } else {
710 if (ioreq->req.nr_segments && ioreq_map(ioreq)) {
711 goto err;
712 }
713 }
714
715 ioreq->aio_inflight++;
716 if (ioreq->presync) {
717 blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
718 return 0;
719 }
720
721 switch (ioreq->req.operation) {
722 case BLKIF_OP_READ:
723 block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
724 ioreq->v.size, BLOCK_ACCT_READ);
725 ioreq->aio_inflight++;
726 blk_aio_preadv(blkdev->blk, ioreq->start, &ioreq->v, 0,
727 qemu_aio_complete, ioreq);
728 break;
729 case BLKIF_OP_WRITE:
730 case BLKIF_OP_FLUSH_DISKCACHE:
731 if (!ioreq->req.nr_segments) {
732 break;
733 }
734
735 block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
736 ioreq->v.size,
737 ioreq->req.operation == BLKIF_OP_WRITE ?
738 BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
739 ioreq->aio_inflight++;
740 blk_aio_pwritev(blkdev->blk, ioreq->start, &ioreq->v, 0,
741 qemu_aio_complete, ioreq);
742 break;
743 case BLKIF_OP_DISCARD:
744 {
745 struct blkif_request_discard *req = (void *)&ioreq->req;
746 if (!blk_split_discard(ioreq, req->sector_number, req->nr_sectors)) {
747 goto err;
748 }
749 break;
750 }
751 default:
752 /* unknown operation (shouldn't happen -- parse catches this) */
753 if (!ioreq->blkdev->feature_grant_copy) {
754 ioreq_unmap(ioreq);
755 }
756 goto err;
757 }
758
759 qemu_aio_complete(ioreq, 0);
760
761 return 0;
762
763 err:
764 ioreq_finish(ioreq);
765 ioreq->status = BLKIF_RSP_ERROR;
766 return -1;
767 }
768
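/*
 * Put the response for one finished request on the shared ring, using the
 * frontend's ABI variant, and return whether an event-channel notification
 * is needed.  Flags more_work if further requests are already queued.
 */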
769 static int blk_send_response_one(struct ioreq *ioreq)
770 {
771 struct XenBlkDev *blkdev = ioreq->blkdev;
772 int send_notify = 0;
773 int have_requests = 0;
774 blkif_response_t *resp;
775
776 /* Place on the response ring for the relevant domain. */
777 switch (blkdev->protocol) {
778 case BLKIF_PROTOCOL_NATIVE:
779 resp = (blkif_response_t *) RING_GET_RESPONSE(&blkdev->rings.native,
780 blkdev->rings.native.rsp_prod_pvt);
781 break;
782 case BLKIF_PROTOCOL_X86_32:
783 resp = (blkif_response_t *) RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
784 blkdev->rings.x86_32_part.rsp_prod_pvt);
785 break;
786 case BLKIF_PROTOCOL_X86_64:
787 resp = (blkif_response_t *) RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
788 blkdev->rings.x86_64_part.rsp_prod_pvt);
789 break;
790 default:
791 return 0;
792 }
793
794 resp->id = ioreq->req.id;
795 resp->operation = ioreq->req.operation;
796 resp->status = ioreq->status;
797
798 blkdev->rings.common.rsp_prod_pvt++;
799
800 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
801 if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
802 /*
803 * Tail check for pending requests. Allows frontend to avoid
804 * notifications if requests are already in flight (lower
805 * overheads and promotes batching).
806 */
807 RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
808 } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
809 have_requests = 1;
810 }
811
812 if (have_requests) {
813 blkdev->more_work++;
814 }
815 return send_notify;
816 }
817
818 /* walk finished list, send outstanding responses, free requests */
819 static void blk_send_response_all(struct XenBlkDev *blkdev)
820 {
821 struct ioreq *ioreq;
822 int send_notify = 0;
823
824 while (!QLIST_EMPTY(&blkdev->finished)) {
825 ioreq = QLIST_FIRST(&blkdev->finished);
826 send_notify += blk_send_response_one(ioreq);
827 ioreq_release(ioreq, true);
828 }
829 if (send_notify) {
830 xen_pv_send_notify(&blkdev->xendev);
831 }
832 }
833
834 static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
835 {
836 switch (blkdev->protocol) {
837 case BLKIF_PROTOCOL_NATIVE:
838 memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
839 sizeof(ioreq->req));
840 break;
841 case BLKIF_PROTOCOL_X86_32:
842 blkif_get_x86_32_req(&ioreq->req,
843 RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
844 break;
845 case BLKIF_PROTOCOL_X86_64:
846 blkif_get_x86_64_req(&ioreq->req,
847 RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
848 break;
849 }
850 /* Prevent the compiler from accessing the on-ring fields instead. */
851 barrier();
852 return 0;
853 }
854
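/*
 * Main request loop, run from the bottom half: flush finished responses,
 * then consume requests from the ring until it is empty, a request
 * overflows the ring bounds, or max_requests are already in flight.
 * Reschedules the bottom half if work remains.
 */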
855 static void blk_handle_requests(struct XenBlkDev *blkdev)
856 {
857 RING_IDX rc, rp;
858 struct ioreq *ioreq;
859
860 blkdev->more_work = 0;
861
862 rc = blkdev->rings.common.req_cons;
863 rp = blkdev->rings.common.sring->req_prod;
864 xen_rmb(); /* Ensure we see queued requests up to 'rp'. */
865
866 blk_send_response_all(blkdev);
867 while (rc != rp) {
868 /* pull request from ring */
869 if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
870 break;
871 }
872 ioreq = ioreq_start(blkdev);
873 if (ioreq == NULL) {
874 blkdev->more_work++;
875 break;
876 }
877 blk_get_request(blkdev, ioreq, rc);
878 blkdev->rings.common.req_cons = ++rc;
879
880 /* parse them */
881 if (ioreq_parse(ioreq) != 0) {
882
883 switch (ioreq->req.operation) {
884 case BLKIF_OP_READ:
885 block_acct_invalid(blk_get_stats(blkdev->blk),
886 BLOCK_ACCT_READ);
887 break;
888 case BLKIF_OP_WRITE:
889 block_acct_invalid(blk_get_stats(blkdev->blk),
890 BLOCK_ACCT_WRITE);
891 break;
892 case BLKIF_OP_FLUSH_DISKCACHE:
893 block_acct_invalid(blk_get_stats(blkdev->blk),
894 BLOCK_ACCT_FLUSH);
895 default:
896 break;
897 }
898
899 if (blk_send_response_one(ioreq)) {
900 xen_pv_send_notify(&blkdev->xendev);
901 }
902 ioreq_release(ioreq, false);
903 continue;
904 }
905
906 ioreq_runio_qemu_aio(ioreq);
907 }
908
909 if (blkdev->more_work && blkdev->requests_inflight < blkdev->max_requests) {
910 qemu_bh_schedule(blkdev->bh);
911 }
912 }
913
914 /* ------------------------------------------------------------- */
915
916 static void blk_bh(void *opaque)
917 {
918 struct XenBlkDev *blkdev = opaque;
919 blk_handle_requests(blkdev);
920 }
921
922 static void blk_alloc(struct XenDevice *xendev)
923 {
924 struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
925
926 QLIST_INIT(&blkdev->inflight);
927 QLIST_INIT(&blkdev->finished);
928 QLIST_INIT(&blkdev->freelist);
929 blkdev->bh = qemu_bh_new(blk_bh, blkdev);
930 if (xen_mode != XEN_EMULATE) {
931 batch_maps = 1;
932 }
933 }
934
935 static void blk_parse_discard(struct XenBlkDev *blkdev)
936 {
937 int enable;
938
939 blkdev->feature_discard = true;
940
941 if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
942 blkdev->feature_discard = !!enable;
943 }
944
945 if (blkdev->feature_discard) {
946 xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
947 }
948 }
949
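/*
 * First-stage setup, called when the backend is created: read the backend
 * xenstore nodes and advertise our features to the frontend, including
 * flush, discard, persistent grants (only when grant-copy is unavailable)
 * and the maximum ring page order.
 */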
950 static int blk_init(struct XenDevice *xendev)
951 {
952 struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
953 int info = 0;
954 char *directiosafe = NULL;
955
956 /* read xenstore entries */
957 if (blkdev->params == NULL) {
958 char *h = NULL;
959 blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
960 if (blkdev->params != NULL) {
961 h = strchr(blkdev->params, ':');
962 }
963 if (h != NULL) {
964 blkdev->fileproto = blkdev->params;
965 blkdev->filename = h+1;
966 *h = 0;
967 } else {
968 blkdev->fileproto = "<unset>";
969 blkdev->filename = blkdev->params;
970 }
971 }
972 if (!strcmp("aio", blkdev->fileproto)) {
973 blkdev->fileproto = "raw";
974 }
975 if (!strcmp("vhd", blkdev->fileproto)) {
976 blkdev->fileproto = "vpc";
977 }
978 if (blkdev->mode == NULL) {
979 blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
980 }
981 if (blkdev->type == NULL) {
982 blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
983 }
984 if (blkdev->dev == NULL) {
985 blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
986 }
987 if (blkdev->devtype == NULL) {
988 blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
989 }
990 directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
991 blkdev->directiosafe = (directiosafe && atoi(directiosafe));
992
993 /* do we have all we need? */
994 if (blkdev->params == NULL ||
995 blkdev->mode == NULL ||
996 blkdev->type == NULL ||
997 blkdev->dev == NULL) {
998 goto out_error;
999 }
1000
1001 /* read-only ? */
1002 if (strcmp(blkdev->mode, "w")) {
1003 info |= VDISK_READONLY;
1004 }
1005
1006 /* cdrom ? */
1007 if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
1008 info |= VDISK_CDROM;
1009 }
1010
1011 blkdev->file_blk = BLOCK_SIZE;
1012
1013 blkdev->feature_grant_copy =
1014 (xengnttab_grant_copy(blkdev->xendev.gnttabdev, 0, NULL) == 0);
1015
1016 xen_pv_printf(&blkdev->xendev, 3, "grant copy operation %s\n",
1017 blkdev->feature_grant_copy ? "enabled" : "disabled");
1018
1019 /* fill info
1020 * blk_connect supplies sector-size and sectors
1021 */
1022 xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
1023 xenstore_write_be_int(&blkdev->xendev, "feature-persistent",
1024 !blkdev->feature_grant_copy);
1025 xenstore_write_be_int(&blkdev->xendev, "info", info);
1026
1027 xenstore_write_be_int(&blkdev->xendev, "max-ring-page-order",
1028 MAX_RING_PAGE_ORDER);
1029
1030 blk_parse_discard(blkdev);
1031
1032 g_free(directiosafe);
1033 return 0;
1034
1035 out_error:
1036 g_free(blkdev->params);
1037 blkdev->params = NULL;
1038 g_free(blkdev->mode);
1039 blkdev->mode = NULL;
1040 g_free(blkdev->type);
1041 blkdev->type = NULL;
1042 g_free(blkdev->dev);
1043 blkdev->dev = NULL;
1044 g_free(blkdev->devtype);
1045 blkdev->devtype = NULL;
1046 g_free(directiosafe);
1047 blkdev->directiosafe = false;
1048 return -1;
1049 }
1050
1051 /*
1052 * We need to account for the grant allocations requiring contiguous
1053 * chunks; the worst case number would be
1054 * max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
1055 * but in order to keep things simple just use
1056 * 2 * max_req * max_seg.
1057 */
1058 #define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
1059
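/*
 * Second-stage setup, called when the frontend connects: open or look up
 * the BlockBackend, publish the disk geometry, read the ring references
 * and event channel from the frontend, map the shared ring and bind the
 * event channel.
 */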
1060 static int blk_connect(struct XenDevice *xendev)
1061 {
1062 struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
1063 int pers, index, qflags;
1064 bool readonly = true;
1065 bool writethrough = true;
1066 int order, ring_ref;
1067 unsigned int ring_size, max_grants;
1068 unsigned int i;
1069 uint32_t *domids;
1070
1071 /* block driver open flags */
1072 if (blkdev->directiosafe) {
1073 qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
1074 } else {
1075 qflags = 0;
1076 writethrough = false;
1077 }
1078 if (strcmp(blkdev->mode, "w") == 0) {
1079 qflags |= BDRV_O_RDWR;
1080 readonly = false;
1081 }
1082 if (blkdev->feature_discard) {
1083 qflags |= BDRV_O_UNMAP;
1084 }
1085
1086 /* init qemu block driver */
1087 index = (blkdev->xendev.dev - 202 * 256) / 16;
1088 blkdev->dinfo = drive_get(IF_XEN, 0, index);
1089 if (!blkdev->dinfo) {
1090 Error *local_err = NULL;
1091 QDict *options = NULL;
1092
1093 if (strcmp(blkdev->fileproto, "<unset>")) {
1094 options = qdict_new();
1095 qdict_put_str(options, "driver", blkdev->fileproto);
1096 }
1097
1098 /* setup via xenbus -> create new block driver instance */
1099 xen_pv_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
1100 blkdev->blk = blk_new_open(blkdev->filename, NULL, options,
1101 qflags, &local_err);
1102 if (!blkdev->blk) {
1103 xen_pv_printf(&blkdev->xendev, 0, "error: %s\n",
1104 error_get_pretty(local_err));
1105 error_free(local_err);
1106 return -1;
1107 }
1108 blk_set_enable_write_cache(blkdev->blk, !writethrough);
1109 } else {
1110 /* setup via qemu cmdline -> already set up for us */
1111 xen_pv_printf(&blkdev->xendev, 2,
1112 "get configured bdrv (cmdline setup)\n");
1113 blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
1114 if (blk_is_read_only(blkdev->blk) && !readonly) {
1115 xen_pv_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
1116 blkdev->blk = NULL;
1117 return -1;
1118 }
1119 /* blkdev->blk was not created by us; take a reference
1120 * so we can blk_unref() it unconditionally */
1121 blk_ref(blkdev->blk);
1122 }
1123 blk_attach_dev_legacy(blkdev->blk, blkdev);
1124 blkdev->file_size = blk_getlength(blkdev->blk);
1125 if (blkdev->file_size < 0) {
1126 BlockDriverState *bs = blk_bs(blkdev->blk);
1127 const char *drv_name = bs ? bdrv_get_format_name(bs) : NULL;
1128 xen_pv_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
1129 (int)blkdev->file_size, strerror(-blkdev->file_size),
1130 drv_name ?: "-");
1131 blkdev->file_size = 0;
1132 }
1133
1134 xen_pv_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
1135 " size %" PRId64 " (%" PRId64 " MB)\n",
1136 blkdev->type, blkdev->fileproto, blkdev->filename,
1137 blkdev->file_size, blkdev->file_size >> 20);
1138
1139 /* Fill in the sector size and number of sectors */
1140 xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
1141 xenstore_write_be_int64(&blkdev->xendev, "sectors",
1142 blkdev->file_size / blkdev->file_blk);
1143
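/*
 * Multi-page ring negotiation: a frontend that supports it writes
 * "ring-page-order" plus one "ring-ref%u" node per page; otherwise fall
 * back to the traditional single "ring-ref".  The order must not exceed
 * the MAX_RING_PAGE_ORDER advertised in blk_init.
 */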
1144 if (xenstore_read_fe_int(&blkdev->xendev, "ring-page-order",
1145 &order) == -1) {
1146 blkdev->nr_ring_ref = 1;
1147
1148 if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref",
1149 &ring_ref) == -1) {
1150 return -1;
1151 }
1152 blkdev->ring_ref[0] = ring_ref;
1153
1154 } else if (order >= 0 && order <= MAX_RING_PAGE_ORDER) {
1155 blkdev->nr_ring_ref = 1 << order;
1156
1157 for (i = 0; i < blkdev->nr_ring_ref; i++) {
1158 char *key;
1159
1160 key = g_strdup_printf("ring-ref%u", i);
1161 if (!key) {
1162 return -1;
1163 }
1164
1165 if (xenstore_read_fe_int(&blkdev->xendev, key,
1166 &ring_ref) == -1) {
1167 g_free(key);
1168 return -1;
1169 }
1170 blkdev->ring_ref[i] = ring_ref;
1171
1172 g_free(key);
1173 }
1174 } else {
1175 xen_pv_printf(xendev, 0, "invalid ring-page-order: %d\n",
1176 order);
1177 return -1;
1178 }
1179
1180 if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
1181 &blkdev->xendev.remote_port) == -1) {
1182 return -1;
1183 }
1184 if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
1185 blkdev->feature_persistent = FALSE;
1186 } else {
1187 blkdev->feature_persistent = !!pers;
1188 }
1189
1190 if (!blkdev->xendev.protocol) {
1191 blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
1192 } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_NATIVE) == 0) {
1193 blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
1194 } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
1195 blkdev->protocol = BLKIF_PROTOCOL_X86_32;
1196 } else if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
1197 blkdev->protocol = BLKIF_PROTOCOL_X86_64;
1198 } else {
1199 blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
1200 }
1201
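/*
 * The ring spans nr_ring_ref pages, so the number of request slots it can
 * hold (and therefore max_requests) scales with the negotiated ring size.
 */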
1202 ring_size = XC_PAGE_SIZE * blkdev->nr_ring_ref;
1203 switch (blkdev->protocol) {
1204 case BLKIF_PROTOCOL_NATIVE:
1205 {
1206 blkdev->max_requests = __CONST_RING_SIZE(blkif, ring_size);
1207 break;
1208 }
1209 case BLKIF_PROTOCOL_X86_32:
1210 {
1211 blkdev->max_requests = __CONST_RING_SIZE(blkif_x86_32, ring_size);
1212 break;
1213 }
1214 case BLKIF_PROTOCOL_X86_64:
1215 {
1216 blkdev->max_requests = __CONST_RING_SIZE(blkif_x86_64, ring_size);
1217 break;
1218 }
1219 default:
1220 return -1;
1221 }
1222
1223 /* Calculate the maximum number of grants needed by ioreqs */
1224 max_grants = MAX_GRANTS(blkdev->max_requests,
1225 BLKIF_MAX_SEGMENTS_PER_REQUEST);
1226 /* Add on the number needed for the ring pages */
1227 max_grants += blkdev->nr_ring_ref;
1228
1229 if (xengnttab_set_max_grants(blkdev->xendev.gnttabdev, max_grants)) {
1230 xen_pv_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
1231 strerror(errno));
1232 return -1;
1233 }
1234
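/* Map all ring pages with a single grant-mapping call; every page is
 * granted by the frontend domain. */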
1235 domids = g_malloc0_n(blkdev->nr_ring_ref, sizeof(uint32_t));
1236 for (i = 0; i < blkdev->nr_ring_ref; i++) {
1237 domids[i] = blkdev->xendev.dom;
1238 }
1239
1240 blkdev->sring = xengnttab_map_grant_refs(blkdev->xendev.gnttabdev,
1241 blkdev->nr_ring_ref,
1242 domids,
1243 blkdev->ring_ref,
1244 PROT_READ | PROT_WRITE);
1245
1246 g_free(domids);
1247
1248 if (!blkdev->sring) {
1249 return -1;
1250 }
1251
1252 blkdev->cnt_map++;
1253
1254 switch (blkdev->protocol) {
1255 case BLKIF_PROTOCOL_NATIVE:
1256 {
1257 blkif_sring_t *sring_native = blkdev->sring;
1258 BACK_RING_INIT(&blkdev->rings.native, sring_native, ring_size);
1259 break;
1260 }
1261 case BLKIF_PROTOCOL_X86_32:
1262 {
1263 blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;
1264
1265 BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, ring_size);
1266 break;
1267 }
1268 case BLKIF_PROTOCOL_X86_64:
1269 {
1270 blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;
1271
1272 BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, ring_size);
1273 break;
1274 }
1275 }
1276
1277 if (blkdev->feature_persistent) {
1278 /* Init persistent grants */
1279 blkdev->max_grants = blkdev->max_requests *
1280 BLKIF_MAX_SEGMENTS_PER_REQUEST;
1281 blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
1282 NULL, NULL,
1283 batch_maps ?
1284 (GDestroyNotify)g_free :
1285 (GDestroyNotify)destroy_grant);
1286 blkdev->persistent_regions = NULL;
1287 blkdev->persistent_gnt_count = 0;
1288 }
1289
1290 xen_be_bind_evtchn(&blkdev->xendev);
1291
1292 xen_pv_printf(&blkdev->xendev, 1, "ok: proto %s, nr-ring-ref %u, "
1293 "remote port %d, local port %d\n",
1294 blkdev->xendev.protocol, blkdev->nr_ring_ref,
1295 blkdev->xendev.remote_port, blkdev->xendev.local_port);
1296 return 0;
1297 }
1298
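/*
 * Tear down the connection: drop the BlockBackend, unbind the event
 * channel, unmap the (possibly multi-page) shared ring and release any
 * persistent grants so the frontend can reclaim them.
 */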
1299 static void blk_disconnect(struct XenDevice *xendev)
1300 {
1301 struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
1302
1303 if (blkdev->blk) {
1304 blk_detach_dev(blkdev->blk, blkdev);
1305 blk_unref(blkdev->blk);
1306 blkdev->blk = NULL;
1307 }
1308 xen_pv_unbind_evtchn(&blkdev->xendev);
1309
1310 if (blkdev->sring) {
1311 xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring,
1312 blkdev->nr_ring_ref);
1313 blkdev->cnt_map--;
1314 blkdev->sring = NULL;
1315 }
1316
1317 /*
1318 * Unmap persistent grants before switching to the closed state
1319 * so the frontend can free them.
1320 *
1321 * In the !batch_maps case g_tree_destroy will take care of unmapping
1322 * the grant, but in the batch_maps case we need to iterate over every
1323 * region in persistent_regions and unmap it.
1324 */
1325 if (blkdev->feature_persistent) {
1326 g_tree_destroy(blkdev->persistent_gnts);
1327 assert(batch_maps || blkdev->persistent_gnt_count == 0);
1328 if (batch_maps) {
1329 blkdev->persistent_gnt_count = 0;
1330 g_slist_foreach(blkdev->persistent_regions,
1331 (GFunc)remove_persistent_region, blkdev);
1332 g_slist_free(blkdev->persistent_regions);
1333 }
1334 blkdev->feature_persistent = false;
1335 }
1336 }
1337
1338 static int blk_free(struct XenDevice *xendev)
1339 {
1340 struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
1341 struct ioreq *ioreq;
1342
1343 if (blkdev->blk || blkdev->sring) {
1344 blk_disconnect(xendev);
1345 }
1346
1347 while (!QLIST_EMPTY(&blkdev->freelist)) {
1348 ioreq = QLIST_FIRST(&blkdev->freelist);
1349 QLIST_REMOVE(ioreq, list);
1350 qemu_iovec_destroy(&ioreq->v);
1351 g_free(ioreq);
1352 }
1353
1354 g_free(blkdev->params);
1355 g_free(blkdev->mode);
1356 g_free(blkdev->type);
1357 g_free(blkdev->dev);
1358 g_free(blkdev->devtype);
1359 qemu_bh_delete(blkdev->bh);
1360 return 0;
1361 }
1362
1363 static void blk_event(struct XenDevice *xendev)
1364 {
1365 struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
1366
1367 qemu_bh_schedule(blkdev->bh);
1368 }
1369
1370 struct XenDevOps xen_blkdev_ops = {
1371 .size = sizeof(struct XenBlkDev),
1372 .flags = DEVOPS_FLAG_NEED_GNTDEV,
1373 .alloc = blk_alloc,
1374 .init = blk_init,
1375 .initialise = blk_connect,
1376 .disconnect = blk_disconnect,
1377 .event = blk_event,
1378 .free = blk_free,
1379 };