[qemu.git] / dma-helpers.c
/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "trace.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"

/* #define DEBUG_IOMMU */

/*
 * Fill @len bytes of guest memory at @addr in @as with the constant byte
 * @c, FILLBUF_SIZE bytes at a time.  Returns nonzero if any chunk failed
 * to write.
 */
int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(as, DMA_DIRECTION_FROM_DEVICE);

#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;
    bool error = false;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        error |= address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
                                  fillbuf, l, true);
        len -= l;
        addr += l;
    }

    return error;
}
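
/*
 * Example (illustrative only; the device, field and constant names are
 * hypothetical): a device model could zero a completion ring in guest
 * memory with dma_memory_set():
 *
 *     static void mydev_zero_ring(MyDevState *s)
 *     {
 *         if (dma_memory_set(&address_space_memory, s->ring_base,
 *                            0, MYDEV_RING_BYTES)) {
 *             qemu_log_mask(LOG_GUEST_ERROR, "mydev: ring clear failed\n");
 *         }
 *     }
 */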

/*
 * Initialize @qsg with room for @alloc_hint entries.  A reference is taken
 * on @dev so that it stays alive for as long as the list does.
 */
void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->as = as;
    qsg->dev = dev;
    object_ref(OBJECT(dev));
}

/* Append one entry, growing the array geometrically when it is full. */
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

/* Drop the device reference and free the entry array. */
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    object_unref(OBJECT(qsg->dev));
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}
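
/*
 * Typical QEMUSGList lifecycle (sketch; the descriptor layout and state
 * names are hypothetical): a device walks its descriptor ring, builds the
 * list, hands it to dma_blk_read()/dma_blk_write() below, and destroys it
 * from the completion callback:
 *
 *     qemu_sglist_init(&s->qsg, DEVICE(s), s->num_desc,
 *                      &address_space_memory);
 *     for (i = 0; i < s->num_desc; i++) {
 *         qemu_sglist_add(&s->qsg, s->desc[i].addr, s->desc[i].len);
 *     }
 */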

typedef struct {
    BlockAIOCB common;
    BlockBackend *blk;
    BlockAIOCB *acb;        /* in-flight block layer request, if any */
    QEMUSGList *sg;
    uint64_t sector_num;
    DMADirection dir;
    int sg_cur_index;       /* next scatter/gather entry to map */
    dma_addr_t sg_cur_byte; /* offset into that entry */
    QEMUIOVector iov;       /* the currently mapped portion of the list */
    QEMUBH *bh;             /* retry BH while waiting for a bounce buffer */
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_blk_cb(void *opaque, int ret);

/* BH handler: a bounce buffer has become available, resume the transfer. */
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_blk_cb(dbs, 0);
}

/* Unmap everything currently in dbs->iov and reset the vector. */
static void dma_blk_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->as, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

/* Deliver the final return value to the caller and release the AIOCB. */
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_blk_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    qemu_aio_unref(dbs);
}

static void dma_blk_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_blk_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }
    dma_blk_unmap(dbs);

    /* Map as much of the remaining scatter/gather list as possible. */
    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        /* Nothing could be mapped; wait for a bounce buffer to free up. */
        trace_dma_map_wait(dbs);
        dbs->bh = aio_bh_new(blk_get_aio_context(dbs->blk),
                             reschedule_dma, dbs);
        cpu_register_map_client(dbs->bh);
        return;
    }

    if (dbs->iov.size & ~BDRV_SECTOR_MASK) {
        /* Submit whole sectors only; the tail is picked up on the next pass. */
        qemu_iovec_discard_back(&dbs->iov, dbs->iov.size & ~BDRV_SECTOR_MASK);
    }

    dbs->acb = dbs->io_func(dbs->blk, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_blk_cb, dbs);
    assert(dbs->acb);
}

static void dma_aio_cancel(BlockAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        blk_aio_cancel_async(dbs->acb);
    }
    if (dbs->bh) {
        cpu_unregister_map_client(dbs->bh);
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
}

static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size = sizeof(DMAAIOCB),
    .cancel_async = dma_aio_cancel,
};

/*
 * Start an asynchronous transfer between @blk and the guest memory
 * described by @sg, submitted through @io_func (e.g. blk_aio_readv or
 * blk_aio_writev).
 */
BlockAIOCB *dma_blk_io(
    BlockBackend *blk, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = blk_aio_get(&dma_aiocb_info, blk, cb, opaque);

    trace_dma_blk_io(dbs, blk, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->blk = blk;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_blk_cb(dbs, 0);
    return &dbs->common;
}

BlockAIOCB *dma_blk_read(BlockBackend *blk,
                         QEMUSGList *sg, uint64_t sector,
                         void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk, sg, sector, blk_aio_readv, cb, opaque,
                      DMA_DIRECTION_FROM_DEVICE);
}

BlockAIOCB *dma_blk_write(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t sector,
                          void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk, sg, sector, blk_aio_writev, cb, opaque,
                      DMA_DIRECTION_TO_DEVICE);
}
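
/*
 * Example (sketch; MyDevState and its helpers are hypothetical): an emulated
 * disk controller starts a guest read with dma_blk_read() and finishes the
 * request from its completion callback:
 *
 *     static void mydev_dma_done(void *opaque, int ret)
 *     {
 *         MyDevState *s = opaque;
 *
 *         qemu_sglist_destroy(&s->qsg);
 *         mydev_complete_request(s, ret);
 *     }
 *
 *     s->aiocb = dma_blk_read(s->blk, &s->qsg, lba, mydev_dma_done, s);
 */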

/*
 * Synchronously copy up to @len bytes between @ptr and the guest memory
 * described by @sg.  Returns the number of bytes of @sg that were not
 * covered by the transfer.
 */
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->as, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}
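
/*
 * For small, bounded transfers (e.g. returning a sense or info structure to
 * the guest) a device can use these synchronous helpers instead of the async
 * machinery above.  Sketch with hypothetical names:
 *
 *     uint8_t sense[18];
 *     uint64_t resid;
 *
 *     mydev_fill_sense(s, sense);
 *     resid = dma_buf_read(sense, sizeof(sense), &s->qsg);
 *
 * where resid is the part of the scatter/gather list left unwritten.
 */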

/* Begin I/O accounting for a DMA request that covers the whole of @sg. */
void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    block_acct_start(blk_get_stats(blk), cookie, sg->size, type);
}
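
/*
 * dma_acct_start() pairs with block_acct_done()/block_acct_failed() around
 * a DMA request, so that the whole scatter/gather size is accounted.
 * Sketch, reusing the hypothetical device above:
 *
 *     dma_acct_start(s->blk, &s->acct, &s->qsg, BLOCK_ACCT_READ);
 *     s->aiocb = dma_blk_read(s->blk, &s->qsg, lba, mydev_dma_done, s);
 *
 * and in the completion callback:
 *
 *     block_acct_done(blk_get_stats(s->blk), &s->acct);
 */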