aio: add AioPollFn and io_poll() interface
[qemu.git] / aio-posix.c
/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"
#ifdef CONFIG_EPOLL_CREATE1
#include <sys/epoll.h>
#endif

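/*
 * One AioHandler tracks a single registered file descriptor: the GPollFD
 * used for poll(), the read/write callbacks, and the flags consulted by the
 * polling code below.  "deleted" marks a node whose removal must be deferred
 * because the handler list is currently being walked, and "is_external" lets
 * aio_node_check() skip external clients while they are disabled.
 */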
struct AioHandler
{
    GPollFD pfd;
    IOHandler *io_read;
    IOHandler *io_write;
    int deleted;
    void *opaque;
    bool is_external;
    QLIST_ENTRY(AioHandler) node;
};

#ifdef CONFIG_EPOLL_CREATE1

/* The fd number threshold to switch to epoll */
#define EPOLL_ENABLE_THRESHOLD 64

static void aio_epoll_disable(AioContext *ctx)
{
    ctx->epoll_available = false;
    if (!ctx->epoll_enabled) {
        return;
    }
    ctx->epoll_enabled = false;
    close(ctx->epollfd);
}

static inline int epoll_events_from_pfd(int pfd_events)
{
    return (pfd_events & G_IO_IN ? EPOLLIN : 0) |
           (pfd_events & G_IO_OUT ? EPOLLOUT : 0) |
           (pfd_events & G_IO_HUP ? EPOLLHUP : 0) |
           (pfd_events & G_IO_ERR ? EPOLLERR : 0);
}

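/*
 * Try to switch to epoll: register every live handler with a non-zero event
 * mask on the epoll instance.  Returns false if any epoll_ctl() call fails;
 * the caller is then expected to call aio_epoll_disable().
 */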
static bool aio_epoll_try_enable(AioContext *ctx)
{
    AioHandler *node;
    struct epoll_event event;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        int r;
        if (node->deleted || !node->pfd.events) {
            continue;
        }
        event.events = epoll_events_from_pfd(node->pfd.events);
        event.data.ptr = node;
        r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
        if (r) {
            return false;
        }
    }
    ctx->epoll_enabled = true;
    return true;
}

static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
    struct epoll_event event;
    int r;
    int ctl;

    if (!ctx->epoll_enabled) {
        return;
    }
    if (!node->pfd.events) {
        ctl = EPOLL_CTL_DEL;
    } else {
        event.data.ptr = node;
        event.events = epoll_events_from_pfd(node->pfd.events);
        ctl = is_new ? EPOLL_CTL_ADD : EPOLL_CTL_MOD;
    }

    r = epoll_ctl(ctx->epollfd, ctl, node->pfd.fd, &event);
    if (r) {
        aio_epoll_disable(ctx);
    }
}

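/*
 * Wait for events via the epoll instance.  The only pollfd handed in is the
 * epollfd itself: for a finite positive timeout, qemu_poll_ns() waits on it
 * with nanosecond resolution and epoll_wait() then only harvests the ready
 * handlers; for a timeout of 0 or -1, epoll_wait() is called directly.
 * epoll event bits are translated back into the G_IO_* revents that the
 * dispatch code expects.
 */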
static int aio_epoll(AioContext *ctx, GPollFD *pfds,
                     unsigned npfd, int64_t timeout)
{
    AioHandler *node;
    int i, ret = 0;
    struct epoll_event events[128];

    assert(npfd == 1);
    assert(pfds[0].fd == ctx->epollfd);
    if (timeout > 0) {
        ret = qemu_poll_ns(pfds, npfd, timeout);
    }
    if (timeout <= 0 || ret > 0) {
        ret = epoll_wait(ctx->epollfd, events,
                         sizeof(events) / sizeof(events[0]),
                         timeout);
        if (ret <= 0) {
            goto out;
        }
        for (i = 0; i < ret; i++) {
            int ev = events[i].events;
            node = events[i].data.ptr;
            node->pfd.revents = (ev & EPOLLIN ? G_IO_IN : 0) |
                (ev & EPOLLOUT ? G_IO_OUT : 0) |
                (ev & EPOLLHUP ? G_IO_HUP : 0) |
                (ev & EPOLLERR ? G_IO_ERR : 0);
        }
    }
out:
    return ret;
}

static bool aio_epoll_enabled(AioContext *ctx)
{
    /* Fall back to ppoll when external clients are disabled. */
    return !aio_external_disabled(ctx) && ctx->epoll_enabled;
}

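/*
 * Decide whether this iteration should use epoll.  Returns true if epoll is
 * already enabled (and external clients are not disabled), or if the pollfd
 * count has reached EPOLL_ENABLE_THRESHOLD and all handlers could be
 * registered with the epoll instance; otherwise the caller falls back to
 * qemu_poll_ns().
 */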
static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
                                 unsigned npfd, int64_t timeout)
{
    if (!ctx->epoll_available) {
        return false;
    }
    if (aio_epoll_enabled(ctx)) {
        return true;
    }
    if (npfd >= EPOLL_ENABLE_THRESHOLD) {
        if (aio_epoll_try_enable(ctx)) {
            return true;
        } else {
            aio_epoll_disable(ctx);
        }
    }
    return false;
}

#else

static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
}

static int aio_epoll(AioContext *ctx, GPollFD *pfds,
                     unsigned npfd, int64_t timeout)
{
    assert(false);
}

static bool aio_epoll_enabled(AioContext *ctx)
{
    return false;
}

static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
                                 unsigned npfd, int64_t timeout)
{
    return false;
}

#endif

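/* Return the live (not yet deleted) handler registered for fd, or NULL. */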
static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd)
            if (!node->deleted)
                return node;
    }

    return NULL;
}

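/*
 * Register, update or remove the handler for a file descriptor.  Passing
 * NULL for both io_read and io_write removes an existing handler; if the
 * handler list is currently being walked, the node is only marked as deleted
 * and is freed later from aio_dispatch().  Note that this file accepts the
 * new io_poll argument but does not store it in the AioHandler yet.
 */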
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque)
{
    AioHandler *node;
    bool is_new = false;
    bool deleted = false;

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node == NULL) {
            return;
        }

        g_source_remove_poll(&ctx->source, &node->pfd);

        /* If the lock is held, just mark the node as deleted */
        if (ctx->walking_handlers) {
            node->deleted = 1;
            node->pfd.revents = 0;
        } else {
            /* Otherwise, delete it for real.  We can't just mark it as
             * deleted because deleted nodes are only cleaned up after
             * releasing the walking_handlers lock.
             */
            QLIST_REMOVE(node, node);
            deleted = true;
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
            is_new = true;
        }
        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->opaque = opaque;
        node->is_external = is_external;

        node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
    }

    aio_epoll_update(ctx, node, is_new);
    aio_notify(ctx);
    if (deleted) {
        g_free(node);
    }
}

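/*
 * aio_set_event_notifier() is a convenience wrapper that registers the
 * notifier's file descriptor through aio_set_fd_handler(), passing the
 * EventNotifier itself as the opaque pointer.
 *
 * Illustrative sketch only (my_poll, MyDev, my_dev_has_work and
 * my_notifier_read are hypothetical names, not QEMU APIs); a user of the
 * new io_poll() interface might look roughly like:
 *
 *     static bool my_poll(void *opaque)
 *     {
 *         EventNotifier *n = opaque;
 *         MyDev *dev = container_of(n, MyDev, notifier);
 *
 *         return my_dev_has_work(dev);
 *     }
 *
 *     aio_set_event_notifier(ctx, &dev->notifier, true,
 *                            my_notifier_read, my_poll);
 *
 * AioPollFn is assumed here to return true when the handler has work that
 * can be completed without blocking; see include/block/aio.h for the
 * authoritative typedef.
 */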
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), is_external,
                       (IOHandler *)io_read, NULL, io_poll, notifier);
}

bool aio_prepare(AioContext *ctx)
{
    return false;
}

bool aio_pending(AioContext *ctx)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read &&
            aio_node_check(ctx, node->is_external)) {
            return true;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write &&
            aio_node_check(ctx, node->is_external)) {
            return true;
        }
    }

    return false;
}

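/*
 * Run pending bottom halves, then (when dispatch_fds is true) the ready file
 * descriptor handlers, and finally any expired timers.  Handlers marked as
 * deleted are freed here once no walker still references the list.
 */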
/*
 * Note that dispatch_fds == false has the side-effect of postponing the
 * freeing of deleted handlers.
 */
bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
{
    AioHandler *node = NULL;
    bool progress = false;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call select in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for aio_poll loops).
     */
    if (aio_bh_poll(ctx)) {
        progress = true;
    }

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    if (dispatch_fds) {
        node = QLIST_FIRST(&ctx->aio_handlers);
    }
    while (node) {
        AioHandler *tmp;
        int revents;

        ctx->walking_handlers++;

        revents = node->pfd.revents & node->pfd.events;
        node->pfd.revents = 0;

        if (!node->deleted &&
            (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
            aio_node_check(ctx, node->is_external) &&
            node->io_read) {
            node->io_read(node->opaque);

            /* aio_notify() does not count as progress */
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }
        if (!node->deleted &&
            (revents & (G_IO_OUT | G_IO_ERR)) &&
            aio_node_check(ctx, node->is_external) &&
            node->io_write) {
            node->io_write(node->opaque);
            progress = true;
        }

        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }

    /* Run our timers */
    progress |= timerlistgroup_run_timers(&ctx->tlg);

    return progress;
}

/* These thread-local variables are used only in a small part of aio_poll
 * around the call to the poll() system call.  In particular they are not
 * used while aio_poll is performing callbacks, which makes it much easier
 * to think about reentrancy!
 *
 * Stack-allocated arrays would be perfect but they have size limitations;
 * heap allocation is expensive enough that we want to reuse arrays across
 * calls to aio_poll().  And because poll() has to be called without holding
 * any lock, the arrays cannot be stored in AioContext.  Thread-local data
 * has none of the disadvantages of these three options.
 */
static __thread GPollFD *pollfds;
static __thread AioHandler **nodes;
static __thread unsigned npfd, nalloc;
static __thread Notifier pollfds_cleanup_notifier;

static void pollfds_cleanup(Notifier *n, void *unused)
{
    g_assert(npfd == 0);
    g_free(pollfds);
    g_free(nodes);
    nalloc = 0;
}

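/*
 * Append one handler to the thread-local pollfds[]/nodes[] arrays, doubling
 * their capacity as needed and registering the cleanup notifier on first use.
 */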
static void add_pollfd(AioHandler *node)
{
    if (npfd == nalloc) {
        if (nalloc == 0) {
            pollfds_cleanup_notifier.notify = pollfds_cleanup;
            qemu_thread_atexit_add(&pollfds_cleanup_notifier);
            nalloc = 8;
        } else {
            g_assert(nalloc <= INT_MAX);
            nalloc *= 2;
        }
        pollfds = g_renew(GPollFD, pollfds, nalloc);
        nodes = g_renew(AioHandler *, nodes, nalloc);
    }
    nodes[npfd] = node;
    pollfds[npfd] = (GPollFD) {
        .fd = node->pfd.fd,
        .events = node->pfd.events,
    };
    npfd++;
}

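/*
 * Poll the AioContext once.  For a blocking poll, notify_me is raised so
 * that concurrent aio_notify() calls really wake up this thread, and the
 * context lock is dropped around any wait with a non-zero timeout.  The
 * pollfd array is built from the registered handlers (or collapsed to the
 * single epollfd when epoll mode is active), the wait is done with
 * qemu_poll_ns() or aio_epoll(), revents are copied back into the handlers,
 * and aio_dispatch() finally runs ready handlers and timers.
 */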
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    int i, ret;
    bool progress;
    int64_t timeout;

    aio_context_acquire(ctx);
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        atomic_add(&ctx->notify_me, 2);
    }

    ctx->walking_handlers++;

    assert(npfd == 0);

    /* fill pollfds */

    if (!aio_epoll_enabled(ctx)) {
        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
            if (!node->deleted && node->pfd.events
                && aio_node_check(ctx, node->is_external)) {
                add_pollfd(node);
            }
        }
    }

    timeout = blocking ? aio_compute_timeout(ctx) : 0;

    /* wait until next event */
    if (timeout) {
        aio_context_release(ctx);
    }
    if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
        AioHandler epoll_handler;

        epoll_handler.pfd.fd = ctx->epollfd;
        epoll_handler.pfd.events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR;
        npfd = 0;
        add_pollfd(&epoll_handler);
        ret = aio_epoll(ctx, pollfds, npfd, timeout);
    } else {
        ret = qemu_poll_ns(pollfds, npfd, timeout);
    }
    if (blocking) {
        atomic_sub(&ctx->notify_me, 2);
    }
    if (timeout) {
        aio_context_acquire(ctx);
    }

    aio_notify_accept(ctx);

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        for (i = 0; i < npfd; i++) {
            nodes[i]->pfd.revents = pollfds[i].revents;
        }
    }

    npfd = 0;
    ctx->walking_handlers--;

    /* Run dispatch even if there were no readable fds to run timers */
    if (aio_dispatch(ctx, ret > 0)) {
        progress = true;
    }

    aio_context_release(ctx);

    return progress;
}

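/*
 * One-time setup hook for an AioContext.  With CONFIG_EPOLL_CREATE1 this
 * creates the epoll instance; if that fails, epoll_available is cleared and
 * aio_poll() falls back to qemu_poll_ns() for this context.
 */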
void aio_context_setup(AioContext *ctx)
{
#ifdef CONFIG_EPOLL_CREATE1
    assert(!ctx->epollfd);
    ctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
    if (ctx->epollfd == -1) {
        fprintf(stderr, "Failed to create epoll instance: %s", strerror(errno));
        ctx->epoll_available = false;
    } else {
        ctx->epoll_available = true;
    }
#endif
}