util/qemu-coroutine-lock.c
/*
 * coroutine queues and locks
 *
 * Copyright (c) 2011 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * The lock-free mutex implementation is based on OSv
 * (core/lfmutex.cc, include/lockfree/mutex.hh).
 * Copyright (C) 2013 Cloudius Systems, Ltd.
 */

#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "qemu/coroutine_int.h"
#include "qemu/processor.h"
#include "qemu/queue.h"
#include "block/aio.h"
#include "trace.h"

void qemu_co_queue_init(CoQueue *queue)
{
    QSIMPLEQ_INIT(&queue->entries);
}

void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock)
{
    Coroutine *self = qemu_coroutine_self();
    QSIMPLEQ_INSERT_TAIL(&queue->entries, self, co_queue_next);

    if (lock) {
        qemu_lockable_unlock(lock);
    }

    /* There is no race condition here.  Other threads will call
     * aio_co_schedule on our AioContext, which can reenter this
     * coroutine but only after this yield and after the main loop
     * has gone through the next iteration.
     */
    qemu_coroutine_yield();
    assert(qemu_in_coroutine());

    /* TODO: OSv implements wait morphing here, where the wakeup
     * primitive automatically places the woken coroutine on the
     * mutex's queue.  This avoids the thundering herd effect.
     * This could be implemented for CoMutexes, but not really for
     * other cases of QemuLockable.
     */
    if (lock) {
        qemu_lockable_lock(lock);
    }
}
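
/*
 * Illustrative sketch (not part of this file): how a caller typically
 * blocks on a CoQueue through the qemu_co_queue_wait() macro from
 * qemu/coroutine.h.  The condition, queue and mutex names below are
 * hypothetical.
 *
 *     CoMutex lock;
 *     CoQueue queue;        // qemu_co_queue_init(&queue) at setup time
 *     bool ready;
 *
 *     void coroutine_fn consumer(void)
 *     {
 *         qemu_co_mutex_lock(&lock);
 *         while (!ready) {
 *             // Atomically drops and retakes the mutex around the yield.
 *             qemu_co_queue_wait(&queue, &lock);
 *         }
 *         qemu_co_mutex_unlock(&lock);
 *     }
 */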

bool qemu_co_enter_next_impl(CoQueue *queue, QemuLockable *lock)
{
    Coroutine *next;

    next = QSIMPLEQ_FIRST(&queue->entries);
    if (!next) {
        return false;
    }

    QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next);
    if (lock) {
        qemu_lockable_unlock(lock);
    }
    aio_co_wake(next);
    if (lock) {
        qemu_lockable_lock(lock);
    }
    return true;
}

bool coroutine_fn qemu_co_queue_next(CoQueue *queue)
{
    /* No unlock/lock needed in coroutine context.  */
    return qemu_co_enter_next_impl(queue, NULL);
}

void qemu_co_enter_all_impl(CoQueue *queue, QemuLockable *lock)
{
    while (qemu_co_enter_next_impl(queue, lock)) {
        /* just loop */
    }
}

void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue)
{
    /* No unlock/lock needed in coroutine context.  */
    qemu_co_enter_all_impl(queue, NULL);
}
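
/*
 * Illustrative sketch (not part of this file): the wake side of the
 * hypothetical producer/consumer pair above.  Waking a single waiter
 * uses qemu_co_queue_next(); waking everybody uses
 * qemu_co_queue_restart_all().
 *
 *     void coroutine_fn producer(void)
 *     {
 *         qemu_co_mutex_lock(&lock);
 *         ready = true;
 *         qemu_co_queue_restart_all(&queue);
 *         qemu_co_mutex_unlock(&lock);
 *     }
 */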

bool qemu_co_queue_empty(CoQueue *queue)
{
    return QSIMPLEQ_FIRST(&queue->entries) == NULL;
}

/* The wait records are handled with a multiple-producer, single-consumer
 * lock-free queue.  There cannot be two concurrent pop_waiter() calls
 * because pop_waiter() can only be called while mutex->handoff is zero.
 * This can happen in three cases:
 * - in qemu_co_mutex_unlock, before the hand-off protocol has started.
 *   In this case, qemu_co_mutex_lock will see mutex->handoff == 0 and
 *   not take part in the handoff.
 * - in qemu_co_mutex_lock, if it steals the hand-off responsibility from
 *   qemu_co_mutex_unlock.  In this case, qemu_co_mutex_unlock will fail
 *   the cmpxchg (it will see either 0 or the next sequence value) and
 *   exit.  The next hand-off cannot begin until qemu_co_mutex_lock has
 *   woken up someone.
 * - in qemu_co_mutex_unlock, if it takes the hand-off token itself.
 *   In this case another iteration starts with mutex->handoff == 0;
 *   a concurrent qemu_co_mutex_lock will fail the cmpxchg, and
 *   qemu_co_mutex_unlock will go back to case (1).
 *
 * The following functions manage this queue.
 */
typedef struct CoWaitRecord {
    Coroutine *co;
    QSLIST_ENTRY(CoWaitRecord) next;
} CoWaitRecord;

static void push_waiter(CoMutex *mutex, CoWaitRecord *w)
{
    w->co = qemu_coroutine_self();
    QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next);
}

static void move_waiters(CoMutex *mutex)
{
    QSLIST_HEAD(, CoWaitRecord) reversed;
    QSLIST_MOVE_ATOMIC(&reversed, &mutex->from_push);
    while (!QSLIST_EMPTY(&reversed)) {
        CoWaitRecord *w = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, next);
        QSLIST_INSERT_HEAD(&mutex->to_pop, w, next);
    }
}
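
/*
 * Worked example (added for illustration): three lock() calls push wait
 * records A, B and C.  Because push_waiter() inserts at the head,
 * from_push holds C -> B -> A, i.e. LIFO order.  move_waiters() pops
 * that list head-first and pushes each element onto to_pop, reversing
 * it again: to_pop ends up as A -> B -> C, so pop_waiter() hands the
 * mutex over in FIFO order.
 */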

static CoWaitRecord *pop_waiter(CoMutex *mutex)
{
    CoWaitRecord *w;

    if (QSLIST_EMPTY(&mutex->to_pop)) {
        move_waiters(mutex);
        if (QSLIST_EMPTY(&mutex->to_pop)) {
            return NULL;
        }
    }
    w = QSLIST_FIRST(&mutex->to_pop);
    QSLIST_REMOVE_HEAD(&mutex->to_pop, next);
    return w;
}

static bool has_waiters(CoMutex *mutex)
{
    /* True if any coroutine is queued on either list.  */
    return !QSLIST_EMPTY(&mutex->to_pop) || !QSLIST_EMPTY(&mutex->from_push);
}

void qemu_co_mutex_init(CoMutex *mutex)
{
    memset(mutex, 0, sizeof(*mutex));
}

static void coroutine_fn qemu_co_mutex_wake(CoMutex *mutex, Coroutine *co)
{
    /* Read co before co->ctx; pairs with smp_wmb() in
     * qemu_coroutine_enter().
     */
    smp_read_barrier_depends();
    mutex->ctx = co->ctx;
    aio_co_wake(co);
}

static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx,
                                                     CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();
    CoWaitRecord w;
    unsigned old_handoff;

    trace_qemu_co_mutex_lock_entry(mutex, self);
    push_waiter(mutex, &w);

    /* This is the "Responsibility Hand-Off" protocol; a lock() picks from
     * a concurrent unlock() the responsibility of waking somebody up.
     */
    old_handoff = qatomic_mb_read(&mutex->handoff);
    if (old_handoff &&
        has_waiters(mutex) &&
        qatomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) {
        /* There can be no concurrent pops, because there can be only
         * one active handoff at a time.
         */
        CoWaitRecord *to_wake = pop_waiter(mutex);
        Coroutine *co = to_wake->co;
        if (co == self) {
            /* We got the lock ourselves!  */
            assert(to_wake == &w);
            mutex->ctx = ctx;
            return;
        }

        qemu_co_mutex_wake(mutex, co);
    }

    qemu_coroutine_yield();
    trace_qemu_co_mutex_lock_return(mutex, self);
}
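
/*
 * Illustrative timeline (added for clarity): suppose coroutine U calls
 * qemu_co_mutex_unlock() while coroutine L is between its fetch_inc in
 * qemu_co_mutex_lock() and push_waiter() above.  U finds no waiter to
 * pop, publishes a nonzero token in mutex->handoff and leaves.  Once L
 * has pushed its wait record, it reads the token, cmpxchg's it back to
 * zero, pops the head waiter (possibly itself) and either takes the
 * lock directly or wakes that waiter.  Exactly one side can win the
 * cmpxchg, so exactly one wakeup happens.
 */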

void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
{
    AioContext *ctx = qemu_get_current_aio_context();
    Coroutine *self = qemu_coroutine_self();
    int waiters, i;

    /* Running a very small critical section on pthread_mutex_t and CoMutex
     * shows that pthread_mutex_t is much faster because it doesn't actually
     * go to sleep.  What happens is that the critical section is shorter
     * than the latency of entering the kernel and thus FUTEX_WAIT always
     * fails.  With CoMutex there is no such latency but you still want to
     * avoid wait and wakeup.  So introduce it artificially.
     */
    i = 0;
retry_fast_path:
    waiters = qatomic_cmpxchg(&mutex->locked, 0, 1);
    if (waiters != 0) {
        while (waiters == 1 && ++i < 1000) {
            if (qatomic_read(&mutex->ctx) == ctx) {
                break;
            }
            if (qatomic_read(&mutex->locked) == 0) {
                goto retry_fast_path;
            }
            cpu_relax();
        }
        waiters = qatomic_fetch_inc(&mutex->locked);
    }

    if (waiters == 0) {
        /* Uncontended.  */
        trace_qemu_co_mutex_lock_uncontended(mutex, self);
        mutex->ctx = ctx;
    } else {
        qemu_co_mutex_lock_slowpath(ctx, mutex);
    }
    mutex->holder = self;
    self->locks_held++;
}
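
/*
 * Illustrative sketch (not part of this file): a minimal CoMutex user.
 * The counter and mutex names are hypothetical; the pattern is simply
 * lock/unlock around a critical section in coroutine context.
 *
 *     static CoMutex counter_lock;   // qemu_co_mutex_init() at setup
 *     static int counter;
 *
 *     void coroutine_fn bump_counter(void)
 *     {
 *         qemu_co_mutex_lock(&counter_lock);
 *         counter++;                 // may yield while contended
 *         qemu_co_mutex_unlock(&counter_lock);
 *     }
 */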

void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();

    trace_qemu_co_mutex_unlock_entry(mutex, self);

    assert(mutex->locked);
    assert(mutex->holder == self);
    assert(qemu_in_coroutine());

    mutex->ctx = NULL;
    mutex->holder = NULL;
    self->locks_held--;
    if (qatomic_fetch_dec(&mutex->locked) == 1) {
        /* No waiting qemu_co_mutex_lock().  Phew, that was easy!  */
        return;
    }

    for (;;) {
        CoWaitRecord *to_wake = pop_waiter(mutex);
        unsigned our_handoff;

        if (to_wake) {
            qemu_co_mutex_wake(mutex, to_wake->co);
            break;
        }

        /* Some concurrent lock() is in progress (we know this because
         * mutex->locked was >1) but it hasn't yet put itself on the wait
         * queue.  Pick a sequence number for the handoff protocol (not 0).
         */
        if (++mutex->sequence == 0) {
            mutex->sequence = 1;
        }

        our_handoff = mutex->sequence;
        qatomic_mb_set(&mutex->handoff, our_handoff);
        if (!has_waiters(mutex)) {
            /* The concurrent lock has not added itself yet, so it
             * will be able to pick our handoff.
             */
            break;
        }

        /* Try to do the handoff protocol ourselves; if somebody else has
         * already taken it, however, we're done and they're responsible.
         */
        if (qatomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) {
            break;
        }
    }

    trace_qemu_co_mutex_unlock_return(mutex, self);
}
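
/*
 * Summary of the loop above (added for clarity): each iteration ends in
 * one of three ways.  (1) A waiter was popped and woken: done.  (2) No
 * waiter is visible, so the token is left published and the concurrent
 * lock() will consume it: done.  (3) A waiter appeared after the token
 * was published; if we win the cmpxchg we retract the token and loop to
 * wake that waiter ourselves, and if we lose it the lock() side has
 * already taken over the wakeup: done.
 */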

struct CoRwTicket {
    bool read;
    Coroutine *co;
    QSIMPLEQ_ENTRY(CoRwTicket) next;
};

void qemu_co_rwlock_init(CoRwlock *lock)
{
    qemu_co_mutex_init(&lock->mutex);
    lock->owners = 0;
    QSIMPLEQ_INIT(&lock->tickets);
}

/* Releases the internal CoMutex.  */
static void qemu_co_rwlock_maybe_wake_one(CoRwlock *lock)
{
    CoRwTicket *tkt = QSIMPLEQ_FIRST(&lock->tickets);
    Coroutine *co = NULL;

    /*
     * Setting lock->owners here prevents rdlock and wrlock from
     * sneaking in between unlock and wake.
     */

    if (tkt) {
        if (tkt->read) {
            if (lock->owners >= 0) {
                lock->owners++;
                co = tkt->co;
            }
        } else {
            if (lock->owners == 0) {
                lock->owners = -1;
                co = tkt->co;
            }
        }
    }

    if (co) {
        QSIMPLEQ_REMOVE_HEAD(&lock->tickets, next);
        qemu_co_mutex_unlock(&lock->mutex);
        aio_co_wake(co);
    } else {
        qemu_co_mutex_unlock(&lock->mutex);
    }
}
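
/*
 * Note on owners (added for clarity): lock->owners is the number of
 * readers holding the lock, or -1 while a writer holds it.  Only the
 * head ticket is ever woken here; when several readers are queued, each
 * woken reader calls back into qemu_co_rwlock_maybe_wake_one() from
 * rdlock, so a run of queued readers wakes up one after another in
 * ticket order.
 */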

void qemu_co_rwlock_rdlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    qemu_co_mutex_lock(&lock->mutex);
    /* For fairness, wait if a writer is in line.  */
    if (lock->owners == 0 || (lock->owners > 0 && QSIMPLEQ_EMPTY(&lock->tickets))) {
        lock->owners++;
        qemu_co_mutex_unlock(&lock->mutex);
    } else {
        CoRwTicket my_ticket = { true, self };

        QSIMPLEQ_INSERT_TAIL(&lock->tickets, &my_ticket, next);
        qemu_co_mutex_unlock(&lock->mutex);
        qemu_coroutine_yield();
        assert(lock->owners >= 1);

        /* Possibly wake another reader, which will wake the next in line.  */
        qemu_co_mutex_lock(&lock->mutex);
        qemu_co_rwlock_maybe_wake_one(lock);
    }

    self->locks_held++;
}

void qemu_co_rwlock_unlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    assert(qemu_in_coroutine());
    self->locks_held--;

    qemu_co_mutex_lock(&lock->mutex);
    if (lock->owners > 0) {
        lock->owners--;
    } else {
        assert(lock->owners == -1);
        lock->owners = 0;
    }

    qemu_co_rwlock_maybe_wake_one(lock);
}
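
/*
 * Illustrative sketch (not part of this file): typical reader and writer
 * critical sections on a CoRwlock, with hypothetical names.  Both run in
 * coroutine context and both release via qemu_co_rwlock_unlock().
 *
 *     static CoRwlock table_lock;    // qemu_co_rwlock_init() at setup
 *
 *     void coroutine_fn read_table(void)
 *     {
 *         qemu_co_rwlock_rdlock(&table_lock);
 *         // ... shared access, may run concurrently with other readers
 *         qemu_co_rwlock_unlock(&table_lock);
 *     }
 *
 *     void coroutine_fn update_table(void)
 *     {
 *         qemu_co_rwlock_wrlock(&table_lock);
 *         // ... exclusive access
 *         qemu_co_rwlock_unlock(&table_lock);
 *     }
 */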

void qemu_co_rwlock_downgrade(CoRwlock *lock)
{
    qemu_co_mutex_lock(&lock->mutex);
    assert(lock->owners == -1);
    lock->owners = 1;

    /* Possibly wake another reader, which will wake the next in line.  */
    qemu_co_rwlock_maybe_wake_one(lock);
}

void qemu_co_rwlock_wrlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    qemu_co_mutex_lock(&lock->mutex);
    if (lock->owners == 0) {
        lock->owners = -1;
        qemu_co_mutex_unlock(&lock->mutex);
    } else {
        CoRwTicket my_ticket = { false, qemu_coroutine_self() };

        QSIMPLEQ_INSERT_TAIL(&lock->tickets, &my_ticket, next);
        qemu_co_mutex_unlock(&lock->mutex);
        qemu_coroutine_yield();
        assert(lock->owners == -1);
    }

    self->locks_held++;
}

void qemu_co_rwlock_upgrade(CoRwlock *lock)
{
    qemu_co_mutex_lock(&lock->mutex);
    assert(lock->owners > 0);
    /* For fairness, wait if a writer is in line.  */
    if (lock->owners == 1 && QSIMPLEQ_EMPTY(&lock->tickets)) {
        lock->owners = -1;
        qemu_co_mutex_unlock(&lock->mutex);
    } else {
        CoRwTicket my_ticket = { false, qemu_coroutine_self() };

        lock->owners--;
        QSIMPLEQ_INSERT_TAIL(&lock->tickets, &my_ticket, next);
        qemu_co_rwlock_maybe_wake_one(lock);
        qemu_coroutine_yield();
        assert(lock->owners == -1);
    }
}
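
/*
 * Illustrative sketch (not part of this file): upgrading a read lock to
 * a write lock and downgrading it back, with hypothetical names.  Note
 * that qemu_co_rwlock_upgrade() may yield, so state read under rdlock
 * should be revalidated after the upgrade.
 *
 *     void coroutine_fn maybe_update_table(void)
 *     {
 *         qemu_co_rwlock_rdlock(&table_lock);
 *         if (update_needed()) {              // hypothetical predicate
 *             qemu_co_rwlock_upgrade(&table_lock);
 *             if (update_needed()) {          // recheck after yielding
 *                 // ... exclusive update
 *             }
 *             qemu_co_rwlock_downgrade(&table_lock);
 *         }
 *         qemu_co_rwlock_unlock(&table_lock);
 *     }
 */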