mips: Ensure PC update with MTC0 single-stepping
[qemu.git] / util / qemu-thread-posix.c
1 /*
2 * Wrappers around mutex/cond/thread functions
3 *
4 * Copyright Red Hat, Inc. 2009
5 *
6 * Author:
7 * Marcelo Tosatti <mtosatti@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
11 *
12 */
13 #include <stdlib.h>
14 #include <stdio.h>
15 #include <errno.h>
16 #include <time.h>
17 #include <signal.h>
18 #include <stdint.h>
19 #include <string.h>
20 #include <limits.h>
21 #include <unistd.h>
22 #include <sys/time.h>
23 #ifdef __linux__
24 #include <sys/syscall.h>
25 #include <linux/futex.h>
26 #endif
27 #include "qemu/thread.h"
28 #include "qemu/atomic.h"
29
static bool name_threads;

/* Enable or disable debug naming of newly created threads.
 * Only affects threads created after the call; on hosts without
 * a setname facility this merely warns once per call. */
void qemu_thread_naming(bool enable)
{
    name_threads = enable;

#ifndef CONFIG_THREAD_SETNAME_BYTHREAD
    /* This is a debugging option, not fatal */
    if (enable) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
#endif
}
43
/* Report a fatal pthread-style error and abort.
 * @err is an errno-style code (as returned by pthread functions),
 * @msg is typically __func__ of the failing wrapper.  Never returns. */
static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}
49
50 void qemu_mutex_init(QemuMutex *mutex)
51 {
52 int err;
53 pthread_mutexattr_t mutexattr;
54
55 pthread_mutexattr_init(&mutexattr);
56 pthread_mutexattr_settype(&mutexattr, PTHREAD_MUTEX_ERRORCHECK);
57 err = pthread_mutex_init(&mutex->lock, &mutexattr);
58 pthread_mutexattr_destroy(&mutexattr);
59 if (err)
60 error_exit(err, __func__);
61 }
62
63 void qemu_mutex_destroy(QemuMutex *mutex)
64 {
65 int err;
66
67 err = pthread_mutex_destroy(&mutex->lock);
68 if (err)
69 error_exit(err, __func__);
70 }
71
72 void qemu_mutex_lock(QemuMutex *mutex)
73 {
74 int err;
75
76 err = pthread_mutex_lock(&mutex->lock);
77 if (err)
78 error_exit(err, __func__);
79 }
80
/* Try to acquire @mutex without blocking.
 * Returns 0 on success or the pthread error code (typically EBUSY);
 * unlike the other wrappers, failure here is not fatal. */
int qemu_mutex_trylock(QemuMutex *mutex)
{
    return pthread_mutex_trylock(&mutex->lock);
}
85
86 void qemu_mutex_unlock(QemuMutex *mutex)
87 {
88 int err;
89
90 err = pthread_mutex_unlock(&mutex->lock);
91 if (err)
92 error_exit(err, __func__);
93 }
94
95 void qemu_cond_init(QemuCond *cond)
96 {
97 int err;
98
99 err = pthread_cond_init(&cond->cond, NULL);
100 if (err)
101 error_exit(err, __func__);
102 }
103
104 void qemu_cond_destroy(QemuCond *cond)
105 {
106 int err;
107
108 err = pthread_cond_destroy(&cond->cond);
109 if (err)
110 error_exit(err, __func__);
111 }
112
113 void qemu_cond_signal(QemuCond *cond)
114 {
115 int err;
116
117 err = pthread_cond_signal(&cond->cond);
118 if (err)
119 error_exit(err, __func__);
120 }
121
122 void qemu_cond_broadcast(QemuCond *cond)
123 {
124 int err;
125
126 err = pthread_cond_broadcast(&cond->cond);
127 if (err)
128 error_exit(err, __func__);
129 }
130
131 void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
132 {
133 int err;
134
135 err = pthread_cond_wait(&cond->cond, &mutex->lock);
136 if (err)
137 error_exit(err, __func__);
138 }
139
140 void qemu_sem_init(QemuSemaphore *sem, int init)
141 {
142 int rc;
143
144 #if defined(__APPLE__) || defined(__NetBSD__)
145 rc = pthread_mutex_init(&sem->lock, NULL);
146 if (rc != 0) {
147 error_exit(rc, __func__);
148 }
149 rc = pthread_cond_init(&sem->cond, NULL);
150 if (rc != 0) {
151 error_exit(rc, __func__);
152 }
153 if (init < 0) {
154 error_exit(EINVAL, __func__);
155 }
156 sem->count = init;
157 #else
158 rc = sem_init(&sem->sem, 0, init);
159 if (rc < 0) {
160 error_exit(errno, __func__);
161 }
162 #endif
163 }
164
165 void qemu_sem_destroy(QemuSemaphore *sem)
166 {
167 int rc;
168
169 #if defined(__APPLE__) || defined(__NetBSD__)
170 rc = pthread_cond_destroy(&sem->cond);
171 if (rc < 0) {
172 error_exit(rc, __func__);
173 }
174 rc = pthread_mutex_destroy(&sem->lock);
175 if (rc < 0) {
176 error_exit(rc, __func__);
177 }
178 #else
179 rc = sem_destroy(&sem->sem);
180 if (rc < 0) {
181 error_exit(errno, __func__);
182 }
183 #endif
184 }
185
186 void qemu_sem_post(QemuSemaphore *sem)
187 {
188 int rc;
189
190 #if defined(__APPLE__) || defined(__NetBSD__)
191 pthread_mutex_lock(&sem->lock);
192 if (sem->count == UINT_MAX) {
193 rc = EINVAL;
194 } else {
195 sem->count++;
196 rc = pthread_cond_signal(&sem->cond);
197 }
198 pthread_mutex_unlock(&sem->lock);
199 if (rc != 0) {
200 error_exit(rc, __func__);
201 }
202 #else
203 rc = sem_post(&sem->sem);
204 if (rc < 0) {
205 error_exit(errno, __func__);
206 }
207 #endif
208 }
209
/* Fill *ts with the absolute wall-clock time @ms milliseconds from
 * now, normalized so that 0 <= tv_nsec < 1e9 (as required by
 * pthread_cond_timedwait / sem_timedwait). */
static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval now;

    gettimeofday(&now, NULL);
    ts->tv_sec = now.tv_sec + ms / 1000;
    ts->tv_nsec = now.tv_usec * 1000 + (ms % 1000) * 1000000;
    /* Each summand is below 1e9, so at most one carry is needed. */
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_nsec -= 1000000000;
        ts->tv_sec += 1;
    }
}
221
/* Decrement @sem, waiting at most @ms milliseconds.
 * Returns 0 on success, -1 on timeout; any other failure aborts.
 * ms <= 0 degenerates to a non-blocking try. */
int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

#if defined(__APPLE__) || defined(__NetBSD__)
    /* Emulated semaphore: sleep on the condvar until the count is
     * nonzero or the absolute deadline passes. */
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    /* Consume a count only if we did not time out, so that a post
     * racing with our timeout remains available to other waiters. */
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait. */
        do {
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        /* Retry on EINTR; the deadline is absolute, so the overall
         * wait does not stretch when signals interrupt it. */
        do {
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}
269
270 void qemu_sem_wait(QemuSemaphore *sem)
271 {
272 int rc;
273
274 #if defined(__APPLE__) || defined(__NetBSD__)
275 pthread_mutex_lock(&sem->lock);
276 while (sem->count == 0) {
277 rc = pthread_cond_wait(&sem->cond, &sem->lock);
278 if (rc != 0) {
279 error_exit(rc, __func__);
280 }
281 }
282 --sem->count;
283 pthread_mutex_unlock(&sem->lock);
284 #else
285 do {
286 rc = sem_wait(&sem->sem);
287 } while (rc == -1 && errno == EINTR);
288 if (rc < 0) {
289 error_exit(errno, __func__);
290 }
291 #endif
292 }
293
#ifdef __linux__
#define futex(...) syscall(__NR_futex, __VA_ARGS__)

/* Wake up to @n threads blocked in futex_wait() on @ev. */
static inline void futex_wake(QemuEvent *ev, int n)
{
    futex(ev, FUTEX_WAKE, n, NULL, NULL, 0);
}

/* Block while ev->value equals @val.  May return spuriously or
 * immediately if the value already changed; callers re-check. */
static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    futex(ev, FUTEX_WAIT, (int) val, NULL, NULL, 0);
}
#else
/* Portable emulation of FUTEX_WAKE using the event's condvar. */
static inline void futex_wake(QemuEvent *ev, int n)
{
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
}

/* Portable emulation of FUTEX_WAIT: sleep only if ev->value still
 * equals @val at the time of the check under ev->lock.
 * NOTE(review): ev->value is updated by atomic ops outside this lock
 * (see qemu_event_set/reset) -- confirm wake-ups cannot be lost. */
static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    pthread_mutex_lock(&ev->lock);
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif
325
326 /* Valid transitions:
327 * - free->set, when setting the event
328 * - busy->set, when setting the event, followed by futex_wake
329 * - set->free, when resetting the event
330 * - free->busy, when waiting
331 *
332 * set->busy does not happen (it can be observed from the outside but
333 * it really is set->free->busy).
334 *
335 * busy->free provably cannot happen; to enforce it, the set->free transition
336 * is done with an OR, which becomes a no-op if the event has concurrently
337 * transitioned to free or busy.
338 */
339
340 #define EV_SET 0
341 #define EV_FREE 1
342 #define EV_BUSY -1
343
344 void qemu_event_init(QemuEvent *ev, bool init)
345 {
346 #ifndef __linux__
347 pthread_mutex_init(&ev->lock, NULL);
348 pthread_cond_init(&ev->cond, NULL);
349 #endif
350
351 ev->value = (init ? EV_SET : EV_FREE);
352 }
353
/* Destroy @ev; no thread may be waiting on it.  On Linux the futex
 * needs no teardown, so this is a no-op there. */
void qemu_event_destroy(QemuEvent *ev)
{
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}
361
/* Set @ev, waking all current waiters.  Implements the free->set and
 * busy->set transitions of the state machine documented above. */
void qemu_event_set(QemuEvent *ev)
{
    /* Fast path: already set, nothing to do. */
    if (atomic_mb_read(&ev->value) != EV_SET) {
        /* xchg both publishes EV_SET and tells us whether any waiter
         * had marked the event busy before we got here. */
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up. */
            futex_wake(ev, INT_MAX);
        }
    }
}
371
/* Reset @ev (set->free transition).  A no-op if the event is already
 * free or busy. */
void qemu_event_reset(QemuEvent *ev)
{
    if (atomic_mb_read(&ev->value) == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing. Otherwise change EV_SET->EV_FREE.
         * OR-ing EV_FREE is harmless in both races: EV_FREE|EV_FREE
         * and EV_BUSY|EV_FREE leave the value unchanged (EV_BUSY is
         * all-ones), while EV_SET|EV_FREE performs the transition.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}
382
/* Block until @ev is set; returns immediately if it already is. */
void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    value = atomic_mb_read(&ev->value);
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters. No need to retry, because there cannot be
             * a concurrent busy->free transition. After the CAS, the
             * event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                /* The event was set concurrently; no need to sleep. */
                return;
            }
        }
        /* futex_wait returns early if ev->value is no longer EV_BUSY. */
        futex_wait(ev, EV_BUSY);
    }
}
403
/* Attempt to set the threads name; note that this is for debug, so
 * we're not going to fail if we can't set it.
 */
static void qemu_thread_set_name(QemuThread *thread, const char *name)
{
#ifdef CONFIG_PTHREAD_SETNAME_NP
    /* NOTE(review): pthread_setname_np() signatures differ between
     * platforms; this is the two-argument (glibc-style) form, which
     * CONFIG_PTHREAD_SETNAME_NP presumably gates on -- confirm in
     * the configure probe.  The return value is deliberately ignored. */
    pthread_setname_np(thread->thread, name);
#endif
}
413
414 void qemu_thread_create(QemuThread *thread, const char *name,
415 void *(*start_routine)(void*),
416 void *arg, int mode)
417 {
418 sigset_t set, oldset;
419 int err;
420 pthread_attr_t attr;
421
422 err = pthread_attr_init(&attr);
423 if (err) {
424 error_exit(err, __func__);
425 }
426 if (mode == QEMU_THREAD_DETACHED) {
427 err = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
428 if (err) {
429 error_exit(err, __func__);
430 }
431 }
432
433 /* Leave signal handling to the iothread. */
434 sigfillset(&set);
435 pthread_sigmask(SIG_SETMASK, &set, &oldset);
436 err = pthread_create(&thread->thread, &attr, start_routine, arg);
437 if (err)
438 error_exit(err, __func__);
439
440 if (name_threads) {
441 qemu_thread_set_name(thread, name);
442 }
443
444 pthread_sigmask(SIG_SETMASK, &oldset, NULL);
445
446 pthread_attr_destroy(&attr);
447 }
448
/* Store the calling thread's handle into @thread. */
void qemu_thread_get_self(QemuThread *thread)
{
    thread->thread = pthread_self();
}
453
454 bool qemu_thread_is_self(QemuThread *thread)
455 {
456 return pthread_equal(pthread_self(), thread->thread);
457 }
458
/* Terminate the calling thread, making @retval available to a
 * subsequent qemu_thread_join().  Never returns. */
void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}
463
464 void *qemu_thread_join(QemuThread *thread)
465 {
466 int err;
467 void *ret;
468
469 err = pthread_join(thread->thread, &ret);
470 if (err) {
471 error_exit(err, __func__);
472 }
473 return ret;
474 }