3 * \brief Thread synchronisation primitives.
7 * Copyright (c) 2007, 2008, 2009, 2010, 2012, ETH Zurich.
10 * This file is distributed under the terms in the attached LICENSE file.
11 * If you do not find this file, copies can be found by writing to:
12 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
15 #include <barrelfish/barrelfish.h>
16 #include <barrelfish/dispatch.h>
17 #include <barrelfish/dispatcher_arch.h>
18 #include <trace/trace.h>
19 #include <trace_definitions/trace_defs.h>
20 #include "threads_priv.h"
/*
 * Compile thread-tracing events out to no-ops unless thread tracing is
 * enabled for this build. Without the guard this #define would
 * unconditionally shadow the real trace_event() pulled in from
 * <trace/trace.h> above and silently disable all thread tracing.
 */
#ifndef TRACE_THREADS
#define trace_event(a,b,c) ((void)0)
#endif
28 * \brief Initialise a condition variable
30 * \param cond Condition variable pointer
32 void thread_cond_init(struct thread_cond *cond)
39 * \brief Wait for a condition variable
41 * This function waits for the given condition variable to be signalled
42 * (through thread_cond_signal() or thread_cond_broadcast()) before
43 * returning, while atomically unlocking the mutex pointed to by
46 * \param cond Condition variable pointer
47 * \param mutex Optional pointer to mutex to unlock.
49 void thread_cond_wait(struct thread_cond *cond, struct thread_mutex *mutex)
51 dispatcher_handle_t disp = disp_disable();
53 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_COND_WAIT_ENTER,
56 acquire_spinlock(&cond->lock);
60 struct thread *wakeup = thread_mutex_unlock_disabled(disp, mutex);
63 errval_t err = domain_wakeup_on_disabled(wakeup->disp, wakeup, disp);
64 assert_disabled(err_is_ok(err));
68 // Block on the condition variable and release spinlock
69 thread_block_and_release_spinlock_disabled(disp, &cond->queue, &cond->lock);
71 // Re-acquire the mutex
73 thread_mutex_lock(mutex);
76 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_COND_WAIT_LEAVE,
81 * \brief Signal a condition variable
83 * This function signals the condition variable, and wakes up one
84 * thread waiting on it.
86 * \param cond Condition variable pointer
88 void thread_cond_signal(struct thread_cond *cond)
90 struct thread *wakeup = NULL;
91 errval_t err = SYS_ERR_OK;
93 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_COND_SIGNAL,
96 // Wakeup one waiting thread
97 dispatcher_handle_t disp = disp_disable();
98 acquire_spinlock(&cond->lock);
99 if (cond->queue != NULL) {
100 wakeup = thread_unblock_one_disabled(disp, &cond->queue, NULL);
102 err = domain_wakeup_on_disabled(wakeup->disp, wakeup, disp);
105 release_spinlock(&cond->lock);
108 if(err_is_fail(err)) {
109 USER_PANIC_ERR(err, "remote wakeup from condition signal");
113 // XXX: Need directed yield to inter-disp thread
119 * \brief Broadcast signal a condition variable
121 * This function signals the condition variable, and wakes up all
122 * threads waiting on it.
124 * \param cond Condition variable pointer
126 void thread_cond_broadcast(struct thread_cond *cond)
128 struct thread *wakeupq = NULL;
129 bool foreignwakeup = false;
131 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_COND_BROADCAST,
134 // Wakeup all waiting threads
135 dispatcher_handle_t disp = disp_disable();
136 acquire_spinlock(&cond->lock);
137 wakeupq = thread_unblock_all_disabled(disp, &cond->queue, NULL);
138 release_spinlock(&cond->lock);
141 foreignwakeup = (wakeupq != NULL);
142 // Now, wakeup all on foreign dispatchers
143 while (wakeupq != NULL) {
144 struct thread *wakeup = wakeupq;
145 wakeupq = wakeupq->next;
146 errval_t err = domain_wakeup_on(wakeup->disp, wakeup);
147 if(err_is_fail(err)) {
148 USER_PANIC_ERR(err, "remote wakeup from condition broadcast");
153 // XXX: Need directed yield to inter-disp thread
159 * \brief Initialise a mutex
161 * \param mutex Mutex pointer
163 void thread_mutex_init(struct thread_mutex *mutex)
166 mutex->holder = NULL;
172 * \brief Lock a mutex
174 * This blocks until the given mutex is unlocked, and then atomically locks it.
176 * \param mutex Mutex pointer
178 void thread_mutex_lock(struct thread_mutex *mutex)
180 dispatcher_handle_t handle = disp_disable();
181 struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
183 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_MUTEX_LOCK_ENTER,
186 acquire_spinlock(&mutex->lock);
187 while (mutex->locked > 0) {
188 thread_block_and_release_spinlock_disabled(handle, &mutex->queue,
190 acquire_spinlock(&mutex->lock);
193 mutex->holder = disp_gen->current;
194 release_spinlock(&mutex->lock);
197 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_MUTEX_LOCK_LEAVE,
202 * \brief Lock a mutex
204 * This blocks until the given mutex is unlocked, and then atomically locks it.
206 * \param mutex Mutex pointer
208 void thread_mutex_lock_nested(struct thread_mutex *mutex)
210 dispatcher_handle_t handle = disp_disable();
211 struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
213 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_MUTEX_LOCK_NESTED_ENTER,
216 acquire_spinlock(&mutex->lock);
217 while (mutex->locked > 0
218 && mutex->holder != disp_gen->current) {
219 thread_block_and_release_spinlock_disabled(handle, &mutex->queue,
221 acquire_spinlock(&mutex->lock);
224 mutex->holder = disp_gen->current;
225 release_spinlock(&mutex->lock);
228 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_MUTEX_LOCK_NESTED_LEAVE,
233 * \brief Try to lock a mutex
235 * If the given mutex is unlocked, this atomically locks it and returns true,
236 * otherwise it returns false immediately.
238 * \param mutex Mutex pointer
240 * \returns true if lock acquired, false otherwise
242 bool thread_mutex_trylock(struct thread_mutex *mutex)
244 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_MUTEX_TRYLOCK,
247 // Try first to avoid contention
248 if (mutex->locked > 0) {
252 dispatcher_handle_t handle = disp_disable();
253 struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
256 acquire_spinlock(&mutex->lock);
257 if (mutex->locked > 0) {
262 mutex->holder = disp_gen->current;
264 release_spinlock(&mutex->lock);
271 * \brief Unlock a mutex, while disabled
273 * This function unlocks the given mutex. It may only be called while disabled.
275 * \param disp Dispatcher pointer
276 * \param mutex Mutex pointer
278 * \return Pointer to thread to be woken on foreign dispatcher
280 struct thread *thread_mutex_unlock_disabled(dispatcher_handle_t handle,
281 struct thread_mutex *mutex)
283 struct thread *ft = NULL;
285 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_MUTEX_UNLOCK,
288 acquire_spinlock(&mutex->lock);
289 assert_disabled(mutex->locked > 0);
291 if(mutex->locked == 1) {
292 // Wakeup one waiting thread
293 if (mutex->queue != NULL) {
294 // XXX: This assumes dequeueing is off the top of the queue
295 mutex->holder = mutex->queue;
296 ft = thread_unblock_one_disabled(handle, &mutex->queue, NULL);
298 mutex->holder = NULL;
305 release_spinlock(&mutex->lock);
310 * \brief Unlock a mutex
312 * This unlocks the given mutex.
314 * \param mutex Mutex pointer
316 void thread_mutex_unlock(struct thread_mutex *mutex)
318 dispatcher_handle_t disp = disp_disable();
319 struct thread *wakeup = thread_mutex_unlock_disabled(disp, mutex);
320 errval_t err = SYS_ERR_OK;
322 if (wakeup != NULL) {
323 err = domain_wakeup_on_disabled(wakeup->disp, wakeup, disp);
327 if(err_is_fail(err)) {
328 USER_PANIC_ERR(err, "remote wakeup from mutex unlock");
332 // XXX: Need directed yield to inter-disp thread
337 void thread_sem_init(struct thread_sem *sem, unsigned int value)
346 void thread_sem_wait(struct thread_sem *sem)
350 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_SEM_WAIT_ENTER,
353 dispatcher_handle_t disp = disp_disable();
354 acquire_spinlock(&sem->lock);
357 // Not possible to decrement -- wait!
358 thread_block_and_release_spinlock_disabled(disp, &sem->queue, &sem->lock);
360 // Decrement possible
362 release_spinlock(&sem->lock);
366 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_SEM_WAIT_LEAVE,
370 bool thread_sem_trywait(struct thread_sem *sem)
375 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_SEM_TRYWAIT,
378 dispatcher_handle_t disp = disp_disable();
379 acquire_spinlock(&sem->lock);
381 if(sem->value >= 1) {
382 // Decrement possible
387 release_spinlock(&sem->lock);
393 void thread_sem_post(struct thread_sem *sem)
397 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_SEM_POST, (uintptr_t)sem);
399 dispatcher_handle_t disp = disp_disable();
400 struct thread *wakeup = NULL;
401 errval_t err = SYS_ERR_OK;
402 acquire_spinlock(&sem->lock);
405 if(sem->value == 0 && sem->queue != NULL) {
406 wakeup = thread_unblock_one_disabled(disp, &sem->queue, NULL);
412 err = domain_wakeup_on_disabled(wakeup->disp, wakeup, disp);
413 assert_disabled(err_is_ok(err));
416 release_spinlock(&sem->lock);
419 if(err_is_fail(err)) {
420 USER_PANIC_ERR(err, "remote wakeup from semaphore post");
424 // XXX: Need directed yield to inter-disp thread