/**
 * \file
 * \brief Thread synchronisation primitives.
 */

/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2012, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */
15 #include <barrelfish/barrelfish.h>
16 #include <barrelfish/dispatch.h>
17 #include <barrelfish/dispatcher_arch.h>
18 #include <trace/trace.h>
19 #include <trace_definitions/trace_defs.h>
20 #include "threads_priv.h"
23 #define trace_event(a,b,c) ((void)0)
28 * \brief Initialise a condition variable
30 * \param cond Condition variable pointer
32 void thread_cond_init(struct thread_cond *cond)
39 * \brief Wait for a condition variable
41 * This function waits for the given condition variable to be signalled
42 * (through thread_cond_signal() or thread_cond_broadcast()) before
43 * returning, while atomically unlocking the mutex pointed to by
46 * \param cond Condition variable pointer
47 * \param mutex Optional pointer to mutex to unlock.
49 void thread_cond_wait(struct thread_cond *cond, struct thread_mutex *mutex)
51 dispatcher_handle_t disp = disp_disable();
53 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_COND_WAIT_ENTER,
56 acquire_spinlock(&cond->lock);
60 struct thread *wakeup = thread_mutex_unlock_disabled(disp, mutex);
63 errval_t err = domain_wakeup_on_disabled(wakeup->disp, wakeup, disp);
64 assert_disabled(err_is_ok(err));
68 // Block on the condition variable and release spinlock
69 thread_block_and_release_spinlock_disabled(disp, &cond->queue, &cond->lock);
71 // Re-acquire the mutex
73 thread_mutex_lock(mutex);
76 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_COND_WAIT_LEAVE,
81 * \brief Signal a condition variable
83 * This function signals the condition variable, and wakes up one
84 * thread waiting on it.
86 * \param cond Condition variable pointer
88 void thread_cond_signal(struct thread_cond *cond)
90 struct thread *wakeup = NULL;
91 errval_t err = SYS_ERR_OK;
93 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_COND_SIGNAL,
96 // Wakeup one waiting thread
97 dispatcher_handle_t disp = disp_disable();
98 acquire_spinlock(&cond->lock);
99 if (cond->queue != NULL) {
100 wakeup = thread_unblock_one_disabled(disp, &cond->queue, NULL);
102 err = domain_wakeup_on_disabled(wakeup->disp, wakeup, disp);
105 release_spinlock(&cond->lock);
108 if(err_is_fail(err)) {
109 USER_PANIC_ERR(err, "remote wakeup from condition signal");
113 // XXX: Need directed yield to inter-disp thread
119 * \brief Broadcast signal a condition variable
121 * This function signals the condition variable, and wakes up all
122 * threads waiting on it.
124 * \param cond Condition variable pointer
126 void thread_cond_broadcast(struct thread_cond *cond)
128 struct thread *wakeupq = NULL;
129 bool foreignwakeup = false;
131 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_COND_BROADCAST,
134 // Wakeup all waiting threads
135 dispatcher_handle_t disp = disp_disable();
136 acquire_spinlock(&cond->lock);
137 wakeupq = thread_unblock_all_disabled(disp, &cond->queue, NULL);
138 release_spinlock(&cond->lock);
141 foreignwakeup = (wakeupq != NULL);
142 // Now, wakeup all on foreign dispatchers
143 while (wakeupq != NULL) {
144 struct thread *wakeup = wakeupq;
145 wakeupq = wakeupq->next;
146 errval_t err = domain_wakeup_on(wakeup->disp, wakeup);
147 if(err_is_fail(err)) {
148 USER_PANIC_ERR(err, "remote wakeup from condition broadcast");
153 // XXX: Need directed yield to inter-disp thread
159 * \brief Initialise a mutex
161 * \param mutex Mutex pointer
163 void thread_mutex_init(struct thread_mutex *mutex)
166 mutex->holder = NULL;
172 * \brief Lock a mutex
174 * This blocks until the given mutex is unlocked, and then atomically locks it.
176 * \param mutex Mutex pointer
178 void thread_mutex_lock(struct thread_mutex *mutex)
180 dispatcher_handle_t handle = disp_disable();
181 struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
183 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_MUTEX_LOCK_ENTER,
186 acquire_spinlock(&mutex->lock);
187 while (mutex->locked > 0) {
188 thread_block_and_release_spinlock_disabled(handle, &mutex->queue,
190 handle = disp_disable();
191 disp_gen = get_dispatcher_generic(handle);
192 acquire_spinlock(&mutex->lock);
195 mutex->holder = disp_gen->current;
196 release_spinlock(&mutex->lock);
199 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_MUTEX_LOCK_LEAVE,
204 * \brief Lock a mutex
206 * This blocks until the given mutex is unlocked, and then atomically locks it.
208 * \param mutex Mutex pointer
210 void thread_mutex_lock_nested(struct thread_mutex *mutex)
212 dispatcher_handle_t handle = disp_disable();
213 struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
215 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_MUTEX_LOCK_NESTED_ENTER,
218 acquire_spinlock(&mutex->lock);
219 while (mutex->locked > 0
220 && mutex->holder != disp_gen->current) {
221 thread_block_and_release_spinlock_disabled(handle, &mutex->queue,
223 handle = disp_disable();
224 disp_gen = get_dispatcher_generic(handle);
225 acquire_spinlock(&mutex->lock);
228 mutex->holder = disp_gen->current;
229 release_spinlock(&mutex->lock);
232 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_MUTEX_LOCK_NESTED_LEAVE,
237 * \brief Try to lock a mutex
239 * If the given mutex is unlocked, this atomically locks it and returns true,
240 * otherwise it returns false immediately.
242 * \param mutex Mutex pointer
244 * \returns true if lock acquired, false otherwise
246 bool thread_mutex_trylock(struct thread_mutex *mutex)
248 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_MUTEX_TRYLOCK,
251 // Try first to avoid contention
252 if (mutex->locked > 0) {
256 dispatcher_handle_t handle = disp_disable();
257 struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
260 acquire_spinlock(&mutex->lock);
261 if (mutex->locked > 0) {
266 mutex->holder = disp_gen->current;
268 release_spinlock(&mutex->lock);
275 * \brief Unlock a mutex, while disabled
277 * This function unlocks the given mutex. It may only be called while disabled.
279 * \param disp Dispatcher pointer
280 * \param mutex Mutex pointer
282 * \return Pointer to thread to be woken on foreign dispatcher
284 struct thread *thread_mutex_unlock_disabled(dispatcher_handle_t handle,
285 struct thread_mutex *mutex)
287 struct thread *ft = NULL;
289 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_MUTEX_UNLOCK,
292 acquire_spinlock(&mutex->lock);
293 assert_disabled(mutex->locked > 0);
295 if(mutex->locked == 1) {
296 // Wakeup one waiting thread
297 if (mutex->queue != NULL) {
298 // XXX: This assumes dequeueing is off the top of the queue
299 mutex->holder = mutex->queue;
300 ft = thread_unblock_one_disabled(handle, &mutex->queue, NULL);
302 mutex->holder = NULL;
309 release_spinlock(&mutex->lock);
314 * \brief Unlock a mutex
316 * This unlocks the given mutex.
318 * \param mutex Mutex pointer
320 void thread_mutex_unlock(struct thread_mutex *mutex)
322 dispatcher_handle_t disp = disp_disable();
323 struct thread *wakeup = thread_mutex_unlock_disabled(disp, mutex);
324 errval_t err = SYS_ERR_OK;
326 if (wakeup != NULL) {
327 err = domain_wakeup_on_disabled(wakeup->disp, wakeup, disp);
331 if(err_is_fail(err)) {
332 USER_PANIC_ERR(err, "remote wakeup from mutex unlock");
336 // XXX: Need directed yield to inter-disp thread
341 void thread_sem_init(struct thread_sem *sem, unsigned int value)
350 void thread_sem_wait(struct thread_sem *sem)
354 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_SEM_WAIT_ENTER,
357 dispatcher_handle_t disp = disp_disable();
358 acquire_spinlock(&sem->lock);
360 while (sem->value < 1) {
361 // Not possible to decrement -- wait!
362 thread_block_and_release_spinlock_disabled(disp, &sem->queue, &sem->lock);
363 disp = disp_disable();
364 acquire_spinlock(&sem->lock);
367 // Decrement possible
369 release_spinlock(&sem->lock);
372 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_SEM_WAIT_LEAVE,
376 bool thread_sem_trywait(struct thread_sem *sem)
381 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_SEM_TRYWAIT,
384 dispatcher_handle_t disp = disp_disable();
385 acquire_spinlock(&sem->lock);
387 if(sem->value >= 1) {
388 // Decrement possible
393 release_spinlock(&sem->lock);
399 void thread_sem_post(struct thread_sem *sem)
403 trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_SEM_POST, (uintptr_t)sem);
405 dispatcher_handle_t disp = disp_disable();
406 struct thread *wakeup = NULL;
407 errval_t err = SYS_ERR_OK;
408 acquire_spinlock(&sem->lock);
411 if(sem->value == 0 && sem->queue != NULL) {
412 wakeup = thread_unblock_one_disabled(disp, &sem->queue, NULL);
418 err = domain_wakeup_on_disabled(wakeup->disp, wakeup, disp);
419 assert_disabled(err_is_ok(err));
422 release_spinlock(&sem->lock);
425 if(err_is_fail(err)) {
426 USER_PANIC_ERR(err, "remote wakeup from semaphore post");
430 // XXX: Need directed yield to inter-disp thread