/*
 * Copyright (c) 2009, 2010, 2012, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#ifndef LIBBARRELFISH_THREADS_PRIV_H
#define LIBBARRELFISH_THREADS_PRIV_H

#include <barrelfish/dispatcher_arch.h>
#include <barrelfish/except.h>

/// Maximum number of thread-local storage keys
#define MAX_TLS 16

/** \brief TLS dynamic thread vector data structure
 *
 * See: ELF handling for thread-local storage. Ulrich Drepper, Dec 2005.
 * http://www.akkadia.org/drepper/tls.pdf
 */
struct tls_dtv {
    uintptr_t gen;  ///< Generation count
    void *dtv[0];   ///< Variable-length array of pointers to TLS blocks
};
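
/*
 * Illustrative sketch only (not part of this interface): following Drepper's
 * paper cited above, a dynamic TLS access conceptually resolves through the
 * DTV as "base of the module's TLS block, plus the variable's offset". The
 * helper name and the 0-based module index below are assumptions made for
 * illustration, not necessarily the convention used by this library:
 *
 *   static inline void *tls_lookup_sketch(struct tls_dtv *dtv,
 *                                         size_t module_idx, size_t offset)
 *   {
 *       // assumes dtv->gen was already checked against the current
 *       // generation and the module's TLS block is allocated
 *       return (char *)dtv->dtv[module_idx] + offset;
 *   }
 *
 * A real lookup must additionally compare 'gen' with the global generation
 * count and allocate or update per-module blocks on demand.
 */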

enum thread_state {
    THREAD_STATE_NULL = 0,
    THREAD_STATE_RUNNABLE,
    THREAD_STATE_BLOCKED,
    THREAD_STATE_EXITED
};

/** \brief A thread of execution / thread control block (TCB)
 *
 * NB: on some ABIs (namely x86_{32,64}), the TLS blocks for initially-loaded
 * (i.e. not dlopen()ed) modules _precede_ this structure in memory. Therefore
 * it is not safe to directly malloc() or free() a thread structure; see the
 * layout sketch after the structure definition below.
 */
struct thread {
    /* XXX: The offsets of the first two fields (self pointer and dispatcher
     * pointer) are depended upon by the ABI and/or assembly code. Don't change!
     */
    struct thread *self;                ///< Points to itself
    dispatcher_handle_t disp;           ///< Dispatcher affinity
    struct tls_dtv *tls_dtv;            ///< TLS dynamic thread vector
    struct thread *next, *prev;         ///< Next/prev threads in list
    arch_registers_state_t regs;        ///< Register state snapshot
    void *stack;                        ///< Malloced stack area
    void *stack_top;                    ///< Stack bounds
    void *exception_stack;              ///< Stack for exception handling
    void *exception_stack_top;          ///< Bounds of exception stack
    exception_handler_fn exception_handler; ///< Exception handler, or NULL
    void *userptr;                      ///< User's thread-local pointer
    void *userptrs[MAX_TLS];            ///< User's thread-local pointers
    uintptr_t yield_epoch;              ///< Yield epoch
    void *wakeup_reason;                ///< Value returned from block()
    coreid_t coreid;                    ///< XXX: Core ID affinity
    int return_value;                   ///< Value returned on exit
    struct thread_cond exit_condition;  ///< Thread exit condition
    struct thread_mutex exit_lock;      ///< Protects exited state
    enum thread_state state;            ///< Thread state
    bool paused;                        ///< Thread is paused (not runnable)
    bool detached;                      ///< true if detached
    bool joining;                       ///< true if someone is joining
    bool in_exception;                  ///< true if running exception handler
    bool used_fpu;                      ///< Ever used the FPU?
#if defined(__x86_64__)
    uint16_t thread_seg_selector;       ///< Segment selector for TCB
#endif
    arch_registers_fpu_state_t fpu_state; ///< FPU state
    void *slab;                         ///< Base of slab block containing this TCB
    uintptr_t id;                       ///< User-defined thread identifier

    uint32_t token_number;              ///< Next token to use for an RPC
    uint32_t token;                     ///< Token to be received
    struct waitset_chanstate *channel;  ///< Channel this thread is waiting on

    bool rpc_in_progress;               ///< RPC in progress
    errval_t async_error;               ///< RPC asynchronous error
    uint32_t outgoing_token;            ///< Token of outgoing message
};
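
/*
 * Rough layout sketch for the note in the comment above (illustrative only;
 * exact sizes and offsets are not specified here):
 *
 *   lower addresses                              higher addresses
 *   +--------------------------------+---------------------------+
 *   | TLS blocks of initially-loaded | struct thread (TCB)       |
 *   | modules                        | self, disp, ...           |
 *   +--------------------------------+---------------------------+
 *
 * The TCB is therefore part of a larger allocation (cf. the 'slab' field
 * recording the base of the containing slab block), which is why a
 * struct thread must not be malloc()ed or free()d on its own.
 */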

void thread_enqueue(struct thread *thread, struct thread **queue);
struct thread *thread_dequeue(struct thread **queue);
void thread_remove_from_queue(struct thread **queue, struct thread *thread);
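
/*
 * Usage sketch (illustrative only): a queue is simply a 'struct thread *'
 * head, and threads are linked through their next/prev fields. The variable
 * names are made up, 't' and 'v' stand for pointers to existing threads, and
 * an empty queue is assumed to be represented by NULL:
 *
 *   struct thread *waitq = NULL;                 // empty queue
 *   thread_enqueue(t, &waitq);                   // add thread t
 *   struct thread *u = thread_dequeue(&waitq);   // take one thread off
 *   thread_remove_from_queue(&waitq, v);         // unlink a specific thread v
 */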

/* Must only be called by the dispatcher, while disabled */
void thread_init_disabled(dispatcher_handle_t handle, bool init_domain);

/// Returns true if there is non-threaded work to be done on this dispatcher
/// (i.e. if we still need to run)
static inline bool havework_disabled(dispatcher_handle_t handle)
{
    struct dispatcher_generic *disp = get_dispatcher_generic(handle);
    return disp->runq != NULL
#ifdef CONFIG_INTERCONNECT_DRIVER_LMP
           || disp->lmp_send_events_list != NULL
#endif
           || disp->polled_channels != NULL;
}

void *thread_block(struct thread **queue);
void *thread_block_disabled(dispatcher_handle_t handle, struct thread **queue);
void *thread_block_and_release_spinlock_disabled(dispatcher_handle_t handle,
                                                 struct thread **queue,
                                                 spinlock_t *spinlock);
struct thread *thread_unblock_one(struct thread **queue, void *reason);
struct thread *thread_unblock_one_disabled(dispatcher_handle_t handle,
                                           struct thread **queue, void *reason);
struct thread *thread_unblock_all_disabled(dispatcher_handle_t handle,
                                           struct thread **queue, void *reason);
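
/*
 * Sketch of the intended pairing (illustrative only; variable names are made
 * up): a thread blocks itself on a wait queue and later gets a wakeup reason
 * from whichever thread unblocks it. The 'reason' pointer handed to the
 * unblock call is what thread_block() returns on the waiter's side (cf. the
 * wakeup_reason field and its "Value returned from block()" comment above):
 *
 *   // waiting thread
 *   void *why = thread_block(&waitq);            // returns once woken up
 *
 *   // waking thread
 *   struct thread *woken = thread_unblock_one(&waitq, my_reason);
 *
 * The *_disabled variants take a dispatcher handle and, following the
 * convention noted for thread_init_disabled() above, are meant to be called
 * while the dispatcher is disabled.
 */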

struct thread *thread_create_unrunnable(thread_func_t start_func, void *arg,
                                        size_t stacksize);

void thread_init_remote(dispatcher_handle_t handle, struct thread *thread);
void threads_prepare_to_span(dispatcher_handle_t newdh);

void thread_run_disabled(dispatcher_handle_t handle);
void thread_deliver_exception_disabled(dispatcher_handle_t handle,
                                       enum exception_type type, int subtype,
                                       void *addr, arch_registers_state_t *regs);

#endif // LIBBARRELFISH_THREADS_PRIV_H