lib/barrelfish/include/threads_priv.h
/*
 * Copyright (c) 2009, 2010, 2012, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#ifndef LIBBARRELFISH_THREADS_PRIV_H
#define LIBBARRELFISH_THREADS_PRIV_H

#include <barrelfish/dispatcher_arch.h>
#include <barrelfish/except.h>

/// Maximum number of thread-local storage keys
#define MAX_TLS         16

/// Maximum number of buffered capability receive slots
#define MAX_RECV_SLOTS   4

/** \brief TLS dynamic thread vector data structure
 *
 * See: ELF handling for thread-local storage. Ulrich Drepper, Dec 2005.
 * http://www.akkadia.org/drepper/tls.pdf
 */
struct tls_dtv {
    uintptr_t gen; ///< Generation count
    void *dtv[0];  ///< Variable-length array of pointers to TLS blocks
};
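
/*
 * Illustrative sketch only, not part of this header's API: how a
 * general-dynamic TLS access would resolve through the DTV above (module
 * index plus offset within that module's block), following Drepper's
 * document. The function name and its parameters are hypothetical.
 */
#if 0
static inline void *tls_get_addr_sketch(struct tls_dtv *dtv,
                                        size_t module, size_t offset)
{
    /* dtv->dtv[module] points at the TLS block of the given module */
    return (char *)dtv->dtv[module] + offset;
}
#endif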

enum thread_state {
    THREAD_STATE_NULL = 0,
    THREAD_STATE_RUNNABLE,
    THREAD_STATE_BLOCKED,
    THREAD_STATE_EXITED
};

/** \brief A thread of execution / thread control block (TCB)
 *
 * NB: on some ABIs (namely x86_{32,64}), the TLS blocks for initially-loaded
 * (i.e. not dlopen()ed) modules _precede_ this structure in memory. Therefore
 * it's not safe to directly malloc() or free() a thread structure.
 */
struct thread {
    /* XXX: The offsets of the first two fields (self pointer and dispatcher
     * pointer) are depended upon by the ABI and/or assembly code. Don't change!
     */
    struct thread       *self;              ///< Points to itself
    dispatcher_handle_t disp;               ///< Dispatcher affinity
    struct tls_dtv      *tls_dtv;           ///< TLS thread vector
    struct thread       *next, *prev;       ///< Next/prev threads in list
    arch_registers_state_t regs;            ///< Register state snapshot
    void                *stack;             ///< Malloced stack area
    void                *stack_top;         ///< Stack bounds
    void                *exception_stack;   ///< Stack for exception handling
    void                *exception_stack_top; ///< Bounds of exception stack
    exception_handler_fn exception_handler; ///< Exception handler, or NULL
    void                *userptr;           ///< User's thread local pointer
    void                *userptrs[MAX_TLS]; ///< User's thread local pointers
    uintptr_t           yield_epoch;        ///< Yield epoch
    void                *wakeup_reason;     ///< Value returned from block()
    coreid_t            coreid;             ///< XXX: Core ID affinity
    int                 return_value;       ///< Value returned on exit
    struct thread_cond  exit_condition;     ///< Thread exit condition
    struct thread_mutex exit_lock;          ///< Protects exited state
    enum thread_state   state;              ///< Thread state
    bool                paused;             ///< Thread is paused (not runnable)
    bool                detached;           ///< true if detached
    bool                joining;            ///< true if someone is joining
    bool                in_exception;       ///< true if running exception handler
    bool                used_fpu;           ///< Ever used FPU?
#if defined(__x86_64__)
    uint16_t            thread_seg_selector; ///< Segment selector for TCB
#endif
    arch_registers_fpu_state_t fpu_state;   ///< FPU state
    void                *slab;              ///< Base of slab block containing this TCB
    uintptr_t           id;                 ///< User-defined thread identifier

    uint32_t            token_number;       ///< RPC next token
    uint32_t            token;              ///< Token to be received
    struct waitset_chanstate *channel;      ///< Channel this thread is waiting on

    bool                rpc_in_progress;    ///< RPC in progress
    errval_t            async_error;        ///< RPC async error
    uint32_t            outgoing_token;     ///< Token of outgoing message
    struct capref recv_slots[MAX_RECV_SLOTS]; ///< Queued cap recv slots
    int8_t              recv_slot_count;    ///< Number of currently queued recv slots
    struct waitset_chanstate *local_trigger; ///< Trigger for a local thread event
};
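
/*
 * Rough layout implied by the note above for variant-II TLS ABIs such as
 * x86_{32,64}: the initial TLS blocks sit at addresses just below the TCB,
 * and the architecture's thread pointer typically holds the address of the
 * 'self' field at the very start of struct thread.
 *
 *   low addresses                                          high addresses
 *   [ TLS block, module N | ... | TLS block, module 1 | struct thread ... ]
 *                                                       ^ thread pointer
 */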

void thread_enqueue(struct thread *thread, struct thread **queue);
struct thread *thread_dequeue(struct thread **queue);
void thread_remove_from_queue(struct thread **queue, struct thread *thread);
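
/*
 * Illustrative sketch only: the queue functions above maintain an intrusive
 * list threaded through the next/prev fields of struct thread. The queue
 * variable and helper names below are hypothetical.
 */
#if 0
static struct thread *example_waiters = NULL;      /* hypothetical queue head */

static void example_track(struct thread *t)
{
    thread_enqueue(t, &example_waiters);            /* add t to the queue */
}

static void example_untrack(struct thread *t)
{
    thread_remove_from_queue(&example_waiters, t);  /* take t off the queue again */
}
#endif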

/* must only be called by dispatcher, while disabled */
void thread_init_disabled(dispatcher_handle_t handle, bool init_domain);

/// Returns true if there is non-threaded work to be done on this dispatcher
/// (i.e. if we still need to run)
static inline bool havework_disabled(dispatcher_handle_t handle)
{
    struct dispatcher_generic *disp = get_dispatcher_generic(handle);
    return disp->runq != NULL
#ifdef CONFIG_INTERCONNECT_DRIVER_LMP
            || disp->lmp_send_events_list != NULL
#endif
            || disp->polled_channels != NULL
            || disp->notificators != NULL
            || disp->ump_send_events_list != NULL
            ;
}

void *thread_block(struct thread **queue);
void *thread_block_disabled(dispatcher_handle_t handle, struct thread **queue);
void *thread_block_and_release_spinlock_disabled(dispatcher_handle_t handle,
                                                 struct thread **queue,
                                                 spinlock_t *spinlock);
struct thread *thread_unblock_one(struct thread **queue, void *reason);
struct thread *thread_unblock_one_disabled(dispatcher_handle_t handle,
                                           struct thread **queue, void *reason);
struct thread *thread_unblock_all_disabled(dispatcher_handle_t handle,
                                           struct thread **queue, void *reason);
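
/*
 * Illustrative sketch only, not part of this header's API: a minimal
 * wait/wakeup pairing built from thread_block() and thread_unblock_one().
 * The queue variable, helper names, and the reason value are hypothetical.
 */
#if 0
static struct thread *example_blocked = NULL;   /* hypothetical wait queue */

static void *example_wait(void)
{
    /* Block the calling thread on the queue; the return value is whatever
     * 'reason' the waking thread passed to thread_unblock_one(). */
    return thread_block(&example_blocked);
}

static void example_signal(void *reason)
{
    /* Make at most one blocked thread runnable again, handing it 'reason'. */
    thread_unblock_one(&example_blocked, reason);
}
#endif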

struct thread *thread_create_unrunnable(thread_func_t start_func, void *arg,
                                        size_t stacksize);

void thread_init_remote(dispatcher_handle_t handle, struct thread *thread);
void threads_prepare_to_span(dispatcher_handle_t newdh);

void thread_run_disabled(dispatcher_handle_t handle);
void thread_deliver_exception_disabled(dispatcher_handle_t handle,
                                       enum exception_type type, int subtype,
                                       void *addr, arch_registers_state_t *regs);

#endif // LIBBARRELFISH_THREADS_PRIV_H