3 * \brief Manage domain spanning cores
5 * \bug Need to specify how big the default thread on the spanned dispatcher
6 * should be because we cannot dynamically grow our stacks
8 * \bug Can only do domain_new_dispatcher() when no other dispatchers have
9 * threads (except for the internal interdisp-thread).
13 * Copyright (c) 2009, 2010, 2011, 2012, ETH Zurich.
14 * All rights reserved.
16 * This file is distributed under the terms in the attached LICENSE file.
17 * If you do not find this file, copies can be found by writing to:
18 * ETH Zurich D-INFK, CAB F.78, Universitaetstr. 6, CH-8092 Zurich,
19 * Attn: Systems Group.
23 #include <barrelfish/barrelfish.h>
24 #include <barrelfish/curdispatcher_arch.h>
25 #include <barrelfish/dispatcher_arch.h>
26 #include <barrelfish/waitset_chan.h>
27 #include <arch/registers.h>
28 #include <barrelfish/dispatch.h>
29 #include <if/interdisp_defs.h>
30 #include "arch/threads.h"
32 #include <if/monitor_defs.h>
33 #include "threads_priv.h"
34 #include "waitset_chan_priv.h"
///< Struct to maintain per dispatcher domain library state
iref_t iref;                          ///< Iref for the interdisp service exported on this core
struct interdisp_binding *b[MAX_CPUS]; ///< Bindings to the other dispatchers, indexed by core id (NULL if none)
struct waitset interdisp_ws;          ///< Waitset serviced by the inter-dispatcher handler thread
struct thread *default_waitset_handler; ///< Thread servicing the default waitset while this domain is spanned
struct thread *remote_wakeup_queue;   ///< Queue of threads waiting to be woken on a remote dispatcher
struct waitset_chanstate remote_wakeup_event; ///< Channel used to signal remote wakeups to the interdisp waitset
///< Struct to send init information to the dispatcher that was spanned
struct remote_core_state {
    iref_t iref;                  ///< Iref of the interdisp service to connect to
    uint8_t core_id;              ///< Core id of the domain that spanned this dispatcher
    struct span_domain_state *span_domain_state; ///< Reference to the span_domain_state of the "server"
    bool initialized;             ///< true if remote core is fully initialized
    int cnt;                      ///< Index of the dispatcher currently being connected to
///< Struct for spanning domains state machine; allocated by
///< domain_new_dispatcher_varstack() and freed once spanning completes/fails
struct span_domain_state {
    struct thread *thread;              ///< Thread to run on remote core
    uint8_t core_id;                    ///< Id of the remote core
    errval_t err;                       ///< To propagate error value
    domain_spanned_callback_t callback; ///< Callback for when domain has spanned
    void *callback_arg;                 ///< Optional argument to pass with callback
    struct capref frame;                ///< Dispatcher frame
    struct capref vroot;                ///< VRoot cap
    struct event_queue_node event_qnode; ///< Event queue node
    struct waitset_chanstate initev;    ///< Dispatcher initialized event
    bool initialized;                   ///< True if remote initialized
///< Array of all interdisp IREFs in the domain, indexed by core id
///< (NULL_IREF for cores this domain has no dispatcher on)
static iref_t allirefs[MAX_CPUS];
/**
 * \brief Event handler run on the default waitset once the newly spanned
 *        dispatcher reports it has finished initializing.
 *
 * \param arg Pointer to the span_domain_state of this spanning operation.
 */
static void dispatcher_initialized_handler(void *arg)
    struct span_domain_state *span_domain_state = arg;
    struct domain_state *domain_state = get_domain_state();

    // XXX: Tell currently active interdisp-threads to handle default waitset
    for(int i = 0; i < MAX_CPUS; i++) {
        struct interdisp_binding *b = domain_state->b[i];

        // Skip ourselves, the freshly spanned core, and cores with no binding
        if(disp_get_core_id() != i &&
           span_domain_state->core_id != i && b != NULL) {
            errval_t err = b->tx_vtbl.span_slave_done(b, NOP_CONT);
            assert(err_is_ok(err)); // NOTE(review): may fail with TX_BUSY — confirm

    /* Upcall into the domain_new_dispatcher callback if registered */
    if (span_domain_state->callback) {
        span_domain_state->callback(span_domain_state->callback_arg, SYS_ERR_OK);
    // Releases the wait loop in domain_new_dispatcher_varstack(), which frees us
    span_domain_state->initialized = 1;
    //free(span_domain_state);
/**
 * \brief Handler for the dispatcher_initialized msg type
 *
 * Called when a recently spanned dispatcher has initialized.
 * Stores its connection object, and upcalls into the registered callback
 * by triggering an event on the default waitset.
 *
 * \param st Binding the message arrived on
 * \param id Opaque pointer to the span_domain_state, cast to genvaddr_t
 */
static void dispatcher_initialized(struct interdisp_binding *st, genvaddr_t id)
    struct span_domain_state *span_domain_state = (struct span_domain_state*)(uintptr_t)id;

    // Signal the default waitset of this event
    struct event_closure closure = {
        .handler = dispatcher_initialized_handler,
        .arg = span_domain_state,
    waitset_chanstate_init(&span_domain_state->initev, CHANTYPE_EVENT_QUEUE);
    errval_t err = waitset_chan_trigger_closure(get_default_waitset(),
                                                &span_domain_state->initev,
    if(err_is_fail(err)) {
        USER_PANIC_ERR(err, "Triggering default waitset");
122 static void send_cap_request(struct interdisp_binding *st,
123 struct capref cap, genvaddr_t info)
125 errval_t err = SYS_ERR_OK, err2;
126 struct capref *dest = (struct capref *)(uintptr_t)info;
128 err = cap_copy(*dest, cap);
129 if(err_is_fail(err)) {
130 err_push(err, LIB_ERR_CAP_COPY_FAIL);
131 DEBUG_ERR(err, "cap_copy");
135 err = cap_destroy(cap);
136 if(err_is_fail(err)) {
137 err_push(err, LIB_ERR_CAP_DELETE_FAIL);
138 DEBUG_ERR(err, "cap_destroy default");
144 err2 = st->tx_vtbl.send_cap_reply(st, NOP_CONT, err);
145 if (err_is_fail(err2)) {
146 DEBUG_ERR(err, "Failed to send send_cap_reply");
// Global state for the (one-at-a-time) domain_send_cap() operation
static errval_t send_cap_err = SYS_ERR_OK; ///< Result of the last send_cap operation
static bool cap_received = false;          ///< Set once the send_cap reply arrives

/**
 * \brief Reply handler for send_cap_request; records the remote result.
 */
static void send_cap_reply(struct interdisp_binding *st, errval_t err)
/**
 * \brief Handler for a remote thread-creation request: creates the thread
 *        on THIS dispatcher and replies with its handle.
 *
 * \param stacksize Requested stack size in bytes (selects between the
 *                  variable-stack and default creation paths)
 * \param req       Opaque pointer to the sender's create_thread_req record
 */
static void create_thread_request(struct interdisp_binding *b,
                                  genvaddr_t funcaddr, genvaddr_t argaddr,
                                  uint64_t stacksize, genvaddr_t req)
    thread_func_t start_func = (thread_func_t)(uintptr_t)funcaddr;
    void *arg = (void *)(uintptr_t)argaddr;
    struct thread *newthread;

    // XXX: Probably want to return pointer to thread struct to caller
    // explicit stack size requested
    newthread = thread_create_varstack(start_func, arg, stacksize);
    // default stack size
    newthread = thread_create(start_func, arg);
    assert(newthread != NULL);
    errval_t err = b->tx_vtbl.create_thread_reply(b, NOP_CONT, SYS_ERR_OK,
                                                  (genvaddr_t)(lvaddr_t)newthread,
    assert(err_is_ok(err));
///< Request record for a cross-core thread creation; filled in by create_thread_reply()
struct create_thread_req {
    struct thread *thread; ///< Handle of the thread created on the remote core
185 static void create_thread_reply(struct interdisp_binding *b,
186 errval_t err, genvaddr_t thread, genvaddr_t req)
188 assert(err_is_ok(err));
190 struct create_thread_req *r = (struct create_thread_req*)(lvaddr_t)req;
191 r->thread = (struct thread *)(lvaddr_t)thread;
192 r->reply_received = true;
/**
 * \brief Handler for a remote wakeup request: adopts the given thread and
 *        makes it runnable on THIS dispatcher.
 */
static void wakeup_thread_request(struct interdisp_binding *b,
    coreid_t core_id = disp_get_core_id();
    struct thread *wakeup = (struct thread *)(uintptr_t)taddr;
    // Disable for exclusive access to this dispatcher's run queue
    dispatcher_handle_t handle = disp_disable();
    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
    /* assert_disabled(wakeup->disp == handle); */
    assert_disabled(wakeup->coreid == core_id);
    // The thread now belongs to this dispatcher
    wakeup->disp = handle;
    thread_enqueue(wakeup, &disp_gen->runq);
/**
 * \brief Handler for a remote join request: joins the local thread and
 *        replies with the join result and the thread's return value.
 */
static void join_thread_request(struct interdisp_binding *b,
                                genvaddr_t taddr, genvaddr_t req)
    struct thread *thread = (struct thread *)(lvaddr_t)taddr;
    // The thread being joined must live on this core
    assert(thread->coreid == disp_get_core_id());
    errval_t err = thread_join(thread, &retval);
    err = b->tx_vtbl.join_thread_reply(b, NOP_CONT, err, retval, req);
    assert(err_is_ok(err));
///< Request record for a cross-core thread join; filled in by join_thread_reply()
struct join_thread_req {
/**
 * \brief Reply handler for a remote join: records the result and flags
 *        completion so the waiting caller's dispatch loop can exit.
 */
static void join_thread_reply(struct interdisp_binding *b,
                              errval_t err, uint64_t retval, genvaddr_t req)
    struct join_thread_req *r = (struct join_thread_req *)(lvaddr_t)req;
    r->reply_received = true;
236 * XXX: The whole span_slave*() thing is a hack to allow all
237 * dispatchers to wait on both the monitor and interdisp waitsets
238 * while we bind to all.
/**
 * \brief Detached helper thread that services the default waitset.
 */
static int span_slave_thread(void *arg)
    // Detach so nobody has to join this thread
    errval_t err = thread_detach(thread_self());
    assert(err_is_ok(err));

    event_dispatch(get_default_waitset());
/**
 * \brief Handler for the span_slave message (disabled — see XXX in vtbl).
 */
static void span_slave_request(struct interdisp_binding *b)
    USER_PANIC("shouldn't be called");
    // NOTE(review): unreachable — USER_PANIC() above does not return
    thread_create(span_slave_thread, NULL);
/**
 * \brief Event handler for span_slave_done (disabled — see XXX in vtbl).
 */
static void span_slave_done_handler(void *cs)
    USER_PANIC("shouldn't be called");
/**
 * \brief Handler for the span_slave_done message (disabled — see XXX in vtbl).
 */
static void span_slave_done_request(struct interdisp_binding *b)
    USER_PANIC("shouldn't be called");
    // NOTE(review): everything below is unreachable — USER_PANIC() does not return
    struct waitset_chanstate *cs = malloc(sizeof(struct waitset_chanstate));

    // Signal the default waitset of this event
    struct event_closure closure = {
        .handler = span_slave_done_handler,
    waitset_chanstate_init(cs, CHANTYPE_EVENT_QUEUE);
    errval_t err = waitset_chan_trigger_closure(get_default_waitset(), cs,
    if(err_is_fail(err)) {
        USER_PANIC_ERR(err, "Triggering default waitset");
/**
 * \brief Handler for span_eager_connect: remembers the sending core's binding
 *        so we can later message it directly.
 */
static void span_eager_connect_request(struct interdisp_binding *b,
    struct domain_state *domain_state = get_domain_state();

    /* Store the sending core's connection */
    domain_state->b[core_id] = b;
///< Receive handler table installed on every inter-dispatcher binding
static struct interdisp_rx_vtbl interdisp_vtbl = {
    .dispatcher_initialized = dispatcher_initialized,

    .send_cap_request = send_cap_request,
    .send_cap_reply = send_cap_reply,

    .wakeup_thread = wakeup_thread_request,
    .create_thread_request = create_thread_request,
    .create_thread_reply = create_thread_reply,

    .join_thread_request = join_thread_request,
    .join_thread_reply = join_thread_reply,

    // XXX: Hack to allow domain_new_dispatcher() to proceed when not all
    // default waitsets are serviced
    .span_slave = span_slave_request,
    .span_slave_done = span_slave_done_request,
    .span_eager_connect = span_eager_connect_request,
314 * \brief Called when the "client" connects to "server"
316 * Make the connection a "server" connection, free unnecessary state.
317 * Send init msg to the dispatcher that spanned this dispatcher.
319 static void client_connected(void *st, errval_t err,
320 struct interdisp_binding *b)
322 struct remote_core_state *state = (struct remote_core_state*)st;
323 struct domain_state *domain_state = get_domain_state();
325 if(err_is_fail(err)) {
326 DEBUG_ERR(err, "binding to interdisp service");
330 /* Set it on the domain library state */
331 b->rx_vtbl = interdisp_vtbl;
332 domain_state->b[state->cnt] = b;
334 // Send it our core id
335 err = b->tx_vtbl.span_eager_connect(b, NOP_CONT, disp_get_core_id());
336 if(err_is_fail(err)) {
337 USER_PANIC_ERR(err, "sending span_eager_connect");
340 // Connect to next active dispatcher
343 if(state->cnt == disp_get_core_id()) {
346 } while(allirefs[state->cnt] == NULL_IREF && state->cnt < MAX_CPUS);
348 if(state->cnt < MAX_CPUS) {
349 err = interdisp_bind(allirefs[state->cnt], client_connected,
350 state, &domain_state->interdisp_ws,
351 IDC_BIND_FLAGS_DEFAULT);
352 if(err_is_fail(err)) {
353 USER_PANIC_ERR(err, "Binding to inter-dispatcher service");
356 struct interdisp_binding *sb = domain_state->b[state->core_id];
357 /* Send initialized msg to the dispatcher that spanned us */
358 errval_t err2 = sb->tx_vtbl.
359 dispatcher_initialized(sb, NOP_CONT,
360 (uintptr_t)state->span_domain_state);
361 if (err_is_fail(err2)) {
362 DEBUG_ERR(err, "failed to send initalized msg");
366 state->initialized = true;
/**
 * \brief Called when another dispatcher of this domain connects to our
 *        interdisp service; installs the message handler table.
 */
static errval_t server_connected(void *st, struct interdisp_binding *b)
    b->rx_vtbl = interdisp_vtbl;
/**
 * \brief Called when domain gets a interdisp service.
 * It will set it on the domain_state.
 *
 * \param iref Iref under which our interdisp service was exported
 */
static void server_listening(void *st, errval_t err, iref_t iref)
    if(err_is_fail(err)) {
        DEBUG_ERR(err, "interdisp service export");

    struct domain_state *domain_state = get_domain_state();
    domain_state->iref = iref;

    // Also set in the global array
    allirefs[disp_get_core_id()] = iref;
    // Releases the wait loop in domain_init()
    domain_state->conditional = true;
/**
 * \brief Called on the inter-disp handler thread, when another thread
 * on this dispatcher wants to wakeup a thread on a foreign dispatcher.
 *
 * Drains the remote wakeup queue, forwarding each thread to the
 * dispatcher that owns it via a wakeup_thread message.
 */
static void handle_wakeup_on(void *arg)
    struct domain_state *domain_state = get_domain_state();
    assert(domain_state != NULL);

    // Dequeue all (disable to ensure mutual exclusion -- per dispatcher)
    struct thread *thread = NULL;
    dispatcher_handle_t disp = disp_disable();
    if(domain_state->remote_wakeup_queue != NULL) {
        thread = thread_dequeue(&domain_state->remote_wakeup_queue);

    // Break if queue empty

    /* coreid_t core_id = disp_handle_get_core_id(thread->disp); */
    coreid_t core_id = thread->coreid;
    // We must already hold a binding to the target core
    assert(domain_state->b[core_id] != NULL);

    struct interdisp_binding *b = domain_state->b[core_id];
    err = b->tx_vtbl.wakeup_thread(b, NOP_CONT, (genvaddr_t)(uintptr_t)thread);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "wakeup_thread");
/**
 * \brief Handler thread for inter-dispatcher messages
 * \param arg Pointer to inter-dispatcher waitset
 * \return 0 on successful exit
 */
static int interdisp_msg_handler(void *arg)
    struct waitset *ws = arg;

    // Service the inter-dispatcher waitset; any dispatch failure is fatal
    errval_t err = event_dispatch(ws);
    if(err_is_fail(err)) {
        USER_PANIC_ERR(err, "error on event dispatch");
456 * \brief Runs enabled on the remote core to initialize the dispatcher
458 static int remote_core_init_enabled(void *arg)
461 struct remote_core_state *remote_core_state =
462 (struct remote_core_state*)arg;
464 /* Initialize the barrelfish library */
465 err = barrelfish_init_onthread(NULL);
466 if (err_is_fail(err)) {
467 DEBUG_ERR(err, "barrelfish_init_onthread failed");
472 // Connect to all dispatchers eagerly
473 remote_core_state->cnt = 0;
474 while(allirefs[remote_core_state->cnt] == NULL_IREF && remote_core_state->cnt < MAX_CPUS) {
475 remote_core_state->cnt++;
476 if(remote_core_state->cnt == disp_get_core_id()) {
477 remote_core_state->cnt++;
480 // Don't move before barrelfish_init_onthread()
481 struct domain_state *st = get_domain_state();
482 if(remote_core_state->cnt != MAX_CPUS) {
483 err = interdisp_bind(allirefs[remote_core_state->cnt], client_connected,
484 remote_core_state, &st->interdisp_ws,
485 IDC_BIND_FLAGS_DEFAULT);
486 if(err_is_fail(err)) {
487 USER_PANIC_ERR(err, "Failure binding to inter-dispatcher service");
491 while(!remote_core_state->initialized) {
492 event_dispatch(get_default_waitset());
495 /* Free unnecessary state */
496 free(remote_core_state);
498 /* XXX: create a thread that will handle the default waitset */
499 st->default_waitset_handler = thread_create(span_slave_thread, NULL);
500 assert(st->default_waitset_handler != NULL);
504 return interdisp_msg_handler(&st->interdisp_ws);
/**
 * \brief Runs disabled on the remote core to initialize
 *
 * \param thread The init thread set up by domain_new_dispatcher_varstack();
 *               runs remote_core_init_enabled() once threading is up.
 */
static void remote_core_init_disabled(struct thread *thread)
    dispatcher_handle_t disp = thread->disp;

    /* Initialize the dispatcher */
    disp_init_disabled(disp);

    /* Initialize the threads library, and call remote_core_init_enabled */
    thread_init_remote(disp, thread);
/**
 * \brief Initialize the domain library
 *
 * Registers a iref with the monitor to offer the interdisp service on this core
 * Does not block for completion.
 */
errval_t domain_init(void)
    struct domain_state *domain_state = malloc(sizeof(struct domain_state));
        return LIB_ERR_MALLOC_FAIL;
    set_domain_state(domain_state);

    // Start with no interdisp service and no peer bindings
    domain_state->iref = 0;
    domain_state->default_waitset_handler = NULL;
    domain_state->remote_wakeup_queue = NULL;
    waitset_chanstate_init(&domain_state->remote_wakeup_event,
                           CHANTYPE_EVENT_QUEUE);
    for (int i = 0; i < MAX_CPUS; i++) {
        domain_state->b[i] = NULL;

    waitset_init(&domain_state->interdisp_ws);
    domain_state->conditional = false;
    err = interdisp_export(NULL, server_listening, server_connected,
                           &domain_state->interdisp_ws, IDC_EXPORT_FLAGS_DEFAULT);
    if (err_is_fail(err)) {

    // XXX: Wait for the export to finish before returning
    // (server_listening() sets 'conditional' when the iref is known)
    while(!domain_state->conditional) {
        messages_wait_and_handle_next();
/**
 * \brief Handler to continue spanning domain state machine
 *
 * \param mb        Monitor binding the reply arrived on
 * \param msgerr    Result of the span request
 * \param domain_id Opaque pointer to our span_domain_state
 */
static void span_domain_reply(struct monitor_binding *mb,
                              errval_t msgerr, uintptr_t domain_id)
    /* On success, no further action needed */
    if (err_is_ok(msgerr)) {

    /* On failure, release resources and notify the caller */
    struct span_domain_state *span_domain_state =
        (struct span_domain_state*)domain_id;
    errval_t err = cap_destroy(span_domain_state->frame);
    if (err_is_fail(err)) {
        // NOTE(review): err_push()'s result is discarded here, so the pushed
        // error is lost — probably should be msgerr = err_push(...). Confirm.
        err_push(msgerr, LIB_ERR_CAP_DESTROY);

    if (span_domain_state->callback) { /* Use the callback to return error */
        span_domain_state->callback(span_domain_state->callback_arg, msgerr);
    } else { /* Use debug_err if no callback registered */
        DEBUG_ERR(msgerr, "Failure in span_domain_reply");
    free(span_domain_state);
/**
 * \brief Sends the span_domain_request to the monitor, retrying via
 *        register_send when the binding is busy.
 *
 * Holds the monitor binding's event mutex; unlocks it once the send
 * succeeds or fails permanently.
 */
static void span_domain_request_sender(void *arg)
    struct monitor_binding *mb = arg;
    struct span_domain_state *st = mb->st;

    errval_t err = mb->tx_vtbl.
        span_domain_request(mb, NOP_CONT, (uintptr_t)st, st->core_id, st->vroot,
    if (err_is_ok(err)) {
        event_mutex_unlock(&mb->mutex);
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        /* Wait to use the monitor binding */
        err = mb->register_send(mb, mb->waitset,
                                MKCONT(span_domain_request_sender,mb));
        if(err_is_fail(err)) { // shouldn't fail, as we have the mutex
            USER_PANIC_ERR(err, "register_send");
    } else { // permanent error
        event_mutex_unlock(&mb->mutex);
        err = err_push(err, MON_CLIENT_ERR_SPAN_DOMAIN_REQUEST);
        DEBUG_ERR(err, "span_domain_request");
/**
 * \brief Continuation run once the monitor binding mutex has been acquired;
 *        kicks off the actual span_domain_request send.
 *
 * \param st The span_domain_state (stashed on the monitor binding)
 */
static void span_domain_request_sender_wrapper(void *st)
    struct monitor_binding *mb = get_monitor_binding();
    span_domain_request_sender(mb);
/**
 * \brief Since we cannot dynamically grow our stack yet, we need a
 * version that will create threads on remote core with variable stack size
 *
 * Spans the domain to \p core_id: allocates and maps a dispatcher frame,
 * clones the current vspace (same vroot), sets up an init thread running
 * remote_core_init_disabled(), and asks the monitor to start it. Blocks
 * dispatching the default waitset until the remote side reports initialized.
 *
 * \bug this is a hack
 */
static errval_t domain_new_dispatcher_varstack(coreid_t core_id,
                                               domain_spanned_callback_t callback,
                                               void *callback_arg, size_t stack_size)
    // Spanning to our own core makes no sense
    assert(core_id != disp_get_core_id());

    struct domain_state *domain_state = get_domain_state();
    struct monitor_binding *mb = get_monitor_binding();
    assert(domain_state != NULL);

    /* Set reply handler */
    mb->rx_vtbl.span_domain_reply = span_domain_reply;

    while(domain_state->iref == 0) { /* If not initialized, wait */
        messages_wait_and_handle_next();

    /* Create the remote_core_state passed to the new dispatcher */
    struct remote_core_state *remote_core_state =
        calloc(1, sizeof(struct remote_core_state));
    if (!remote_core_state) {
        return LIB_ERR_MALLOC_FAIL;
    remote_core_state->core_id = disp_get_core_id();
    remote_core_state->iref = domain_state->iref;

    /* Create the thread for the new dispatcher to init on */
    struct thread *newthread =
        thread_create_unrunnable(remote_core_init_enabled,
                                 (void*)remote_core_state, stack_size);
    if (newthread == NULL) {
        // NOTE(review): remote_core_state leaks on this and later error paths
        return LIB_ERR_THREAD_CREATE;

    /* Save the state for later steps of the spanning state machine */
    struct span_domain_state *span_domain_state =
        malloc(sizeof(struct span_domain_state));
    if (!span_domain_state) {
        return LIB_ERR_MALLOC_FAIL;
    span_domain_state->thread = newthread;
    span_domain_state->core_id = core_id;
    span_domain_state->callback = callback;
    span_domain_state->callback_arg = callback_arg;

    /* Give remote_core_state pointer to span_domain_state */
    remote_core_state->span_domain_state = span_domain_state;

    /* Start spanning domain state machine by sending vroot to the monitor */
    struct capref vroot = {

    /* Create new dispatcher frame */
    size_t dispsize = ((size_t)1) << DISPATCHER_FRAME_BITS;
    err = frame_alloc(&frame, dispsize, &dispsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);

    // Map the new dispatcher frame into our vspace so we can initialize it
    err = vspace_map_one_frame((void **)&dispaddr, dispsize, frame, NULL, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);

    dispatcher_handle_t handle = dispaddr;
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);
    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
    arch_registers_state_t *disabled_area =
        dispatcher_get_disabled_save_area(handle);

    /* Set dispatcher on the newthread */
    span_domain_state->thread->disp = handle;
    span_domain_state->frame = frame;
    span_domain_state->vroot = vroot;

    /* Setup dispatcher */
    disp->udisp = (lvaddr_t)handle;
    disp->disabled = true;
    disp_gen->core_id = span_domain_state->core_id;
    // Setup the dispatcher to run remote_core_init_disabled
    // and pass the created thread as an argument
    registers_set_initial(disabled_area, span_domain_state->thread,
                          (lvaddr_t)remote_core_init_disabled,
                          (lvaddr_t)&disp_gen->stack[DISPATCHER_STACK_WORDS],
                          (uintptr_t)span_domain_state->thread, 0, 0, 0);
    // Give dispatcher a unique name for debugging
    snprintf(disp->name, DISP_NAME_LEN, "%s%d", disp_name(),
             span_domain_state->core_id);

    // XXX: share LDT state between all dispatchers
    // this needs to happen before the remote core starts, otherwise the segment
    // selectors in the new thread state are invalid
    struct dispatcher_shared_x86_64 *disp_x64
        = get_dispatcher_shared_x86_64(handle);
    struct dispatcher_shared_x86_64 *mydisp_x64
        = get_dispatcher_shared_x86_64(curdispatcher());

    disp_x64->ldt_base = mydisp_x64->ldt_base;
    disp_x64->ldt_npages = mydisp_x64->ldt_npages;

    threads_prepare_to_span(handle);

    // Setup new local thread for inter-dispatcher messages, if not already done
    static struct thread *interdisp_thread = NULL;
    if(interdisp_thread == NULL) {
        interdisp_thread = thread_create(interdisp_msg_handler,
                                         &domain_state->interdisp_ws);
        err = thread_detach(interdisp_thread);
        assert(err_is_ok(err));

    // XXX: Tell currently active interdisp-threads to handle default waitset
    for(int i = 0; i < MAX_CPUS; i++) {
        struct interdisp_binding *b = domain_state->b[i];

        if(disp_get_core_id() != i && b != NULL) {
            err = b->tx_vtbl.span_slave(b, NOP_CONT);
            assert(err_is_ok(err));

    /* XXX: create a thread that will handle the default waitset */
    if (domain_state->default_waitset_handler == NULL) {
        domain_state->default_waitset_handler
            = thread_create(span_slave_thread, NULL);
        assert(domain_state->default_waitset_handler != NULL);

    /* Wait to use the monitor binding */
    struct monitor_binding *mcb = get_monitor_binding();
    event_mutex_enqueue_lock(&mcb->mutex, &span_domain_state->event_qnode,
                             (struct event_closure) {
                                 .handler = span_domain_request_sender_wrapper,
                                 .arg = span_domain_state });

    // Block until dispatcher_initialized_handler() flags completion
    while(!span_domain_state->initialized) {
        event_dispatch(get_default_waitset());

    free(span_domain_state);
/**
 * \brief Creates a dispatcher on a remote core
 *
 * \param core_id   Id of the core to create the dispatcher on
 * \param callback  Callback to use when new dispatcher is created
 *
 * The new dispatcher is created with the same vroot, sharing the same vspace.
 * The new dispatcher also has a urpc connection to the core that created it.
 */
errval_t domain_new_dispatcher(coreid_t core_id,
                               domain_spanned_callback_t callback,
    // Delegate, using the default stack size for the remote init thread
    return domain_new_dispatcher_varstack(core_id, callback, callback_arg,
                                          THREADS_DEFAULT_STACK_BYTES);
/**
 * \brief Send a capability to a dispatcher of this domain on another core.
 *
 * \note Returns after issuing the request; the commented-out wait loop below
 *       means completion is NOT awaited here.
 */
errval_t domain_send_cap(coreid_t core_id, struct capref cap)
    struct domain_state *domain_state = get_domain_state();
    if (!domain_state->b[core_id]) {
        return LIB_ERR_NO_SPANNED_DISP;

    send_cap_err = SYS_ERR_OK;
    cap_received = false;

    struct interdisp_binding *b = domain_state->b[core_id];
    // NOTE(review): passes the address of the local 'cap' as the remote copy
    // destination; 'cap' goes out of scope when this function returns while
    // the remote copy may still be pending — confirm this is safe.
    err = b->tx_vtbl.send_cap_request(b, NOP_CONT, cap, (uintptr_t)&cap);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SEND_CAP_REQUEST);

    // TODO: Handled on different thread
    /* while(!cap_received) { */
    /*     messages_wait_and_handle_next(); */
/**
 * \brief Wakeup a thread on a foreign dispatcher while disabled.
 *
 * \param core_id Core ID to wakeup on
 * \param thread  Pointer to thread to wakeup
 * \param mydisp  Dispatcher this function is running on
 *
 * \return SYS_ERR_OK on success.
 */
static errval_t domain_wakeup_on_coreid_disabled(coreid_t core_id,
                                                 struct thread *thread,
                                                 dispatcher_handle_t mydisp)
    struct domain_state *ds = get_domain_state();

    // XXX: Ugly hack to allow waking up on a core id we don't have a
    // dispatcher handler for
    thread->coreid = core_id;

    assert_disabled(ds != NULL);
    if (ds->b[core_id] == NULL) {
        return LIB_ERR_NO_SPANNED_DISP;

    // Queue the thread for the interdisp handler thread to forward
    thread_enqueue(thread, &ds->remote_wakeup_queue);

    // Signal the inter-disp waitset of this event
    struct event_closure closure = {
        .handler = handle_wakeup_on
    waitset_chan_trigger_closure_disabled(&ds->interdisp_ws,
                                          &ds->remote_wakeup_event,
    // Already-registered is benign: the pending handler drains the whole queue
    assert_disabled(err_is_ok(err) ||
                    err_no(err) == LIB_ERR_CHAN_ALREADY_REGISTERED);
869 errval_t domain_wakeup_on_disabled(dispatcher_handle_t disp,
870 struct thread *thread,
871 dispatcher_handle_t mydisp)
873 coreid_t core_id = disp_handle_get_core_id(disp);
875 // TODO: Can't wakeup on anyone else than the owning dispatcher yet
876 assert_disabled(disp == thread->disp);
878 return domain_wakeup_on_coreid_disabled(core_id, thread, mydisp);
/**
 * \brief Wakeup a thread on a foreign dispatcher (enabled entry point).
 */
errval_t domain_wakeup_on(dispatcher_handle_t disp,
                          struct thread *thread)
    // Disable for exclusive access to the remote wakeup queue
    dispatcher_handle_t mydisp = disp_disable();
    errval_t err = domain_wakeup_on_disabled(disp, thread, mydisp);
/**
 * \brief Move the calling thread to another core.
 *
 * Removes the thread from this dispatcher's run queue, hands it to the
 * remote dispatcher via the wakeup path, then resumes another thread or
 * yields. Does not return on success.
 */
errval_t domain_thread_move_to(struct thread *thread, coreid_t core_id)
    // Only the thread itself may move itself
    assert(thread == thread_self());
    dispatcher_handle_t mydisp = disp_disable();
    struct dispatcher_generic *disp_gen = get_dispatcher_generic(mydisp);
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(mydisp);

    struct thread *next = thread->next;
    thread_remove_from_queue(&disp_gen->runq, thread);

    errval_t err = domain_wakeup_on_coreid_disabled(core_id, thread, mydisp);
    if(err_is_fail(err)) {
        // Hand-over failed: put ourselves back on the local run queue
        thread_enqueue(thread, &disp_gen->runq);

    // run the next thread, if any
    if (next != thread) {
        disp_gen->current = next;
        disp_resume(mydisp, &next->regs);
        // Nothing else runnable: recompute the work flag and yield the core
        disp_gen->current = NULL;
        disp->haswork = havework_disabled(mydisp);
        disp_yield_disabled(mydisp);

    USER_PANIC("should never be reached");
/**
 * \brief Create a thread on a (possibly remote) core with a given stack size.
 *
 * \param core_id    Target core (may be the local core)
 * \param start_func Entry point for the new thread
 * \param arg        Argument passed to start_func
 * \param stacksize  Stack size in bytes; 0 selects the default size
 * \param newthread  Out: handle of the created thread
 */
errval_t domain_thread_create_on_varstack(coreid_t core_id,
                                          thread_func_t start_func,
                                          void *arg, size_t stacksize,
                                          struct thread **newthread)
    // Local core: create directly, no messaging needed
    if (disp_get_core_id() == core_id) {
        struct thread *th = NULL;
        if (stacksize == 0) {
            th = thread_create(start_func, arg);
            th = thread_create_varstack(start_func, arg, stacksize);
            return LIB_ERR_THREAD_CREATE;

    struct domain_state *domain_state = get_domain_state();

    if (domain_state->b[core_id] == NULL) {
        return LIB_ERR_NO_SPANNED_DISP;

    struct interdisp_binding *b = domain_state->b[core_id];
    struct create_thread_req *req = malloc(sizeof(*req));
    req->reply_received = false;
    // use special waitset to make sure loop exits properly.
    struct waitset ws, *old_ws = b->waitset;
    b->change_waitset(b, &ws);
    err = b->tx_vtbl.create_thread_request(b, NOP_CONT,
                                           (genvaddr_t)(uintptr_t)start_func,
                                           (genvaddr_t)(uintptr_t)arg,
                                           (genvaddr_t)(lvaddr_t)req);
    if (err_is_fail(err)) {

    // Block until create_thread_reply() fills in the request record
    while (!req->reply_received) {

    *newthread = req->thread;

    // Restore the binding's original waitset
    b->change_waitset(b, old_ws);
980 errval_t domain_thread_create_on(coreid_t core_id, thread_func_t start_func,
981 void *arg, struct thread **newthread)
983 return domain_thread_create_on_varstack(core_id, start_func, arg, 0, newthread);
/**
 * \brief Join a thread that may live on another core.
 *
 * Local threads are joined directly; remote joins go through the interdisp
 * join_thread request/reply pair on a private waitset.
 */
errval_t domain_thread_join(struct thread *thread, int *retval)
    coreid_t core_id = thread->coreid;
    // NOTE(review): debug output left in a public API — consider removing
    debug_printf("%s: joining %p, coreid %d\n", __FUNCTION__, thread, core_id);
    if (disp_get_core_id() == core_id) {
        return thread_join(thread, retval);
    struct domain_state *domain_state = get_domain_state();

    if (domain_state->b[core_id] == NULL) {
        return LIB_ERR_NO_SPANNED_DISP;

    struct interdisp_binding *b = domain_state->b[core_id];
    struct join_thread_req *req = malloc(sizeof(*req));
    req->reply_received = false;
    // use special waitset to make sure loop exits properly.
    struct waitset ws, *old_ws = b->waitset;
    b->change_waitset(b, &ws);
    err = b->tx_vtbl.join_thread_request(b, NOP_CONT,
                                         (genvaddr_t)(lvaddr_t)thread,
                                         (genvaddr_t)(lvaddr_t)req);
    if (err_is_fail(err)) {

    // Block until join_thread_reply() fills in the request record
    while (!req->reply_received) {
        event_dispatch(&ws);
    // change waitset back
    b->change_waitset(b, old_ws);

    *retval = req->retval;
1031 * \brief set the core_id.
1033 * Code using this should do a kernel_cap invocation to get the core_id first.
1035 void disp_set_core_id(coreid_t core_id)
1037 dispatcher_handle_t handle = curdispatcher();
1038 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1039 disp->core_id = core_id;
1043 * \brief returns the address and the size of the EH frame
1045 * \param eh_frame returned virtual address of the EH frame
1046 * \param eh_frame_size returned size of the EH frame
1048 void disp_get_eh_frame(lvaddr_t *eh_frame,
1049 size_t *eh_frame_size)
1051 dispatcher_handle_t handle = curdispatcher();
1052 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1054 *eh_frame = disp->eh_frame;
1056 if (eh_frame_size) {
1057 *eh_frame_size = disp->eh_frame_size;
1062 * \brief returns the address and the size of the EH frame header
1064 * \param eh_frame returned virtual address of the EH frame
1065 * \param eh_frame_size returned size of the EH frame
1067 void disp_get_eh_frame_hdr(lvaddr_t *eh_frame_hdr,
1068 size_t *eh_frame_hdr_size)
1070 dispatcher_handle_t handle = curdispatcher();
1071 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1073 *eh_frame_hdr = disp->eh_frame_hdr;
1075 if (eh_frame_hdr_size) {
1076 *eh_frame_hdr_size = disp->eh_frame_hdr_size;
1081 * \brief returns the core_id stored in disp_priv struct
1083 coreid_t disp_get_core_id(void)
1085 dispatcher_handle_t handle = curdispatcher();
1086 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1087 return disp->core_id;
1091 * \brief returns the current core_id stored in disp_shared struct
1093 coreid_t disp_get_current_core_id(void)
1095 dispatcher_handle_t handle = curdispatcher();
1096 struct dispatcher_shared_generic* disp = get_dispatcher_shared_generic(handle);
1097 return disp->curr_core_id;
1101 * \brief returns the domain_id stored in disp_priv struct
1103 domainid_t disp_get_domain_id(void)
1105 dispatcher_handle_t handle = curdispatcher();
1106 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1107 return disp->domain_id;
1111 * \brief returns the core_id stored in disp_priv struct
1113 coreid_t disp_handle_get_core_id(dispatcher_handle_t handle)
1115 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1116 return disp->core_id;
1119 struct waitset *get_default_waitset(void)
1121 dispatcher_handle_t handle = curdispatcher();
1122 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1123 return &disp->core_state.c.default_waitset;
1127 * \brief set the monitor client binding on the dispatcher priv
1129 void set_monitor_binding(struct monitor_binding *b)
1131 dispatcher_handle_t handle = curdispatcher();
1132 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1133 disp->core_state.c.monitor_binding = b;
1137 * \brief Returns the monitor client binding on the dispatcher priv
1139 struct monitor_binding *get_monitor_binding(void)
1141 dispatcher_handle_t handle = curdispatcher();
1142 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1143 return disp->core_state.c.monitor_binding;
1148 * \brief set the blocking rpc monitor client binding on the dispatcher priv
1150 void set_monitor_blocking_rpc_client(struct monitor_blocking_rpc_client *st)
1152 dispatcher_handle_t handle = curdispatcher();
1153 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1154 disp->core_state.c.monitor_blocking_rpc_client = st;
1158 * \brief Returns the blocking rpc monitor client binding on the
1161 struct monitor_blocking_rpc_client *get_monitor_blocking_rpc_client(void)
1163 dispatcher_handle_t handle = curdispatcher();
1164 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1165 return disp->core_state.c.monitor_blocking_rpc_client;
1169 * \brief set the mem client on the dispatcher priv
1171 void set_mem_client(struct mem_rpc_client *st)
1173 dispatcher_handle_t handle = curdispatcher();
1174 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1175 disp->core_state.c.mem_st = st;
1179 * \brief Returns the mem client on the dispatcher priv
1181 struct mem_rpc_client *get_mem_client(void)
1183 dispatcher_handle_t handle = curdispatcher();
1184 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1185 return disp->core_state.c.mem_st;
1189 * \brief Returns a pointer to the current vspace on the dispatcher priv
1191 struct vspace *get_current_vspace(void)
1193 dispatcher_handle_t handle = curdispatcher();
1194 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1195 return &disp->core_state.vspace_state.vspace;
1199 * \brief Returns a pointer to the current pinned state on the dispatcher priv
1201 struct pinned_state *get_current_pinned_state(void)
1203 dispatcher_handle_t handle = curdispatcher();
1204 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1205 return &disp->core_state.pinned_state;
1209 * \brief Returns a pointer to the current pmap on the dispatcher priv
1211 struct pmap *get_current_pmap(void)
1213 dispatcher_handle_t handle = curdispatcher();
1214 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1215 return (struct pmap*)&disp->core_state.vspace_state.pmap;
1219 * \brief Returns a pointer to the morecore state on the dispatcher priv
1221 struct morecore_state *get_morecore_state(void)
1223 dispatcher_handle_t handle = curdispatcher();
1224 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1225 return &disp->core_state.c.morecore_state;
1229 * \brief Returns a pointer to the ram_alloc state on the dispatcher priv
1231 struct ram_alloc_state *get_ram_alloc_state(void)
1233 dispatcher_handle_t handle = curdispatcher();
1234 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1235 return &disp->core_state.c.ram_alloc_state;
1239 * \brief Returns a pointer to the ram_alloc state on the dispatcher priv
1241 struct skb_state *get_skb_state(void)
1243 dispatcher_handle_t handle = curdispatcher();
1244 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1245 return &disp->core_state.c.skb_state;
1249 * \brief Returns a pointer to the octopus rpc client on the dispatcher priv
1251 struct octopus_rpc_client *get_octopus_rpc_client(void)
1253 dispatcher_handle_t handle = curdispatcher();
1254 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1255 return disp->core_state.c.octopus_rpc_client;
1259 * \brief Sets the octopus rpc client on the dispatcher priv
1261 void set_octopus_rpc_client(struct octopus_rpc_client *c)
1263 dispatcher_handle_t handle = curdispatcher();
1264 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1265 disp->core_state.c.octopus_rpc_client = c;
1269 * \brief Returns a pointer to the chips_context state on the dispatcher priv
1271 struct spawn_rpc_client *get_spawn_rpc_client(coreid_t core)
1273 dispatcher_handle_t handle = curdispatcher();
1274 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1275 assert(core < MAX_CPUS);
1276 return disp->core_state.c.spawn_rpc_clients[core];
1280 * \brief set the chips_context state on the dispatcher priv
1282 void set_spawn_rpc_client(coreid_t core, struct spawn_rpc_client *c)
1284 dispatcher_handle_t handle = curdispatcher();
1285 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1286 assert(core < MAX_CPUS);
1287 disp->core_state.c.spawn_rpc_clients[core] = c;
1290 struct arrakis_rpc_client *get_arrakis_rpc_client(coreid_t core)
1292 dispatcher_handle_t handle = curdispatcher();
1293 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1294 assert(core < MAX_CPUS);
1295 return disp->core_state.c.arrakis_rpc_clients[core];
1299 * \brief set the chips_context state on the dispatcher priv
1301 void set_arrakis_rpc_client(coreid_t core, struct arrakis_rpc_client *c)
1303 dispatcher_handle_t handle = curdispatcher();
1304 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1305 assert(core < MAX_CPUS);
1306 disp->core_state.c.arrakis_rpc_clients[core] = c;
1310 * \brief Returns a pointer to the terminal state on the dispatcher priv
1312 struct terminal_state *get_terminal_state(void)
1314 dispatcher_handle_t handle = curdispatcher();
1315 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1316 return disp->core_state.c.terminal_state;
1320 * \brief set the terminal state on the dispatcher priv
1322 void set_terminal_state(struct terminal_state *st)
1324 dispatcher_handle_t handle = curdispatcher();
1325 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1326 disp->core_state.c.terminal_state = st;
1330 * \brief Returns a pointer to the domain state on the dispatcher priv
1332 struct domain_state *get_domain_state(void)
1334 dispatcher_handle_t handle = curdispatcher();
1335 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1336 return disp->core_state.c.domain_state;
1340 * \brief set the domain state on the dispatcher priv
1342 void set_domain_state(struct domain_state *st)
1344 dispatcher_handle_t handle = curdispatcher();
1345 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1346 disp->core_state.c.domain_state = st;
1350 * \brief Returns a pointer to the spawn state on the dispatcher priv
1352 struct spawn_state *get_spawn_state(void)
1354 dispatcher_handle_t handle = curdispatcher();
1355 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1356 return disp->core_state.c.spawn_state;
1360 * \brief set the spawn state on the dispatcher priv
1362 void set_spawn_state(struct spawn_state *st)
1364 dispatcher_handle_t handle = curdispatcher();
1365 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1366 disp->core_state.c.spawn_state = st;
1370 * \brief Returns a pointer to the spawn state on the dispatcher priv
1372 struct slot_alloc_state *get_slot_alloc_state(void)
1374 dispatcher_handle_t handle = curdispatcher();
1375 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1376 return &disp->core_state.c.slot_alloc_state;