3 * \brief Manage domain spanning cores
5 * \bug Need to specify how big the default thread on the spanned dispatcher
6 * should be because we cannot dynamically grow our stacks
8 * \bug Can only do domain_new_dispatcher() when no other dispatchers have
9 * threads (except for the internal interdisp-thread).
13 * Copyright (c) 2009, 2010, 2011, 2012, ETH Zurich.
14 * All rights reserved.
16 * This file is distributed under the terms in the attached LICENSE file.
17 * If you do not find this file, copies can be found by writing to:
18 * ETH Zurich D-INFK, CAB F.78, Universitaetstr. 6, CH-8092 Zurich,
19 * Attn: Systems Group.
23 #include <barrelfish/barrelfish.h>
24 #include <barrelfish/curdispatcher_arch.h>
25 #include <barrelfish/dispatcher_arch.h>
26 #include <barrelfish/waitset_chan.h>
27 #include <barrelfish_kpi/domain_params.h>
28 #include <arch/registers.h>
29 #include <barrelfish/dispatch.h>
30 #include <if/interdisp_defs.h>
31 #include "arch/threads.h"
33 #include <if/monitor_defs.h>
34 #include "threads_priv.h"
35 #include "waitset_chan_priv.h"
///< Struct to maintain per-dispatcher domain library state (fields of struct domain_state)
iref_t iref; ///< Iref for the interdisp service exported by this dispatcher
struct interdisp_binding *binding[MAX_CPUS]; ///< Per-core interdisp binding; NULL if not connected to that core
struct waitset interdisp_ws; ///< Private waitset serviced by the interdisp handler thread
struct thread *default_waitset_handler; ///< Thread servicing the default waitset while spanned
struct thread *remote_wakeup_queue; ///< Threads queued for wakeup on foreign dispatchers
struct waitset_chanstate remote_wakeup_event; ///< Channel used to poke the interdisp thread (see handle_wakeup_on)
///< Struct to send init information to the dispatcher that was spanned
struct remote_core_state {
iref_t iref; ///< Iref of the interdisp service to connect to
uint8_t core_id; ///< Core id of the domain that spanned this dispatcher
struct span_domain_state *span_domain_state; ///< Reference to the span_domain_state of the "server"
bool initialized; ///< true if remote core is fully initialized
int cnt; ///< Index of the dispatcher currently being connected to
size_t pagesize; ///< the pagesize to be used for the heap
///< Struct for the spanning-domains state machine; lives from the span request
///< until the remote core reports initialized (or the span fails)
struct span_domain_state {
struct thread *thread; ///< Thread to run on remote core
uint8_t core_id; ///< Id of the remote core
errval_t err; ///< To propagate error value
domain_spanned_callback_t callback; ///< Callback for when domain has spanned
void *callback_arg; ///< Optional argument to pass with callback
struct capref frame; ///< Dispatcher frame
struct capref vroot; ///< VRoot cap
struct event_queue_node event_qnode; ///< Event queue node (monitor-binding mutex queue)
struct waitset_chanstate initev; ///< Dispatcher initialized event
bool initialized; ///< True if remote initialized
///< Array of all interdisp IREFs in the domain, indexed by core id (NULL_IREF when absent)
static iref_t allirefs[MAX_CPUS];
/**
 * \brief Deferred handler run once a newly spanned dispatcher reports
 * that it has initialized.
 *
 * Notifies the other active interdisp threads to resume servicing the
 * default waitset, then upcalls the user callback (if any) and marks
 * the span as complete.
 *
 * \param arg The struct span_domain_state of the span in progress.
 */
static void dispatcher_initialized_handler(void *arg)
struct span_domain_state *span_domain_state = arg;
struct domain_state *domain_state = get_domain_state();
// XXX: Tell currently active interdisp-threads to handle default waitset
for(int i = 0; i < MAX_CPUS; i++) {
struct interdisp_binding *b = domain_state->binding[i];
// Skip ourselves, the just-spanned core, and cores without a binding
if(disp_get_core_id() != i &&
span_domain_state->core_id != i && b != NULL) {
errval_t err = b->tx_vtbl.span_slave_done(b, NOP_CONT);
assert(err_is_ok(err));
/* Upcall into the domain_new_dispatcher callback if registered */
if (span_domain_state->callback) {
span_domain_state->callback(span_domain_state->callback_arg, SYS_ERR_OK);
// Unblocks the wait loop in domain_new_dispatcher_varstack()
span_domain_state->initialized = 1;
//free(span_domain_state);
/**
 * \brief Handler for the dispatcher_initialized msg type
 *
 * Called when a recently spanned dispatcher has initialized. Defers the
 * real work to dispatcher_initialized_handler via the default waitset so
 * it runs outside the interdisp handler thread.
 */
static void dispatcher_initialized(struct interdisp_binding *st, genvaddr_t id)
// 'id' round-trips the pointer to our span_domain_state through the wire
struct span_domain_state *span_domain_state = (struct span_domain_state*)(uintptr_t)id;
// Signal the default waitset of this event
struct event_closure closure = {
.handler = dispatcher_initialized_handler,
.arg = span_domain_state,
waitset_chanstate_init(&span_domain_state->initev, CHANTYPE_EVENT_QUEUE);
errval_t err = waitset_chan_trigger_closure(get_default_waitset(),
&span_domain_state->initev,
if(err_is_fail(err)) {
USER_PANIC_ERR(err, "Triggering default waitset");
124 static void send_cap_request(struct interdisp_binding *st,
125 struct capref cap, genvaddr_t info)
127 errval_t err = SYS_ERR_OK, err2;
128 struct capref *dest = (struct capref *)(uintptr_t)info;
130 err = cap_copy(*dest, cap);
131 if(err_is_fail(err)) {
132 err_push(err, LIB_ERR_CAP_COPY_FAIL);
133 DEBUG_ERR(err, "cap_copy");
137 err = cap_destroy(cap);
138 if(err_is_fail(err)) {
139 err_push(err, LIB_ERR_CAP_DELETE_FAIL);
140 DEBUG_ERR(err, "cap_destroy default");
146 err2 = st->tx_vtbl.send_cap_reply(st, NOP_CONT, err);
147 if (err_is_fail(err2)) {
148 DEBUG_ERR(err, "Failed to send send_cap_reply");
// Synchronisation state for domain_send_cap(): result of the last send_cap
// exchange and whether the peer's reply has arrived.
static errval_t send_cap_err = SYS_ERR_OK;
static bool cap_received = false;
// Handler for the send_cap_reply message from the remote dispatcher
static void send_cap_reply(struct interdisp_binding *st, errval_t err)
/**
 * \brief Remote request to create a thread on this dispatcher.
 *
 * Creates the thread (with the requested stack size when non-zero) and
 * replies with a create_thread_reply carrying the new thread's address.
 */
static void create_thread_request(struct interdisp_binding *b,
genvaddr_t funcaddr, genvaddr_t argaddr,
uint64_t stacksize, genvaddr_t req)
thread_func_t start_func = (thread_func_t)(uintptr_t)funcaddr;
void *arg = (void *)(uintptr_t)argaddr;
struct thread *newthread;
// XXX: Probably want to return pointer to thread struct to caller
newthread = thread_create_varstack(start_func, arg, stacksize);
newthread = thread_create(start_func, arg);
assert(newthread != NULL);
errval_t err = b->tx_vtbl.create_thread_reply(b, NOP_CONT, SYS_ERR_OK,
(genvaddr_t)(lvaddr_t)newthread,
assert(err_is_ok(err));
// Per-call state for domain_thread_create_on(): filled in by the reply handler
struct create_thread_req {
struct thread *thread;
/**
 * \brief Reply handler for create_thread_request: records the remote
 * thread pointer in the waiting request and flags completion.
 */
static void create_thread_reply(struct interdisp_binding *b,
errval_t err, genvaddr_t thread, genvaddr_t req)
assert(err_is_ok(err));
// 'req' round-trips the pointer to the waiting create_thread_req
struct create_thread_req *r = (struct create_thread_req*)(lvaddr_t)req;
r->thread = (struct thread *)(lvaddr_t)thread;
r->reply_received = true;
/**
 * \brief Remote request to wake up a thread owned by this core.
 *
 * Runs disabled while adopting the thread onto this dispatcher's run queue.
 */
static void wakeup_thread_request(struct interdisp_binding *b,
coreid_t core_id = disp_get_core_id();
struct thread *wakeup = (struct thread *)(uintptr_t)taddr;
dispatcher_handle_t handle = disp_disable();
struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
/* assert_disabled(wakeup->disp == handle); */
assert_disabled(wakeup->coreid == core_id);
// Adopt the thread onto this dispatcher before making it runnable
wakeup->disp = handle;
thread_enqueue(wakeup, &disp_gen->runq);
/**
 * \brief Remote request to join a thread owned by this core.
 *
 * Blocks in thread_join() on behalf of the remote caller, then replies
 * with the join result and the joined thread's return value.
 */
static void join_thread_request(struct interdisp_binding *b,
genvaddr_t taddr, genvaddr_t req)
struct thread *thread = (struct thread *)(lvaddr_t)taddr;
assert(thread->coreid == disp_get_core_id());
errval_t err = thread_join(thread, &retval);
err = b->tx_vtbl.join_thread_reply(b, NOP_CONT, err, retval, req);
assert(err_is_ok(err));
// Per-call state for domain_thread_join(): filled in by the reply handler
struct join_thread_req {
/**
 * \brief Reply handler for join_thread_request: records the result and
 * flags completion for the waiting domain_thread_join().
 */
static void join_thread_reply(struct interdisp_binding *b,
errval_t err, uint64_t retval, genvaddr_t req)
struct join_thread_req *r = (struct join_thread_req *)(lvaddr_t)req;
r->reply_received = true;
/*
 * XXX: The whole span_slave*() thing is a hack to allow all
 * dispatchers to wait on both the monitor and interdisp waitsets
 * while we bind to all.
 */
// Detached thread that perpetually services the default waitset
static int span_slave_thread(void *arg)
errval_t err = thread_detach(thread_self());
assert(err_is_ok(err));
// Dispatch default-waitset events forever (loop header not visible here)
event_dispatch(get_default_waitset());
// Handler for span_slave; panics first, so the thread_create is dead code
static void span_slave_request(struct interdisp_binding *b)
USER_PANIC("shouldn't be called");
thread_create(span_slave_thread, NULL);
// Event-closure target for span_slave_done; unreachable by design (panics)
static void span_slave_done_handler(void *cs)
USER_PANIC("shouldn't be called");
// Handler for span_slave_done; panics first -- the remainder (signalling the
// default waitset with a freshly allocated chanstate) is dead code
static void span_slave_done_request(struct interdisp_binding *b)
USER_PANIC("shouldn't be called");
struct waitset_chanstate *cs = malloc(sizeof(struct waitset_chanstate));
// Signal the default waitset of this event
struct event_closure closure = {
.handler = span_slave_done_handler,
waitset_chanstate_init(cs, CHANTYPE_EVENT_QUEUE);
errval_t err = waitset_chan_trigger_closure(get_default_waitset(), cs,
if(err_is_fail(err)) {
USER_PANIC_ERR(err, "Triggering default waitset");
/**
 * \brief Eager-connect notification: remember the sender's binding,
 * indexed by its core id.
 */
static void span_eager_connect_request(struct interdisp_binding *b,
struct domain_state *domain_state = get_domain_state();
/* Store the sending core's connection */
domain_state->binding[core_id] = b;
// Receive-handler table installed on every interdisp binding
static struct interdisp_rx_vtbl interdisp_vtbl = {
.dispatcher_initialized = dispatcher_initialized,
.send_cap_request = send_cap_request,
.send_cap_reply = send_cap_reply,
.wakeup_thread = wakeup_thread_request,
.create_thread_request = create_thread_request,
.create_thread_reply = create_thread_reply,
.join_thread_request = join_thread_request,
.join_thread_reply = join_thread_reply,
// XXX: Hack to allow domain_new_dispatcher() to proceed when not all
// default waitsets are serviced
.span_slave = span_slave_request,
.span_slave_done = span_slave_done_request,
.span_eager_connect = span_eager_connect_request,
316 * \brief Called when the "client" connects to "server"
318 * Make the connection a "server" connection, free unnecessary state.
319 * Send init msg to the dispatcher that spanned this dispatcher.
321 static void client_connected(void *st, errval_t err,
322 struct interdisp_binding *b)
324 struct remote_core_state *state = (struct remote_core_state*)st;
325 struct domain_state *domain_state = get_domain_state();
327 if(err_is_fail(err)) {
328 DEBUG_ERR(err, "binding to interdisp service");
332 /* Set it on the domain library state */
333 b->rx_vtbl = interdisp_vtbl;
334 domain_state->binding[state->cnt] = b;
336 // Send it our core id
337 err = b->tx_vtbl.span_eager_connect(b, NOP_CONT, disp_get_core_id());
338 if(err_is_fail(err)) {
339 USER_PANIC_ERR(err, "sending span_eager_connect");
342 // Connect to next active dispatcher
345 if(state->cnt == disp_get_core_id()) {
348 } while(allirefs[state->cnt] == NULL_IREF && state->cnt < MAX_CPUS);
350 if(state->cnt < MAX_CPUS) {
351 err = interdisp_bind(allirefs[state->cnt], client_connected,
352 state, &domain_state->interdisp_ws,
353 IDC_BIND_FLAGS_DEFAULT);
354 if(err_is_fail(err)) {
355 USER_PANIC_ERR(err, "Binding to inter-dispatcher service");
358 struct interdisp_binding *sb = domain_state->binding[state->core_id];
359 /* Send initialized msg to the dispatcher that spanned us */
360 errval_t err2 = sb->tx_vtbl.
361 dispatcher_initialized(sb, NOP_CONT,
362 (uintptr_t)state->span_domain_state);
363 if (err_is_fail(err2)) {
364 DEBUG_ERR(err, "failed to send initalized msg");
368 state->initialized = true;
// Export connect-callback: accept the connection and install our rx vtbl
static errval_t server_connected(void *st, struct interdisp_binding *b)
b->rx_vtbl = interdisp_vtbl;
/**
 * \brief Called when the domain gets an interdisp service export.
 *
 * Records the iref on the domain_state and in the global allirefs array,
 * then releases the busy-wait in domain_init() via the 'conditional' flag.
 */
static void server_listening(void *st, errval_t err, iref_t iref)
if(err_is_fail(err)) {
DEBUG_ERR(err, "interdisp service export");
struct domain_state *domain_state = get_domain_state();
domain_state->iref = iref;
// Also set in the global array
allirefs[disp_get_core_id()] = iref;
// Unblocks the wait loop in domain_init()
domain_state->conditional = true;
/**
 * \brief Called on the inter-disp handler thread, when another thread
 * on this dispatcher wants to wakeup a thread on a foreign dispatcher.
 *
 * Drains the remote wakeup queue, sending one wakeup_thread message per
 * queued thread to the binding of the thread's owning core.
 */
static void handle_wakeup_on(void *arg)
struct domain_state *domain_state = get_domain_state();
assert(domain_state != NULL);
// Dequeue all (disable to ensure mutual exclusion -- per dispatcher)
struct thread *thread = NULL;
dispatcher_handle_t disp = disp_disable();
if(domain_state->remote_wakeup_queue != NULL) {
thread = thread_dequeue(&domain_state->remote_wakeup_queue);
// Break if queue empty
/* coreid_t core_id = disp_handle_get_core_id(thread->disp); */
// The thread's coreid was set by domain_wakeup_on_coreid_disabled()
coreid_t core_id = thread->coreid;
assert(domain_state->binding[core_id] != NULL);
struct interdisp_binding *b = domain_state->binding[core_id];
err = b->tx_vtbl.wakeup_thread(b, NOP_CONT, (genvaddr_t)(uintptr_t)thread);
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "wakeup_thread");
/**
 * \brief Handler thread for inter-dispatcher messages
 * \param arg Pointer to inter-dispatcher waitset
 * \return 0 on successful exit
 */
static int interdisp_msg_handler(void *arg)
struct waitset *ws = arg;
// Dispatch events on the interdisp waitset; panic on dispatch failure
errval_t err = event_dispatch(ws);
if(err_is_fail(err)) {
USER_PANIC_ERR(err, "error on event dispatch");
/**
 * \brief Runs enabled on the remote core to initialize the dispatcher
 *
 * Brings up libbarrelfish on the new dispatcher, eagerly binds to every
 * already-active dispatcher, waits until fully connected, then takes over
 * as the interdisp message-handler thread for this core.
 */
static int remote_core_init_enabled(void *arg)
struct remote_core_state *remote_core_state =
(struct remote_core_state*)arg;
/* construct a temporary spawn param to supply the morecore alignment */
struct spawn_domain_params params;
// NOTE(review): '¶ms' below is a mis-encoding of '&params' -- restore
// when repairing the file's character encoding
memset(¶ms, 0, sizeof(params));
params.pagesize = remote_core_state->pagesize;
/* Initialize the barrelfish library */
err = barrelfish_init_onthread(¶ms);
if (err_is_fail(err)) {
DEBUG_ERR(err, "barrelfish_init_onthread failed");
// Connect to all dispatchers eagerly
remote_core_state->cnt = 0;
// NOTE(review): this condition indexes allirefs[cnt] BEFORE checking
// cnt < MAX_CPUS, reading one past the array when every slot is active;
// the bound check should come first (cf. same pattern in client_connected)
while(allirefs[remote_core_state->cnt] == NULL_IREF && remote_core_state->cnt < MAX_CPUS) {
remote_core_state->cnt++;
if(remote_core_state->cnt == disp_get_core_id()) {
remote_core_state->cnt++;
// Don't move before barrelfish_init_onthread()
struct domain_state *st = get_domain_state();
if(remote_core_state->cnt != MAX_CPUS) {
err = interdisp_bind(allirefs[remote_core_state->cnt], client_connected,
remote_core_state, &st->interdisp_ws,
IDC_BIND_FLAGS_DEFAULT);
if(err_is_fail(err)) {
USER_PANIC_ERR(err, "Failure binding to inter-dispatcher service");
// Spin on the default waitset until the client_connected chain completes
while(!remote_core_state->initialized) {
event_dispatch(get_default_waitset());
/* Free unnecessary state */
free(remote_core_state);
/* XXX: create a thread that will handle the default waitset */
st->default_waitset_handler = thread_create(span_slave_thread, NULL);
assert(st->default_waitset_handler != NULL);
// Become the interdisp message handler for this core (never returns)
return interdisp_msg_handler(&st->interdisp_ws);
/**
 * \brief Runs disabled on the remote core to initialize
 *
 * First code executed on the spanned dispatcher: initializes the
 * dispatcher itself, then the threads package, which in turn runs
 * remote_core_init_enabled on the given thread.
 */
static void remote_core_init_disabled(struct thread *thread)
dispatcher_handle_t disp = thread->disp;
/* Initialize the dispatcher */
disp_init_disabled(disp);
/* Initialize the threads library, and call remote_core_init_enabled */
thread_init_remote(disp, thread);
/**
 * \brief Initialize the domain library
 *
 * Allocates and installs the per-dispatcher domain_state, then exports
 * the interdisp service and waits (handling messages) until the export
 * callback has recorded the iref.
 *
 * \return LIB_ERR_MALLOC_FAIL on allocation failure, otherwise the
 *         export result / SYS_ERR_OK.
 */
errval_t domain_init(void)
struct domain_state *domain_state = malloc(sizeof(struct domain_state));
return LIB_ERR_MALLOC_FAIL;
set_domain_state(domain_state);
domain_state->iref = 0;
domain_state->default_waitset_handler = NULL;
domain_state->remote_wakeup_queue = NULL;
waitset_chanstate_init(&domain_state->remote_wakeup_event,
CHANTYPE_EVENT_QUEUE);
for (int i = 0; i < MAX_CPUS; i++) {
domain_state->binding[i] = NULL;
waitset_init(&domain_state->interdisp_ws);
// 'conditional' is flipped by server_listening() when the export completes
domain_state->conditional = false;
err = interdisp_export(NULL, server_listening, server_connected,
&domain_state->interdisp_ws, IDC_EXPORT_FLAGS_DEFAULT);
if (err_is_fail(err)) {
// XXX: Wait for the export to finish before returning
while(!domain_state->conditional) {
messages_wait_and_handle_next();
569 * \brief Handler to continue spanning domain state machine
571 static void span_domain_reply(struct monitor_binding *mb,
572 errval_t msgerr, uintptr_t domain_id)
574 /* On success, no further action needed */
575 if (err_is_ok(msgerr)) {
579 /* On failure, release resources and notify the caller */
580 struct span_domain_state *span_domain_state =
581 (struct span_domain_state*)domain_id;
582 errval_t err = cap_destroy(span_domain_state->frame);
583 if (err_is_fail(err)) {
584 err_push(msgerr, LIB_ERR_CAP_DESTROY);
587 if (span_domain_state->callback) { /* Use the callback to return error */
588 span_domain_state->callback(span_domain_state->callback_arg, msgerr);
589 } else { /* Use debug_err if no callback registered */
590 DEBUG_ERR(msgerr, "Failure in span_domain_reply");
592 free(span_domain_state);
/**
 * \brief Sends span_domain_request to the monitor, re-registering while
 * the binding is busy. Caller must hold the monitor binding's event mutex;
 * the mutex is released on completion or permanent failure.
 */
static void span_domain_request_sender(void *arg)
struct monitor_binding *mb = arg;
// The span_domain_state was stashed on mb->st by the wrapper below
struct span_domain_state *st = mb->st;
errval_t err = mb->tx_vtbl.
span_domain_request(mb, NOP_CONT, (uintptr_t)st, st->core_id, st->vroot,
if (err_is_ok(err)) {
event_mutex_unlock(&mb->mutex);
} else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
/* Wait to use the monitor binding */
err = mb->register_send(mb, mb->waitset,
MKCONT(span_domain_request_sender,mb));
if(err_is_fail(err)) { // shouldn't fail, as we have the mutex
USER_PANIC_ERR(err, "register_send");
} else { // permanent error
event_mutex_unlock(&mb->mutex);
err = err_push(err, MON_CLIENT_ERR_SPAN_DOMAIN_REQUEST);
DEBUG_ERR(err, "span_domain_request");
// Event-mutex continuation: stashes the span state on the monitor binding
// and kicks off the send
static void span_domain_request_sender_wrapper(void *st)
struct monitor_binding *mb = get_monitor_binding();
span_domain_request_sender(mb);
/**
 * \brief Since we cannot dynamically grow our stack yet, we need a
 * version that will create threads on remote core with variable stack size
 *
 * Drives the whole spanning state machine: allocates and maps a new
 * dispatcher frame, seeds it to run remote_core_init_disabled, asks the
 * monitor to span, and blocks until the remote core reports initialized.
 *
 * \bug this is a hack
 */
static errval_t domain_new_dispatcher_varstack(coreid_t core_id,
domain_spanned_callback_t callback,
void *callback_arg, size_t stack_size)
assert(core_id != disp_get_core_id());
struct domain_state *domain_state = get_domain_state();
struct monitor_binding *mb = get_monitor_binding();
assert(domain_state != NULL);
/* Set reply handler */
mb->rx_vtbl.span_domain_reply = span_domain_reply;
while(domain_state->iref == 0) { /* If not initialized, wait */
messages_wait_and_handle_next();
/* Create the remote_core_state passed to the new dispatcher */
struct remote_core_state *remote_core_state =
calloc(1, sizeof(struct remote_core_state));
if (!remote_core_state) {
return LIB_ERR_MALLOC_FAIL;
remote_core_state->core_id = disp_get_core_id();
remote_core_state->iref = domain_state->iref;
/* get the alignment of the morecore state */
struct morecore_state *state = get_morecore_state();
remote_core_state->pagesize = state->mmu_state.alignment;
/* Create the thread for the new dispatcher to init on */
struct thread *newthread =
thread_create_unrunnable(remote_core_init_enabled,
(void*)remote_core_state, stack_size);
if (newthread == NULL) {
// NOTE(review): remote_core_state leaks on this error path
return LIB_ERR_THREAD_CREATE;
/* Save the state for later steps of the spanning state machine */
struct span_domain_state *span_domain_state =
malloc(sizeof(struct span_domain_state));
if (!span_domain_state) {
return LIB_ERR_MALLOC_FAIL;
span_domain_state->thread = newthread;
span_domain_state->core_id = core_id;
span_domain_state->callback = callback;
span_domain_state->callback_arg = callback_arg;
/* Give remote_core_state pointer to span_domain_state */
remote_core_state->span_domain_state = span_domain_state;
/* Start spanning domain state machine by sending vroot to the monitor */
struct capref vroot = {
/* Create new dispatcher frame */
size_t dispsize = ((size_t)1) << DISPATCHER_FRAME_BITS;
err = frame_alloc(&frame, dispsize, &dispsize);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_FRAME_ALLOC);
err = vspace_map_one_frame((void **)&dispaddr, dispsize, frame, NULL, NULL);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_VSPACE_MAP);
dispatcher_handle_t handle = dispaddr;
struct dispatcher_shared_generic *disp =
get_dispatcher_shared_generic(handle);
struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
arch_registers_state_t *disabled_area =
dispatcher_get_disabled_save_area(handle);
/* Set dispatcher on the newthread */
span_domain_state->thread->disp = handle;
span_domain_state->frame = frame;
span_domain_state->vroot = vroot;
/* Setup dispatcher */
disp->udisp = (lvaddr_t)handle;
disp->disabled = true;
disp_gen->core_id = span_domain_state->core_id;
// Setup the dispatcher to run remote_core_init_disabled
// and pass the created thread as an argument
registers_set_initial(disabled_area, span_domain_state->thread,
(lvaddr_t)remote_core_init_disabled,
(lvaddr_t)&disp_gen->stack[DISPATCHER_STACK_WORDS],
(uintptr_t)span_domain_state->thread, 0, 0, 0);
// Give dispatcher a unique name for debugging
snprintf(disp->name, DISP_NAME_LEN, "%s%d", disp_name(),
span_domain_state->core_id);
// XXX: share LDT state between all dispatchers
// this needs to happen before the remote core starts, otherwise the segment
// selectors in the new thread state are invalid
struct dispatcher_shared_x86_64 *disp_x64
= get_dispatcher_shared_x86_64(handle);
struct dispatcher_shared_x86_64 *mydisp_x64
= get_dispatcher_shared_x86_64(curdispatcher());
disp_x64->ldt_base = mydisp_x64->ldt_base;
disp_x64->ldt_npages = mydisp_x64->ldt_npages;
threads_prepare_to_span(handle);
// Setup new local thread for inter-dispatcher messages, if not already done
static struct thread *interdisp_thread = NULL;
if(interdisp_thread == NULL) {
interdisp_thread = thread_create(interdisp_msg_handler,
&domain_state->interdisp_ws);
err = thread_detach(interdisp_thread);
assert(err_is_ok(err));
// XXX: Tell currently active interdisp-threads to handle default waitset
for(int i = 0; i < MAX_CPUS; i++) {
// NOTE(review): 'domain_state->b[i]' looks wrong -- the field is named
// 'binding' everywhere else in this file; presumably should be
// domain_state->binding[i] -- verify against struct domain_state
struct interdisp_binding *b = domain_state->b[i];
if(disp_get_core_id() != i && b != NULL) {
err = b->tx_vtbl.span_slave(b, NOP_CONT);
assert(err_is_ok(err));
/* XXX: create a thread that will handle the default waitset */
if (domain_state->default_waitset_handler == NULL) {
domain_state->default_waitset_handler
= thread_create(span_slave_thread, NULL);
assert(domain_state->default_waitset_handler != NULL);
/* Wait to use the monitor binding */
struct monitor_binding *mcb = get_monitor_binding();
event_mutex_enqueue_lock(&mcb->mutex, &span_domain_state->event_qnode,
(struct event_closure) {
.handler = span_domain_request_sender_wrapper,
.arg = span_domain_state });
// Block until dispatcher_initialized_handler marks the span complete
while(!span_domain_state->initialized) {
event_dispatch(get_default_waitset());
free(span_domain_state);
/**
 * \brief Creates a dispatcher on a remote core
 *
 * \param core_id Id of the core to create the dispatcher on
 * \param callback Callback to use when new dispatcher is created
 *
 * The new dispatcher is created with the same vroot, sharing the same vspace.
 * The new dispatcher also has a urpc connection to the core that created it.
 */
errval_t domain_new_dispatcher(coreid_t core_id,
domain_spanned_callback_t callback,
// Delegate to the varstack variant with the default stack size
return domain_new_dispatcher_varstack(core_id, callback, callback_arg,
THREADS_DEFAULT_STACK_BYTES);
/**
 * \brief Send a capability to a dispatcher of this domain on another core.
 *
 * \return LIB_ERR_NO_SPANNED_DISP if no binding exists for core_id.
 */
errval_t domain_send_cap(coreid_t core_id, struct capref cap)
struct domain_state *domain_state = get_domain_state();
if (!domain_state->binding[core_id]) {
return LIB_ERR_NO_SPANNED_DISP;
// Reset the module-global completion flags before sending
send_cap_err = SYS_ERR_OK;
cap_received = false;
struct interdisp_binding *b = domain_state->binding[core_id];
// NOTE(review): '&cap' is the address of a stack parameter; the remote
// side copies into it after this function may have returned (the wait
// loop below is commented out) -- verify the intended destination
err = b->tx_vtbl.send_cap_request(b, NOP_CONT, cap, (uintptr_t)&cap);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_SEND_CAP_REQUEST);
// TODO: Handled on different thread
/* while(!cap_received) { */
/* messages_wait_and_handle_next(); */
/**
 * \brief Wakeup a thread on a foreign dispatcher while disabled.
 *
 * \param core_id Core ID to wakeup on
 * \param thread Pointer to thread to wakeup
 * \param mydisp Dispatcher this function is running on
 *
 * \return SYS_ERR_OK on success.
 */
static errval_t domain_wakeup_on_coreid_disabled(coreid_t core_id,
struct thread *thread,
dispatcher_handle_t mydisp)
struct domain_state *ds = get_domain_state();
// XXX: Ugly hack to allow waking up on a core id we don't have a
// dispatcher handler for
thread->coreid = core_id;
assert_disabled(ds != NULL);
if (ds->binding[core_id] == NULL) {
return LIB_ERR_NO_SPANNED_DISP;
// Queue the thread; the interdisp thread drains the queue wholesale
thread_enqueue(thread, &ds->remote_wakeup_queue);
// Signal the inter-disp waitset of this event
struct event_closure closure = {
.handler = handle_wakeup_on
waitset_chan_trigger_closure_disabled(&ds->interdisp_ws,
&ds->remote_wakeup_event,
// An already-registered event is fine: the handler drains the whole queue
assert_disabled(err_is_ok(err) ||
err_no(err) == LIB_ERR_CHAN_ALREADY_REGISTERED);
/**
 * \brief Wake a thread on its owning dispatcher, from disabled context.
 */
errval_t domain_wakeup_on_disabled(dispatcher_handle_t disp,
struct thread *thread,
dispatcher_handle_t mydisp)
coreid_t core_id = disp_handle_get_core_id(disp);
// TODO: Can't wakeup on anyone else than the owning dispatcher yet
assert_disabled(disp == thread->disp);
return domain_wakeup_on_coreid_disabled(core_id, thread, mydisp);
// Enabled wrapper: disables the dispatcher around the wakeup
errval_t domain_wakeup_on(dispatcher_handle_t disp,
struct thread *thread)
dispatcher_handle_t mydisp = disp_disable();
errval_t err = domain_wakeup_on_disabled(disp, thread, mydisp);
/**
 * \brief Migrate the calling thread to another core.
 *
 * Removes the thread from the local run queue, hands it to the remote
 * core via the disabled wakeup path, then resumes the next local thread
 * or yields the dispatcher. Does not return on success.
 */
errval_t domain_thread_move_to(struct thread *thread, coreid_t core_id)
assert(thread == thread_self());
dispatcher_handle_t mydisp = disp_disable();
struct dispatcher_generic *disp_gen = get_dispatcher_generic(mydisp);
struct dispatcher_shared_generic *disp =
get_dispatcher_shared_generic(mydisp);
// Remember our successor before leaving the run queue
struct thread *next = thread->next;
thread_remove_from_queue(&disp_gen->runq, thread);
errval_t err = domain_wakeup_on_coreid_disabled(core_id, thread, mydisp);
if(err_is_fail(err)) {
// Failed to hand off: put ourselves back on the run queue
thread_enqueue(thread, &disp_gen->runq);
// run the next thread, if any
if (next != thread) {
disp_gen->current = next;
disp_resume(mydisp, &next->regs);
// No other runnable thread: go idle on this dispatcher
disp_gen->current = NULL;
disp->haswork = havework_disabled(mydisp);
disp_yield_disabled(mydisp);
USER_PANIC("should never be reached");
/**
 * \brief Create a thread on a given core, with a given stack size.
 *
 * A stacksize of 0 selects the default. The local core uses plain
 * thread_create*(); a remote core is reached via an RPC on the interdisp
 * binding, serviced on a private temporary waitset until the reply.
 */
errval_t domain_thread_create_on_varstack(coreid_t core_id,
thread_func_t start_func,
void *arg, size_t stacksize,
struct thread **newthread)
if (disp_get_core_id() == core_id) {
struct thread *th = NULL;
if (stacksize == 0) {
th = thread_create(start_func, arg);
th = thread_create_varstack(start_func, arg, stacksize);
return LIB_ERR_THREAD_CREATE;
struct domain_state *domain_state = get_domain_state();
if (domain_state->binding[core_id] == NULL) {
return LIB_ERR_NO_SPANNED_DISP;
struct interdisp_binding *b = domain_state->binding[core_id];
struct create_thread_req *req = malloc(sizeof(*req));
req->reply_received = false;
// use special waitset to make sure loop exits properly.
struct waitset ws, *old_ws = b->waitset;
// Redirect the binding onto the private waitset for the RPC round trip
b->change_waitset(b, &ws);
err = b->tx_vtbl.create_thread_request(b, NOP_CONT,
(genvaddr_t)(uintptr_t)start_func,
(genvaddr_t)(uintptr_t)arg,
(genvaddr_t)(lvaddr_t)req);
if (err_is_fail(err)) {
while (!req->reply_received) {
*newthread = req->thread;
// Restore the binding's original waitset
b->change_waitset(b, old_ws);
989 errval_t domain_thread_create_on(coreid_t core_id, thread_func_t start_func,
990 void *arg, struct thread **newthread)
992 return domain_thread_create_on_varstack(core_id, start_func, arg, 0, newthread);
/**
 * \brief Join a thread that may live on another core.
 *
 * Local threads use thread_join() directly; remote threads go through an
 * RPC on the interdisp binding, serviced on a private temporary waitset
 * until the reply arrives.
 */
errval_t domain_thread_join(struct thread *thread, int *retval)
coreid_t core_id = thread->coreid;
if (disp_get_core_id() == core_id) {
return thread_join(thread, retval);
struct domain_state *domain_state = get_domain_state();
if (domain_state->binding[core_id] == NULL) {
return LIB_ERR_NO_SPANNED_DISP;
struct interdisp_binding *b = domain_state->binding[core_id];
struct join_thread_req *req = malloc(sizeof(*req));
req->reply_received = false;
// use special waitset to make sure loop exits properly.
struct waitset ws, *old_ws = b->waitset;
b->change_waitset(b, &ws);
err = b->tx_vtbl.join_thread_request(b, NOP_CONT,
(genvaddr_t)(lvaddr_t)thread,
(genvaddr_t)(lvaddr_t)req);
if (err_is_fail(err)) {
while (!req->reply_received) {
event_dispatch(&ws);
// change waitset back
b->change_waitset(b, old_ws);
// NOTE(review): dereferences retval unconditionally here, unlike the
// local thread_join() path -- confirm callers never pass NULL
*retval = req->retval;
1039 * \brief set the core_id.
1041 * Code using this should do a kernel_cap invocation to get the core_id first.
1043 void disp_set_core_id(coreid_t core_id)
1045 dispatcher_handle_t handle = curdispatcher();
1046 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1047 disp->core_id = core_id;
1052 * \brief returns the address and the size of the EH frame
1054 * \param eh_frame returned virtual address of the EH frame
1055 * \param eh_frame_size returned size of the EH frame
1057 void disp_get_eh_frame(lvaddr_t *eh_frame,
1058 size_t *eh_frame_size)
1060 dispatcher_handle_t handle = curdispatcher();
1061 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1063 *eh_frame = disp->eh_frame;
1065 if (eh_frame_size) {
1066 *eh_frame_size = disp->eh_frame_size;
1071 * \brief returns the address and the size of the EH frame header
1073 * \param eh_frame returned virtual address of the EH frame
1074 * \param eh_frame_size returned size of the EH frame
1076 void disp_get_eh_frame_hdr(lvaddr_t *eh_frame_hdr,
1077 size_t *eh_frame_hdr_size)
1079 dispatcher_handle_t handle = curdispatcher();
1080 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1082 *eh_frame_hdr = disp->eh_frame_hdr;
1084 if (eh_frame_hdr_size) {
1085 *eh_frame_hdr_size = disp->eh_frame_hdr_size;
1090 * \brief returns the core_id stored in disp_priv struct
1092 coreid_t disp_get_core_id(void)
1094 dispatcher_handle_t handle = curdispatcher();
1095 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1096 return disp->core_id;
1100 * \brief returns the current core_id stored in disp_shared struct
1102 coreid_t disp_get_current_core_id(void)
1104 dispatcher_handle_t handle = curdispatcher();
1105 struct dispatcher_shared_generic* disp = get_dispatcher_shared_generic(handle);
1106 return disp->curr_core_id;
1110 * \brief returns the domain_id stored in disp_priv struct
1112 domainid_t disp_get_domain_id(void)
1114 dispatcher_handle_t handle = curdispatcher();
1115 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1116 return disp->domain_id;
1120 * \brief returns the core_id stored in disp_priv struct
1122 coreid_t disp_handle_get_core_id(dispatcher_handle_t handle)
1124 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1125 return disp->core_id;
1128 struct waitset *get_default_waitset(void)
1130 dispatcher_handle_t handle = curdispatcher();
1131 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1132 return &disp->core_state.c.default_waitset;
1136 * \brief set the monitor client binding on the dispatcher priv
1138 void set_monitor_binding(struct monitor_binding *b)
1140 dispatcher_handle_t handle = curdispatcher();
1141 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1142 disp->core_state.c.monitor_binding = b;
1146 * \brief Returns the monitor client binding on the dispatcher priv
1148 struct monitor_binding *get_monitor_binding(void)
1150 dispatcher_handle_t handle = curdispatcher();
1151 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1152 return disp->core_state.c.monitor_binding;
1155 struct waitset_chanstate *get_monitor_binding_chanstate(void)
1157 struct monitor_binding *mb = get_monitor_binding();
1158 return mb->get_receiving_chanstate(mb);
1162 * \brief set the blocking rpc monitor client binding on the dispatcher priv
1164 void set_monitor_blocking_binding(struct monitor_blocking_binding *st)
1166 dispatcher_handle_t handle = curdispatcher();
1167 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1168 disp->core_state.c.monitor_blocking_binding = st;
1172 * \brief Returns the blocking rpc monitor client binding on the
1175 struct monitor_blocking_binding *get_monitor_blocking_binding(void)
1177 dispatcher_handle_t handle = curdispatcher();
1178 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1179 return disp->core_state.c.monitor_blocking_binding;
1183 * \brief set the mem client on the dispatcher priv
1185 void set_mem_client(struct mem_binding *st)
1187 dispatcher_handle_t handle = curdispatcher();
1188 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1189 disp->core_state.c.mem_st = st;
1193 * \brief Returns the mem client on the dispatcher priv
1195 struct mem_binding *get_mem_client(void)
1197 dispatcher_handle_t handle = curdispatcher();
1198 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1199 return disp->core_state.c.mem_st;
1203 * \brief Returns a pointer to the current vspace on the dispatcher priv
1205 struct vspace *get_current_vspace(void)
1207 dispatcher_handle_t handle = curdispatcher();
1208 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1209 return &disp->core_state.vspace_state.vspace;
1213 * \brief Returns a pointer to the current pinned state on the dispatcher priv
1215 struct pinned_state *get_current_pinned_state(void)
1217 dispatcher_handle_t handle = curdispatcher();
1218 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1219 return &disp->core_state.pinned_state;
1223 * \brief Returns a pointer to the current pmap on the dispatcher priv
1225 struct pmap *get_current_pmap(void)
1227 dispatcher_handle_t handle = curdispatcher();
1228 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1229 return (struct pmap*)&disp->core_state.vspace_state.pmap;
1233 * \brief Returns a pointer to the morecore state on the dispatcher priv
1235 struct morecore_state *get_morecore_state(void)
1237 dispatcher_handle_t handle = curdispatcher();
1238 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1239 return &disp->core_state.c.morecore_state;
1243 * \brief Returns a pointer to the ram_alloc state on the dispatcher priv
1245 struct ram_alloc_state *get_ram_alloc_state(void)
1247 dispatcher_handle_t handle = curdispatcher();
1248 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1249 return &disp->core_state.c.ram_alloc_state;
1253 * \brief Returns a pointer to the ram_alloc state on the dispatcher priv
1255 struct skb_state *get_skb_state(void)
1257 dispatcher_handle_t handle = curdispatcher();
1258 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1259 return &disp->core_state.c.skb_state;
1263 * \brief Returns a pointer to the octopus rpc client on the dispatcher priv
1265 struct octopus_binding *get_octopus_binding(void)
1267 dispatcher_handle_t handle = curdispatcher();
1268 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1269 return disp->core_state.c.octopus_binding;
1273 * \brief Sets the octopus rpc client on the dispatcher priv
1275 void set_octopus_binding(struct octopus_binding *c)
1277 dispatcher_handle_t handle = curdispatcher();
1278 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1279 disp->core_state.c.octopus_binding = c;
1283 * \brief Returns a pointer to the chips_context state on the dispatcher priv
1285 struct spawn_binding *get_spawn_binding(coreid_t core)
1287 dispatcher_handle_t handle = curdispatcher();
1288 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1289 assert(core < MAX_CPUS);
1290 return disp->core_state.c.spawn_bindings[core];
1294 * \brief set the chips_context state on the dispatcher priv
1296 void set_spawn_binding(coreid_t core, struct spawn_binding *c)
1298 dispatcher_handle_t handle = curdispatcher();
1299 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1300 assert(core < MAX_CPUS);
1301 disp->core_state.c.spawn_bindings[core] = c;
1304 * \brief Returns a pointer to the proc_mgmt rpc client on the dispatcher priv
1306 struct proc_mgmt_binding *get_proc_mgmt_binding(void)
1308 dispatcher_handle_t handle = curdispatcher();
1309 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1310 return disp->core_state.c.proc_mgmt_binding;
1314 * \brief Sets the prog_mgmt rpc client on the dispatcher priv
1316 void set_proc_mgmt_binding(struct proc_mgmt_binding *c)
1318 dispatcher_handle_t handle = curdispatcher();
1319 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1320 disp->core_state.c.proc_mgmt_binding = c;
1323 struct arrakis_binding *get_arrakis_binding(coreid_t core)
1325 dispatcher_handle_t handle = curdispatcher();
1326 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1327 assert(core < MAX_CPUS);
1328 return disp->core_state.c.arrakis_bindings[core];
1332 * \brief set the chips_context state on the dispatcher priv
1334 void set_arrakis_binding(coreid_t core, struct arrakis_binding *c)
1336 dispatcher_handle_t handle = curdispatcher();
1337 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1338 assert(core < MAX_CPUS);
1339 disp->core_state.c.arrakis_bindings[core] = c;
1343 * \brief Returns a pointer to the terminal state on the dispatcher priv
1345 struct terminal_state *get_terminal_state(void)
1347 dispatcher_handle_t handle = curdispatcher();
1348 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1349 return disp->core_state.c.terminal_state;
1353 * \brief set the terminal state on the dispatcher priv
1355 void set_terminal_state(struct terminal_state *st)
1357 dispatcher_handle_t handle = curdispatcher();
1358 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1359 disp->core_state.c.terminal_state = st;
1363 * \brief Returns a pointer to the domain state on the dispatcher priv
1365 struct domain_state *get_domain_state(void)
1367 dispatcher_handle_t handle = curdispatcher();
1368 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1369 return disp->core_state.c.domain_state;
1373 * \brief set the domain state on the dispatcher priv
1375 void set_domain_state(struct domain_state *st)
1377 dispatcher_handle_t handle = curdispatcher();
1378 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1379 disp->core_state.c.domain_state = st;
1383 * \brief Returns a pointer to the spawn state on the dispatcher priv
1385 struct spawn_state *get_spawn_state(void)
1387 dispatcher_handle_t handle = curdispatcher();
1388 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1389 return disp->core_state.c.spawn_state;
1393 * \brief set the spawn state on the dispatcher priv
1395 void set_spawn_state(struct spawn_state *st)
1397 dispatcher_handle_t handle = curdispatcher();
1398 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1399 disp->core_state.c.spawn_state = st;
1403 * \brief Returns a pointer to the spawn state on the dispatcher priv
1405 struct slot_alloc_state *get_slot_alloc_state(void)
1407 dispatcher_handle_t handle = curdispatcher();
1408 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1409 return &disp->core_state.c.slot_alloc_state;