3 * \brief Manage domain spanning cores
5 * \bug Need to specify how big the default thread on the spanned dispatcher
6 * should be because we cannot dynamically grow our stacks
8 * \bug Can only do domain_new_dispatcher() when no other dispatchers have
9 * threads (except for the internal interdisp-thread).
13 * Copyright (c) 2009, 2010, 2011, 2012, ETH Zurich.
14 * All rights reserved.
16 * This file is distributed under the terms in the attached LICENSE file.
17 * If you do not find this file, copies can be found by writing to:
18 * ETH Zurich D-INFK, CAB F.78, Universitaetstr. 6, CH-8092 Zurich,
19 * Attn: Systems Group.
23 #include <barrelfish/barrelfish.h>
24 #include <barrelfish/curdispatcher_arch.h>
25 #include <barrelfish/dispatcher_arch.h>
26 #include <barrelfish/waitset_chan.h>
27 #include <barrelfish_kpi/domain_params.h>
28 #include <arch/registers.h>
29 #include <barrelfish/dispatch.h>
30 #include <if/interdisp_defs.h>
31 #include "arch/threads.h"
33 #include <if/monitor_defs.h>
34 #include "threads_priv.h"
35 #include "waitset_chan_priv.h"
///< Struct to maintain per dispatcher domain library state
    iref_t iref;  ///< Iref for the interdisp service
    struct interdisp_binding *binding[MAX_CPUS]; ///< Interdisp binding per core; NULL when not connected
    struct waitset interdisp_ws;            ///< Waitset serviced by the interdisp handler thread
    struct thread *default_waitset_handler; ///< Thread servicing the default waitset while spanned
    struct thread *remote_wakeup_queue;     ///< Threads queued for wakeup on a foreign dispatcher
    struct waitset_chanstate remote_wakeup_event; ///< Event used to signal interdisp_ws about queued wakeups
///< Struct to send init information to the dispatcher that was spanned
struct remote_core_state {
    iref_t iref;                  ///< Iref of the interdisp service to connect to
    uint8_t core_id;              ///< Core id of the domain that spanned this dispatcher
    struct span_domain_state *span_domain_state; ///< Reference to the span_domain_state of the "server"
    bool initialized;             ///< true if remote core is fully initialized
    int cnt;                      ///< Used to count dispatchers connected to so far
    size_t pagesize;              ///< the pagesize to be used for the heap
///< Array of all interdisp IREFs in the domain, indexed by core id
static iref_t allirefs[MAX_CPUS];
/**
 * \brief Deferred handler, run on the default waitset, once a spanned
 *        dispatcher has reported that it finished initializing.
 *
 * Tells every other connected dispatcher that the span completed, then
 * upcalls the callback registered with domain_new_dispatcher().
 */
static void dispatcher_initialized_handler(void *arg)
    struct span_domain_state *span_domain_state = arg;
    struct domain_state *domain_state = get_domain_state();

    // XXX: Tell currently active interdisp-threads to handle default waitset
    for(int i = 0; i < MAX_CPUS; i++) {
        struct interdisp_binding *b = domain_state->binding[i];
        // Skip ourselves, the just-spanned core, and unconnected cores
        if(disp_get_core_id() != i &&
           span_domain_state->core_id != i && b != NULL) {
            errval_t err = b->tx_vtbl.span_slave_done(b, NOP_CONT);
            assert(err_is_ok(err));

    /* Upcall into the domain_new_dispatcher callback if registered */
    if (span_domain_state->callback) {
        span_domain_state->callback(span_domain_state->callback_arg, SYS_ERR_OK);
    // Releases waiters spinning on this flag (e.g. domain_new_dispatcher)
    span_domain_state->initialized = 1;
    //free(span_domain_state);
/**
 * \brief Handler for dispatcher_initialized msg type
 *
 * Called when a recently spanned dispatcher has initialized.
 * Stores its connection object, and upcalls into the registered callback
 * by deferring onto the default waitset via an event closure.
 */
static void dispatcher_initialized(struct interdisp_binding *st, genvaddr_t id)
    // The spanning core passed its span_domain_state pointer as `id`
    struct span_domain_state *span_domain_state = (struct span_domain_state*)(uintptr_t)id;

    // Signal the default waitset of this event
    struct event_closure closure = {
        .handler = dispatcher_initialized_handler,
        .arg = span_domain_state,
    waitset_chanstate_init(&span_domain_state->initev, CHANTYPE_EVENT_QUEUE);
    errval_t err = waitset_chan_trigger_closure(get_default_waitset(),
                                                &span_domain_state->initev,
    if(err_is_fail(err)) {
        USER_PANIC_ERR(err, "Triggering default waitset");
110 static void send_cap_request(struct interdisp_binding *st,
111 struct capref cap, genvaddr_t info)
113 errval_t err = SYS_ERR_OK, err2;
114 struct capref *dest = (struct capref *)(uintptr_t)info;
116 err = cap_copy(*dest, cap);
117 if(err_is_fail(err)) {
118 err_push(err, LIB_ERR_CAP_COPY_FAIL);
119 DEBUG_ERR(err, "cap_copy");
123 err = cap_destroy(cap);
124 if(err_is_fail(err)) {
125 err_push(err, LIB_ERR_CAP_DELETE_FAIL);
126 DEBUG_ERR(err, "cap_destroy default");
132 err2 = st->tx_vtbl.send_cap_reply(st, NOP_CONT, err);
133 if (err_is_fail(err2)) {
134 DEBUG_ERR(err, "Failed to send send_cap_reply");
// Shared with domain_send_cap(): outcome of the last send_cap exchange
static errval_t send_cap_err = SYS_ERR_OK;
static bool cap_received = false;

/**
 * \brief Handler for the reply to a send_cap_request.
 * (Body not visible in this chunk; presumably records err in send_cap_err
 * and sets cap_received -- TODO confirm.)
 */
static void send_cap_reply(struct interdisp_binding *st, errval_t err)
/**
 * \brief Handler for a remote request to create a thread on this core.
 *
 * Creates the thread locally (variable-size stack if requested) and
 * replies with the new thread's address.
 */
static void create_thread_request(struct interdisp_binding *b,
                                  genvaddr_t funcaddr, genvaddr_t argaddr,
                                  uint64_t stacksize, genvaddr_t req)
    thread_func_t start_func = (thread_func_t)(uintptr_t)funcaddr;
    void *arg = (void *)(uintptr_t)argaddr;
    struct thread *newthread;

    // XXX: Probably want to return pointer to thread struct to caller
    // (stacksize-selector conditional not visible in this chunk)
        newthread = thread_create_varstack(start_func, arg, stacksize);
        newthread = thread_create(start_func, arg);
    assert(newthread != NULL);
    errval_t err = b->tx_vtbl.create_thread_reply(b, NOP_CONT, SYS_ERR_OK,
                                                  (genvaddr_t)(lvaddr_t)newthread,
    assert(err_is_ok(err));
///< Caller-side completion record for a remote create_thread request
struct create_thread_req {
    struct thread *thread; ///< Thread handle returned by the remote core
/**
 * \brief Handler for the reply to create_thread_request.
 *
 * Records the created thread in the waiting request record and flags
 * completion so the blocked caller's wait loop exits.
 */
static void create_thread_reply(struct interdisp_binding *b,
                                errval_t err, genvaddr_t thread, genvaddr_t req)
    assert(err_is_ok(err));
    // Set the thread that was created and wake the waiting caller
    struct create_thread_req *r = (struct create_thread_req*)(lvaddr_t)req;
    r->thread = (struct thread *)(lvaddr_t)thread;
    r->reply_received = true;
/**
 * \brief Handler for a request to wake a thread belonging to this core.
 *
 * Runs with the dispatcher disabled while moving the thread onto the
 * local run queue.
 */
static void wakeup_thread_request(struct interdisp_binding *b,
    coreid_t core_id = disp_get_core_id();
    struct thread *wakeup = (struct thread *)(uintptr_t)taddr;
    dispatcher_handle_t handle = disp_disable();
    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
    /* assert_disabled(wakeup->disp == handle); */
    assert_disabled(wakeup->coreid == core_id);
    // Claim the thread for this dispatcher before enqueueing it
    wakeup->disp = handle;
    thread_enqueue(wakeup, &disp_gen->runq);
/**
 * \brief Handler for a remote join request: joins the thread locally and
 *        replies with its exit value.
 */
static void join_thread_request(struct interdisp_binding *b,
                                genvaddr_t taddr, genvaddr_t req)
    struct thread *thread = (struct thread *)(lvaddr_t)taddr;
    assert(thread->coreid == disp_get_core_id());
    // NOTE(review): thread_join() blocks this handler until the thread exits
    errval_t err = thread_join(thread, &retval);
    err = b->tx_vtbl.join_thread_reply(b, NOP_CONT, err, retval, req);
    assert(err_is_ok(err));
///< Caller-side completion record for a remote join_thread request
struct join_thread_req {
/**
 * \brief Handler for the reply to join_thread_request; records the joined
 *        thread's outcome and flags completion for the waiting caller.
 */
static void join_thread_reply(struct interdisp_binding *b,
                              errval_t err, uint64_t retval, genvaddr_t req)
    struct join_thread_req *r = (struct join_thread_req *)(lvaddr_t)req;
    r->reply_received = true;
/*
 * XXX: The whole span_slave*() thing is a hack to allow all
 * dispatchers to wait on both the monitor and interdisp waitsets
 * while we bind to all.
 */

/// Detached thread that endlessly services the default waitset
static int span_slave_thread(void *arg)
    errval_t err = thread_detach(thread_self());
    assert(err_is_ok(err));

    // (loop header not visible in this chunk) -- dispatches forever
        event_dispatch(get_default_waitset());
/// Interdisp handler; currently disabled (panics if ever invoked)
static void span_slave_request(struct interdisp_binding *b)
    USER_PANIC("shouldn't be called");
    // Unreachable: USER_PANIC above aborts before this runs
    thread_create(span_slave_thread, NULL);
/// Event counterpart of span_slave_done_request; currently disabled
static void span_slave_done_handler(void *cs)
    USER_PANIC("shouldn't be called");
/// Interdisp handler; currently disabled (panics if ever invoked)
static void span_slave_done_request(struct interdisp_binding *b)
    USER_PANIC("shouldn't be called");
    // Everything below is unreachable: USER_PANIC aborts first
    struct waitset_chanstate *cs = malloc(sizeof(struct waitset_chanstate));

    // Signal the default waitset of this event
    struct event_closure closure = {
        .handler = span_slave_done_handler,
    waitset_chanstate_init(cs, CHANTYPE_EVENT_QUEUE);
    errval_t err = waitset_chan_trigger_closure(get_default_waitset(), cs,
    if(err_is_fail(err)) {
        USER_PANIC_ERR(err, "Triggering default waitset");
/**
 * \brief Handler for span_eager_connect: remembers the sending core's
 *        binding under its core id.
 */
static void span_eager_connect_request(struct interdisp_binding *b,
    struct domain_state *domain_state = get_domain_state();

    /* Store the sending core's connection */
    domain_state->binding[core_id] = b;
/// Receive vtable installed on every interdisp binding in this domain
static struct interdisp_rx_vtbl interdisp_vtbl = {
    .dispatcher_initialized = dispatcher_initialized,

    .send_cap_request = send_cap_request,
    .send_cap_reply = send_cap_reply,

    .wakeup_thread = wakeup_thread_request,
    .create_thread_request = create_thread_request,
    .create_thread_reply = create_thread_reply,

    .join_thread_request = join_thread_request,
    .join_thread_reply = join_thread_reply,

    // XXX: Hack to allow domain_new_dispatcher() to proceed when not all
    // default waitsets are serviced
    .span_slave = span_slave_request,
    .span_slave_done = span_slave_done_request,
    .span_eager_connect = span_eager_connect_request,
302 * \brief Called when the "client" connects to "server"
304 * Make the connection a "server" connection, free unnecessary state.
305 * Send init msg to the dispatcher that spanned this dispatcher.
307 static void client_connected(void *st, errval_t err,
308 struct interdisp_binding *b)
310 struct remote_core_state *state = (struct remote_core_state*)st;
311 struct domain_state *domain_state = get_domain_state();
313 if(err_is_fail(err)) {
314 DEBUG_ERR(err, "binding to interdisp service");
318 /* Set it on the domain library state */
319 b->rx_vtbl = interdisp_vtbl;
320 domain_state->binding[state->cnt] = b;
322 // Send it our core id
323 err = b->tx_vtbl.span_eager_connect(b, NOP_CONT, disp_get_core_id());
324 if(err_is_fail(err)) {
325 USER_PANIC_ERR(err, "sending span_eager_connect");
328 // Connect to next active dispatcher
331 if(state->cnt == disp_get_core_id()) {
334 } while(allirefs[state->cnt] == NULL_IREF && state->cnt < MAX_CPUS);
336 if(state->cnt < MAX_CPUS) {
337 err = interdisp_bind(allirefs[state->cnt], client_connected,
338 state, &domain_state->interdisp_ws,
339 IDC_BIND_FLAGS_DEFAULT);
340 if(err_is_fail(err)) {
341 USER_PANIC_ERR(err, "Binding to inter-dispatcher service");
344 struct interdisp_binding *sb = domain_state->binding[state->core_id];
345 /* Send initialized msg to the dispatcher that spanned us */
346 errval_t err2 = sb->tx_vtbl.
347 dispatcher_initialized(sb, NOP_CONT,
348 (uintptr_t)state->span_domain_state);
349 if (err_is_fail(err2)) {
350 DEBUG_ERR(err, "failed to send initalized msg");
354 state->initialized = true;
/**
 * \brief Called when a new "client" dispatcher connects to our service:
 *        installs the shared receive vtable on the new binding.
 *        (Return statement not visible in this chunk.)
 */
static errval_t server_connected(void *st, struct interdisp_binding *b)
    b->rx_vtbl = interdisp_vtbl;
/**
 * \brief Called when domain gets a interdisp service.
 * It will set it on the domain_state.
 */
static void server_listening(void *st, errval_t err, iref_t iref)
    if(err_is_fail(err)) {
        DEBUG_ERR(err, "interdisp service export");
        // (error exit path not visible in this chunk)

    struct domain_state *domain_state = get_domain_state();
    domain_state->iref = iref;

    // Also set in the global array
    allirefs[disp_get_core_id()] = iref;
    // Releases domain_init(), which spins on this flag
    domain_state->conditional = true;
/**
 * \brief Called on the inter-disp handler thread, when another thread
 * on this dispatcher wants to wakeup a thread on a foreign dispatcher.
 */
static void handle_wakeup_on(void *arg)
    struct domain_state *domain_state = get_domain_state();

    assert(domain_state != NULL);

    // Dequeue all (disable to ensure mutual exclusion -- per dispatcher)
        struct thread *thread = NULL;

        dispatcher_handle_t disp = disp_disable();
        if(domain_state->remote_wakeup_queue != NULL) {
            thread = thread_dequeue(&domain_state->remote_wakeup_queue);

        // Break if queue empty

        /* coreid_t core_id = disp_handle_get_core_id(thread->disp); */
        coreid_t core_id = thread->coreid;

        assert(domain_state->binding[core_id] != NULL);

        // Forward the wakeup to the dispatcher owning the thread
        struct interdisp_binding *b = domain_state->binding[core_id];
        err = b->tx_vtbl.wakeup_thread(b, NOP_CONT, (genvaddr_t)(uintptr_t)thread);
        if (err_is_fail(err)) {
            USER_PANIC_ERR(err, "wakeup_thread");
/**
 * \brief Handler thread for inter-dispatcher messages
 * \param arg Pointer to inter-dispatcher waitset
 * \return 0 on successful exit
 */
static int interdisp_msg_handler(void *arg)
    struct waitset *ws = arg;

    debug_printf("Looping on inter-dispatcher message handler\n");
    // (loop header not visible in this chunk) -- dispatches ws forever
        errval_t err = event_dispatch(ws);
        if(err_is_fail(err)) {
            USER_PANIC_ERR(err, "error on event dispatch");
445 * \brief Runs enabled on the remote core to initialize the dispatcher
447 static int remote_core_init_enabled(void *arg)
450 struct remote_core_state *remote_core_state =
451 (struct remote_core_state*)arg;
453 /* construct a temporary spawn param to supply the morecore alignment */
454 struct spawn_domain_params params;
455 memset(¶ms, 0, sizeof(params));
456 params.pagesize = remote_core_state->pagesize;
458 /* Initialize the barrelfish library */
459 err = barrelfish_init_onthread(¶ms);
460 if (err_is_fail(err)) {
461 DEBUG_ERR(err, "barrelfish_init_onthread failed");
466 // Connect to all dispatchers eagerly
467 remote_core_state->cnt = 0;
468 while(allirefs[remote_core_state->cnt] == NULL_IREF && remote_core_state->cnt < MAX_CPUS) {
469 remote_core_state->cnt++;
470 if(remote_core_state->cnt == disp_get_core_id()) {
471 remote_core_state->cnt++;
474 // Don't move before barrelfish_init_onthread()
475 struct domain_state *st = get_domain_state();
476 if(remote_core_state->cnt != MAX_CPUS) {
477 err = interdisp_bind(allirefs[remote_core_state->cnt], client_connected,
478 remote_core_state, &st->interdisp_ws,
479 IDC_BIND_FLAGS_DEFAULT);
480 if(err_is_fail(err)) {
481 USER_PANIC_ERR(err, "Failure binding to inter-dispatcher service");
485 while(!remote_core_state->initialized) {
486 event_dispatch(get_default_waitset());
489 /* Free unnecessary state */
490 free(remote_core_state);
492 /* XXX: create a thread that will handle the default waitset */
493 st->default_waitset_handler = thread_create(span_slave_thread, NULL);
494 assert(st->default_waitset_handler != NULL);
496 return interdisp_msg_handler(&st->interdisp_ws);
/**
 * \brief Runs disabled on the remote core to initialize
 */
static void remote_core_init_disabled(struct thread *thread)
    dispatcher_handle_t disp = thread->disp;

    /* Initialize the dispatcher */
    disp_init_disabled(disp);

    /* Initialize the threads library, and call remote_core_init_enabled */
    thread_init_remote(disp, thread);
/**
 * \brief Initialize the domain library
 *
 * Registers a iref with the monitor to offer the interdisp service on this core
 * Does not block for completion.
 */
errval_t domain_init(void)
    struct domain_state *domain_state = malloc(sizeof(struct domain_state));
    // (NULL-check condition not visible in this chunk)
        return LIB_ERR_MALLOC_FAIL;
    set_domain_state(domain_state);

    // Start from a clean slate: no iref yet, no bindings, empty queues
    domain_state->iref = 0;
    domain_state->default_waitset_handler = NULL;
    domain_state->remote_wakeup_queue = NULL;
    waitset_chanstate_init(&domain_state->remote_wakeup_event,
                           CHANTYPE_EVENT_QUEUE);
    for (int i = 0; i < MAX_CPUS; i++) {
        domain_state->binding[i] = NULL;

    waitset_init(&domain_state->interdisp_ws);
    domain_state->conditional = false;
    err = interdisp_export(NULL, server_listening, server_connected,
                           &domain_state->interdisp_ws, IDC_EXPORT_FLAGS_DEFAULT);
    if (err_is_fail(err)) {

    // XXX: Wait for the export to finish before returning
    // (server_listening() sets domain_state->conditional)
    while(!domain_state->conditional) {
        messages_wait_and_handle_next();
554 * \brief Handler to continue spanning domain state machine
556 static void span_domain_reply(struct monitor_binding *mb,
557 errval_t msgerr, uintptr_t domain_id)
559 /* On success, no further action needed */
560 if (err_is_ok(msgerr)) {
564 /* On failure, release resources and notify the caller */
565 struct span_domain_state *span_domain_state =
566 (struct span_domain_state*)domain_id;
567 errval_t err = cap_destroy(span_domain_state->frame);
568 if (err_is_fail(err)) {
569 err_push(msgerr, LIB_ERR_CAP_DESTROY);
572 if (span_domain_state->callback) { /* Use the callback to return error */
573 span_domain_state->callback(span_domain_state->callback_arg, msgerr);
574 } else { /* Use debug_err if no callback registered */
575 DEBUG_ERR(msgerr, "Failure in span_domain_reply");
/**
 * \brief Transmit (or retry) the span_domain_request message to the
 *        monitor; holds the monitor-binding event mutex until done.
 */
static void span_domain_request_sender(void *arg)
    struct monitor_binding *mb = arg;
    struct span_domain_state *st = mb->st;

    errval_t err = mb->tx_vtbl.
        span_domain_request(mb, NOP_CONT, (uintptr_t)st, st->core_id, st->vroot,
    if (err_is_ok(err)) {
        event_mutex_unlock(&mb->mutex);
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        /* Wait to use the monitor binding */
        err = mb->register_send(mb, mb->waitset,
                                MKCONT(span_domain_request_sender,mb));
        if(err_is_fail(err)) { // shouldn't fail, as we have the mutex
            USER_PANIC_ERR(err, "register_send");
    } else { // permanent error
        event_mutex_unlock(&mb->mutex);
        err = err_push(err, MON_CLIENT_ERR_SPAN_DOMAIN_REQUEST);
        DEBUG_ERR(err, "span_domain_request");
/// Event-closure entry point that forwards to span_domain_request_sender()
static void span_domain_request_sender_wrapper(void *st)
    struct monitor_binding *mb = get_monitor_binding();
    // (mb->st assignment not visible in this chunk)
    span_domain_request_sender(mb);
/**
 * \brief Since we cannot dynamically grow our stack yet, we need a
 * version that will create threads on remote core with variable stack size
 *
 * \bug this is a hack
 */
static errval_t domain_new_dispatcher_varstack(coreid_t core_id,
                                               domain_spanned_callback_t callback,
                                               void *callback_arg, size_t stack_size,
                                               struct span_domain_state **ret_span_state)
    assert(core_id != disp_get_core_id());

    struct domain_state *domain_state = get_domain_state();
    assert(domain_state != NULL);

    while(domain_state->iref == 0) { /* If not initialized, wait */
        messages_wait_and_handle_next();

    /* Create the remote_core_state passed to the new dispatcher */
    struct remote_core_state *remote_core_state =
        calloc(1, sizeof(struct remote_core_state));
    if (!remote_core_state) {
        return LIB_ERR_MALLOC_FAIL;
    remote_core_state->core_id = disp_get_core_id();
    remote_core_state->iref = domain_state->iref;

    /* get the alignment of the morecore state */
    struct morecore_state *state = get_morecore_state();
    remote_core_state->pagesize = state->mmu_state.alignment;

    /* Create the thread for the new dispatcher to init on */
    struct thread *newthread =
        thread_create_unrunnable(remote_core_init_enabled,
                                 (void*)remote_core_state, stack_size);
    if (newthread == NULL) {
        // NOTE(review): remote_core_state leaks on this error path
        return LIB_ERR_THREAD_CREATE;

    /* Save the state for later steps of the spanning state machine */
    struct span_domain_state *span_domain_state =
        malloc(sizeof(struct span_domain_state));
    if (!span_domain_state) {
        // NOTE(review): remote_core_state and newthread leak on this path
        return LIB_ERR_MALLOC_FAIL;
    span_domain_state->thread = newthread;
    span_domain_state->core_id = core_id;
    span_domain_state->callback = callback;
    span_domain_state->callback_arg = callback_arg;

    /* Give remote_core_state pointer to span_domain_state */
    remote_core_state->span_domain_state = span_domain_state;

    /* Start spanning domain state machine by sending vroot to the monitor */
    struct capref vroot = {

    /* Create new dispatcher frame */
    size_t dispsize = ((size_t)1) << DISPATCHER_FRAME_BITS;
    err = frame_alloc(&frame, dispsize, &dispsize);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_FRAME_ALLOC);

    err = vspace_map_one_frame((void **)&dispaddr, dispsize, frame, NULL, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VSPACE_MAP);

    dispatcher_handle_t handle = dispaddr;
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);
    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
    arch_registers_state_t *disabled_area =
        dispatcher_get_disabled_save_area(handle);

    /* Set dispatcher on the newthread */
    span_domain_state->thread->disp = handle;
    span_domain_state->frame = frame;
    span_domain_state->vroot = vroot;

    /* Setup dispatcher */
    disp->udisp = (lvaddr_t)handle;
    disp->disabled = true;
    disp_gen->core_id = span_domain_state->core_id;
    // Setup the dispatcher to run remote_core_init_disabled
    // and pass the created thread as an argument
    registers_set_initial(disabled_area, span_domain_state->thread,
                          (lvaddr_t)remote_core_init_disabled,
                          (lvaddr_t)&disp_gen->stack[DISPATCHER_STACK_WORDS],
                          (uintptr_t)span_domain_state->thread, 0, 0, 0);
    // Give dispatcher a unique name for debugging
    snprintf(disp->name, DISP_NAME_LEN, "%s%d", disp_name(),
             span_domain_state->core_id);

    // XXX: share LDT state between all dispatchers
    // this needs to happen before the remote core starts, otherwise the segment
    // selectors in the new thread state are invalid
    struct dispatcher_shared_x86_64 *disp_x64
        = get_dispatcher_shared_x86_64(handle);
    struct dispatcher_shared_x86_64 *mydisp_x64
        = get_dispatcher_shared_x86_64(curdispatcher());

    disp_x64->ldt_base = mydisp_x64->ldt_base;
    disp_x64->ldt_npages = mydisp_x64->ldt_npages;

    threads_prepare_to_span(handle);

    // Setup new local thread for inter-dispatcher messages, if not already done
    // NOTE(review): function-local static; not safe for concurrent callers
    static struct thread *interdisp_thread = NULL;
    if(interdisp_thread == NULL) {
        interdisp_thread = thread_create(interdisp_msg_handler,
                                         &domain_state->interdisp_ws);
        err = thread_detach(interdisp_thread);
        assert(err_is_ok(err));

    // XXX: Tell currently active interdisp-threads to handle default waitset
    for(int i = 0; i < MAX_CPUS; i++) {
        // NOTE(review): field is `b[i]` here but `binding[i]` everywhere
        // else in this file -- confirm which member is intended
        struct interdisp_binding *b = domain_state->b[i];

        if(disp_get_core_id() != i && b != NULL) {
            err = b->tx_vtbl.span_slave(b, NOP_CONT);
            assert(err_is_ok(err));

    /* XXX: create a thread that will handle the default waitset */
    if (domain_state->default_waitset_handler == NULL) {
        domain_state->default_waitset_handler
            = thread_create(span_slave_thread, NULL);
        assert(domain_state->default_waitset_handler != NULL);

    // Hand the span state back to the caller if requested
    if (ret_span_state != NULL) {
        *ret_span_state = span_domain_state;
/**
 * \brief Creates a dispatcher on a remote core
 *
 * \param core_id   Id of the core to create the dispatcher on
 * \param callback  Callback to use when new dispatcher is created
 *
 * The new dispatcher is created with the same vroot, sharing the same vspace.
 * The new dispatcher also has a urpc connection to the core that created it.
 */
errval_t domain_new_dispatcher(coreid_t core_id,
                               domain_spanned_callback_t callback,
    struct span_domain_state *span_domain_state;
    errval_t err = domain_new_dispatcher_varstack(core_id,
                                                  callback, callback_arg,
                                                  THREADS_DEFAULT_STACK_BYTES,
    if (err_is_fail(err)) {

    /* Wait to use the monitor binding */
    struct monitor_binding *mcb = get_monitor_binding();
    /* Set reply handler */
    mcb->rx_vtbl.span_domain_reply = span_domain_reply;
    event_mutex_enqueue_lock(&mcb->mutex, &span_domain_state->event_qnode,
                             (struct event_closure) {
                                 .handler = span_domain_request_sender_wrapper,
                                 .arg = span_domain_state });

    // Block until the newly spanned dispatcher reports back
    while(!span_domain_state->initialized) {
        event_dispatch(get_default_waitset());

    free(span_domain_state);
/**
 * \brief Creates a dispatcher for a remote core, without running it.
 *
 * \param core_id   Id of the core to create the dispatcher on
 * \param ret_state If non-null, will contain the spanned domain state, which
 *                  can be used to retrieve the vroot and dispframe, as well as
 *                  to check when the new dispatcher is up
 *
 * The new dispatcher is created with the same vroot, sharing the same vspace.
 * The new dispatcher also has a urpc connection to the core that created it.
 */
errval_t domain_new_dispatcher_setup_only(coreid_t core_id,
                                          struct span_domain_state **ret_state)
    assert(ret_state != NULL);
    // No callback, default stack: caller polls ret_state for completion
    return domain_new_dispatcher_varstack(core_id, NULL, NULL,
                                          THREADS_DEFAULT_STACK_BYTES,
/**
 * \brief Send a capability to a dispatcher of this domain on another core.
 *
 * Fails with LIB_ERR_NO_SPANNED_DISP if no binding to that core exists.
 */
errval_t domain_send_cap(coreid_t core_id, struct capref cap)
    struct domain_state *domain_state = get_domain_state();
    if (!domain_state->binding[core_id]) {
        return LIB_ERR_NO_SPANNED_DISP;

    // Reset the (shared) completion flags for this exchange
    send_cap_err = SYS_ERR_OK;
    cap_received = false;

    struct interdisp_binding *b = domain_state->binding[core_id];
    err = b->tx_vtbl.send_cap_request(b, NOP_CONT, cap, (uintptr_t)&cap);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SEND_CAP_REQUEST);

    // TODO: Handled on different thread
    /* while(!cap_received) { */
    /*     messages_wait_and_handle_next(); */
/**
 * \brief Wakeup a thread on a foreign dispatcher while disabled.
 *
 * \param core_id Core ID to wakeup on
 * \param thread  Pointer to thread to wakeup
 * \param mydisp  Dispatcher this function is running on
 *
 * \return SYS_ERR_OK on success.
 */
static errval_t domain_wakeup_on_coreid_disabled(coreid_t core_id,
                                                 struct thread *thread,
                                                 dispatcher_handle_t mydisp)
    struct domain_state *ds = get_domain_state();

    // XXX: Ugly hack to allow waking up on a core id we don't have a
    // dispatcher handler for
    thread->coreid = core_id;

    assert_disabled(ds != NULL);
    if (ds->binding[core_id] == NULL) {
        return LIB_ERR_NO_SPANNED_DISP;

    // Queue the thread; the interdisp handler thread forwards the wakeup
    thread_enqueue(thread, &ds->remote_wakeup_queue);

    // Signal the inter-disp waitset of this event
    struct event_closure closure = {
        .handler = handle_wakeup_on

    waitset_chan_trigger_closure_disabled(&ds->interdisp_ws,
                                          &ds->remote_wakeup_event,

    // Already-registered is fine: the handler drains the whole queue
    assert_disabled(err_is_ok(err) ||
                    err_no(err) == LIB_ERR_CHAN_ALREADY_REGISTERED);
/**
 * \brief Disabled variant: wake a thread on its owning dispatcher.
 */
errval_t domain_wakeup_on_disabled(dispatcher_handle_t disp,
                                   struct thread *thread,
                                   dispatcher_handle_t mydisp)
    coreid_t core_id = disp_handle_get_core_id(disp);

    // TODO: Can't wakeup on anyone else than the owning dispatcher yet
    assert_disabled(disp == thread->disp);

    return domain_wakeup_on_coreid_disabled(core_id, thread, mydisp);
/// Enabled wrapper: disables the dispatcher around the wakeup
errval_t domain_wakeup_on(dispatcher_handle_t disp,
                          struct thread *thread)
    dispatcher_handle_t mydisp = disp_disable();
    errval_t err = domain_wakeup_on_disabled(disp, thread, mydisp);
    // (re-enable and return not visible in this chunk)
/**
 * \brief Migrate the calling thread to another core.
 *
 * Removes the thread from the local run queue, hands it to the remote
 * dispatcher via the wakeup path, then resumes the next local thread or
 * yields. Does not return on success.
 */
errval_t domain_thread_move_to(struct thread *thread, coreid_t core_id)
    assert(thread == thread_self());
    dispatcher_handle_t mydisp = disp_disable();
    struct dispatcher_generic *disp_gen = get_dispatcher_generic(mydisp);
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(mydisp);

    struct thread *next = thread->next;
    thread_remove_from_queue(&disp_gen->runq, thread);

    errval_t err = domain_wakeup_on_coreid_disabled(core_id, thread, mydisp);
    if(err_is_fail(err)) {
        // Handover failed: put the thread back on the local run queue
        thread_enqueue(thread, &disp_gen->runq);

    // run the next thread, if any
    if (next != thread) {
        disp_gen->current = next;
        disp_resume(mydisp, &next->regs);
    // (else branch header not visible in this chunk)
        disp_gen->current = NULL;
        disp->haswork = havework_disabled(mydisp);
        disp_yield_disabled(mydisp);

    USER_PANIC("should never be reached");
/**
 * \brief Create a thread on a possibly remote core, with a given stack size.
 *
 * Local requests are served directly; remote ones round-trip over the
 * interdisp binding using a private waitset so the wait loop exits cleanly.
 */
errval_t domain_thread_create_on_varstack(coreid_t core_id,
                                          thread_func_t start_func,
                                          void *arg, size_t stacksize,
                                          struct thread **newthread)
    if (disp_get_core_id() == core_id) {
        struct thread *th = NULL;
        if (stacksize == 0) {
            th = thread_create(start_func, arg);
            th = thread_create_varstack(start_func, arg, stacksize);
        // (success path not visible in this chunk)
        return LIB_ERR_THREAD_CREATE;

    struct domain_state *domain_state = get_domain_state();

    if (domain_state->binding[core_id] == NULL) {
        return LIB_ERR_NO_SPANNED_DISP;

    struct interdisp_binding *b = domain_state->binding[core_id];
    // NOTE(review): malloc result is used without a NULL check
    struct create_thread_req *req = malloc(sizeof(*req));
    req->reply_received = false;
    // use special waitset to make sure loop exits properly.
    struct waitset ws, *old_ws = b->waitset;
    b->change_waitset(b, &ws);
    err = b->tx_vtbl.create_thread_request(b, NOP_CONT,
                                           (genvaddr_t)(uintptr_t)start_func,
                                           (genvaddr_t)(uintptr_t)arg,
                                           (genvaddr_t)(lvaddr_t)req);
    if (err_is_fail(err)) {

    // Spin on the private waitset until create_thread_reply() fires
    while (!req->reply_received) {

    *newthread = req->thread;

    // Restore the binding's original waitset
    b->change_waitset(b, old_ws);
/// Convenience wrapper: remote thread creation with the default stack size
errval_t domain_thread_create_on(coreid_t core_id, thread_func_t start_func,
                                 void *arg, struct thread **newthread)
    return domain_thread_create_on_varstack(core_id, start_func, arg, 0, newthread);
/**
 * \brief Join a thread that may live on another core.
 *
 * Local joins go directly through thread_join(); remote joins round-trip
 * over the interdisp binding on a private waitset.
 */
errval_t domain_thread_join(struct thread *thread, int *retval)
    coreid_t core_id = thread->coreid;
    if (disp_get_core_id() == core_id) {
        return thread_join(thread, retval);
    // (else branch: remote join via interdisp)
        struct domain_state *domain_state = get_domain_state();

        if (domain_state->binding[core_id] == NULL) {
            return LIB_ERR_NO_SPANNED_DISP;

        struct interdisp_binding *b = domain_state->binding[core_id];
        // NOTE(review): malloc result is used without a NULL check
        struct join_thread_req *req = malloc(sizeof(*req));
        req->reply_received = false;
        // use special waitset to make sure loop exits properly.
        struct waitset ws, *old_ws = b->waitset;
        b->change_waitset(b, &ws);
        err = b->tx_vtbl.join_thread_request(b, NOP_CONT,
                                             (genvaddr_t)(lvaddr_t)thread,
                                             (genvaddr_t)(lvaddr_t)req);
        if (err_is_fail(err)) {

        // Spin until join_thread_reply() flags completion
        while (!req->reply_received) {
            event_dispatch(&ws);
        // change waitset back
        b->change_waitset(b, old_ws);

        *retval = req->retval;
1054 * \brief set the core_id.
1056 * Code using this should do a kernel_cap invocation to get the core_id first.
1058 void disp_set_core_id(coreid_t core_id)
1060 dispatcher_handle_t handle = curdispatcher();
1061 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1062 disp->core_id = core_id;
1067 * \brief returns the address and the size of the EH frame
1069 * \param eh_frame returned virtual address of the EH frame
1070 * \param eh_frame_size returned size of the EH frame
1072 void disp_get_eh_frame(lvaddr_t *eh_frame,
1073 size_t *eh_frame_size)
1075 dispatcher_handle_t handle = curdispatcher();
1076 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1078 *eh_frame = disp->eh_frame;
1080 if (eh_frame_size) {
1081 *eh_frame_size = disp->eh_frame_size;
1086 * \brief returns the address and the size of the EH frame header
1088 * \param eh_frame returned virtual address of the EH frame
1089 * \param eh_frame_size returned size of the EH frame
1091 void disp_get_eh_frame_hdr(lvaddr_t *eh_frame_hdr,
1092 size_t *eh_frame_hdr_size)
1094 dispatcher_handle_t handle = curdispatcher();
1095 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1097 *eh_frame_hdr = disp->eh_frame_hdr;
1099 if (eh_frame_hdr_size) {
1100 *eh_frame_hdr_size = disp->eh_frame_hdr_size;
1105 * \brief returns the core_id stored in disp_priv struct
1107 coreid_t disp_get_core_id(void)
1109 dispatcher_handle_t handle = curdispatcher();
1110 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1111 return disp->core_id;
1115 * \brief returns the current core_id stored in disp_shared struct
1117 coreid_t disp_get_current_core_id(void)
1119 dispatcher_handle_t handle = curdispatcher();
1120 struct dispatcher_shared_generic* disp = get_dispatcher_shared_generic(handle);
1121 return disp->curr_core_id;
1125 * \brief returns the domain_id stored in disp_priv struct
1127 domainid_t disp_get_domain_id(void)
1129 dispatcher_handle_t handle = curdispatcher();
1130 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1131 return disp->domain_id;
1135 * \brief returns the core_id stored in disp_priv struct
1137 coreid_t disp_handle_get_core_id(dispatcher_handle_t handle)
1139 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1140 return disp->core_id;
1143 struct waitset *get_default_waitset(void)
1145 dispatcher_handle_t handle = curdispatcher();
1146 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1147 return &disp->core_state.c.default_waitset;
1151 * \brief set the monitor client binding on the dispatcher priv
1153 void set_monitor_binding(struct monitor_binding *b)
1155 dispatcher_handle_t handle = curdispatcher();
1156 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1157 disp->core_state.c.monitor_binding = b;
1161 * \brief Returns the monitor client binding on the dispatcher priv
1163 struct monitor_binding *get_monitor_binding(void)
1165 dispatcher_handle_t handle = curdispatcher();
1166 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1167 return disp->core_state.c.monitor_binding;
1170 struct waitset_chanstate *get_monitor_binding_chanstate(void)
1172 struct monitor_binding *mb = get_monitor_binding();
1173 return mb->get_receiving_chanstate(mb);
1177 * \brief set the blocking rpc monitor client binding on the dispatcher priv
1179 void set_monitor_blocking_binding(struct monitor_blocking_binding *st)
1181 dispatcher_handle_t handle = curdispatcher();
1182 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1183 disp->core_state.c.monitor_blocking_binding = st;
1187 * \brief Returns the blocking rpc monitor client binding on the
1190 struct monitor_blocking_binding *get_monitor_blocking_binding(void)
1192 dispatcher_handle_t handle = curdispatcher();
1193 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1194 return disp->core_state.c.monitor_blocking_binding;
1198 * \brief set the mem client on the dispatcher priv
1200 void set_mem_client(struct mem_binding *st)
1202 dispatcher_handle_t handle = curdispatcher();
1203 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1204 disp->core_state.c.mem_st = st;
1208 * \brief Returns the mem client on the dispatcher priv
1210 struct mem_binding *get_mem_client(void)
1212 dispatcher_handle_t handle = curdispatcher();
1213 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1214 return disp->core_state.c.mem_st;
1218 * \brief Returns a pointer to the current vspace on the dispatcher priv
1220 struct vspace *get_current_vspace(void)
1222 dispatcher_handle_t handle = curdispatcher();
1223 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1224 return &disp->core_state.vspace_state.vspace;
1228 * \brief Returns a pointer to the current pinned state on the dispatcher priv
1230 struct pinned_state *get_current_pinned_state(void)
1232 dispatcher_handle_t handle = curdispatcher();
1233 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1234 return &disp->core_state.pinned_state;
1238 * \brief Returns a pointer to the current pmap on the dispatcher priv
1240 struct pmap *get_current_pmap(void)
1242 dispatcher_handle_t handle = curdispatcher();
1243 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1244 return (struct pmap*)&disp->core_state.vspace_state.pmap;
1248 * \brief Returns a pointer to the morecore state on the dispatcher priv
1250 struct morecore_state *get_morecore_state(void)
1252 dispatcher_handle_t handle = curdispatcher();
1253 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1254 return &disp->core_state.c.morecore_state;
1258 * \brief Returns a pointer to the ram_alloc state on the dispatcher priv
1260 struct ram_alloc_state *get_ram_alloc_state(void)
1262 dispatcher_handle_t handle = curdispatcher();
1263 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1264 return &disp->core_state.c.ram_alloc_state;
1268 * \brief Returns a pointer to the ram_alloc state on the dispatcher priv
1270 struct skb_state *get_skb_state(void)
1272 dispatcher_handle_t handle = curdispatcher();
1273 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1274 return &disp->core_state.c.skb_state;
1278 * \brief Returns a pointer to the octopus rpc client on the dispatcher priv
1280 struct octopus_binding *get_octopus_binding(void)
1282 dispatcher_handle_t handle = curdispatcher();
1283 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1284 return disp->core_state.c.octopus_binding;
1288 * \brief Sets the octopus rpc client on the dispatcher priv
1290 void set_octopus_binding(struct octopus_binding *c)
1292 dispatcher_handle_t handle = curdispatcher();
1293 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1294 disp->core_state.c.octopus_binding = c;
1298 * \brief Returns a pointer to the chips_context state on the dispatcher priv
1300 struct spawn_binding *get_spawn_binding(coreid_t core)
1302 dispatcher_handle_t handle = curdispatcher();
1303 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1304 assert(core < MAX_CPUS);
1305 return disp->core_state.c.spawn_bindings[core];
1309 * \brief set the chips_context state on the dispatcher priv
1311 void set_spawn_binding(coreid_t core, struct spawn_binding *c)
1313 dispatcher_handle_t handle = curdispatcher();
1314 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1315 assert(core < MAX_CPUS);
1316 disp->core_state.c.spawn_bindings[core] = c;
1319 * \brief Returns a pointer to the proc_mgmt rpc client on the dispatcher priv
1321 struct proc_mgmt_binding *get_proc_mgmt_binding(void)
1323 dispatcher_handle_t handle = curdispatcher();
1324 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1325 return disp->core_state.c.proc_mgmt_binding;
1329 * \brief Sets the prog_mgmt rpc client on the dispatcher priv
1331 void set_proc_mgmt_binding(struct proc_mgmt_binding *c)
1333 dispatcher_handle_t handle = curdispatcher();
1334 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1335 disp->core_state.c.proc_mgmt_binding = c;
1338 struct arrakis_binding *get_arrakis_binding(coreid_t core)
1340 dispatcher_handle_t handle = curdispatcher();
1341 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1342 assert(core < MAX_CPUS);
1343 return disp->core_state.c.arrakis_bindings[core];
1347 * \brief set the chips_context state on the dispatcher priv
1349 void set_arrakis_binding(coreid_t core, struct arrakis_binding *c)
1351 dispatcher_handle_t handle = curdispatcher();
1352 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1353 assert(core < MAX_CPUS);
1354 disp->core_state.c.arrakis_bindings[core] = c;
1358 * \brief Returns a pointer to the terminal state on the dispatcher priv
1360 struct terminal_state *get_terminal_state(void)
1362 dispatcher_handle_t handle = curdispatcher();
1363 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1364 return disp->core_state.c.terminal_state;
1368 * \brief set the terminal state on the dispatcher priv
1370 void set_terminal_state(struct terminal_state *st)
1372 dispatcher_handle_t handle = curdispatcher();
1373 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1374 disp->core_state.c.terminal_state = st;
1378 * \brief Returns a pointer to the domain state on the dispatcher priv
1380 struct domain_state *get_domain_state(void)
1382 dispatcher_handle_t handle = curdispatcher();
1383 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1384 return disp->core_state.c.domain_state;
1388 * \brief set the domain state on the dispatcher priv
1390 void set_domain_state(struct domain_state *st)
1392 dispatcher_handle_t handle = curdispatcher();
1393 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1394 disp->core_state.c.domain_state = st;
1398 * \brief Returns a pointer to the spawn state on the dispatcher priv
1400 struct spawn_state *get_spawn_state(void)
1402 dispatcher_handle_t handle = curdispatcher();
1403 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1404 return disp->core_state.c.spawn_state;
1408 * \brief set the spawn state on the dispatcher priv
1410 void set_spawn_state(struct spawn_state *st)
1412 dispatcher_handle_t handle = curdispatcher();
1413 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1414 disp->core_state.c.spawn_state = st;
1418 * \brief Returns a pointer to the spawn state on the dispatcher priv
1420 struct slot_alloc_state *get_slot_alloc_state(void)
1422 dispatcher_handle_t handle = curdispatcher();
1423 struct dispatcher_generic* disp = get_dispatcher_generic(handle);
1424 return &disp->core_state.c.slot_alloc_state;
1428 * \brief Returns a 64-bit hash code for a given domain cap.
1430 errval_t domain_cap_hash(struct capref domain_cap, uint64_t *ret_hash)
1432 assert(ret_hash != NULL);
1434 struct capability ret_cap;
1435 errval_t err = debug_cap_identify(domain_cap, &ret_cap);
1436 if (err_is_fail(err)) {
1437 return err_push(err, PROC_MGMT_ERR_DOMAIN_CAP_HASH);
1439 assert(ret_cap.type == ObjType_Domain);
1441 static uint64_t base = 1 + (uint64_t) MAX_COREID;
1442 *ret_hash = base * ret_cap.u.domain.coreid + ret_cap.u.domain.core_local_id;