3 * \brief Process management service.
7 * Copyright (c) 2017, ETH Zurich.
10 * This file is distributed under the terms in the attached LICENSE file.
11 * If you do not find this file, copies can be found by writing to:
12 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
15 #include <barrelfish/barrelfish.h>
16 #include <barrelfish/nameservice_client.h>
17 #include <barrelfish/proc_mgmt_client.h>
18 #include <barrelfish/spawn_client.h>
19 #include <if/monitor_defs.h>
20 #include <if/proc_mgmt_defs.h>
21 #include <if/spawn_defs.h>
25 #include "pending_clients.h"
26 #include "spawnd_state.h"
// Monitor-originated handler for proc_mgmt.add_spawnd: the local monitor
// announces that a spawnd has come up on core `core_id`, exporting `iref`.
// We bind to that spawnd and record the binding in the per-core spawnd
// state table so later spawn/span/kill/exit requests can be forwarded.
// NOTE(review): several lines of this function (error-path returns and
// closing braces) are not visible in this view; comments cover only the
// code shown.
28 static void add_spawnd_handler(struct proc_mgmt_binding *b, coreid_t core_id,
// Refuse duplicate registration for the same core.
31 if (spawnd_state_exists(core_id)) {
32 DEBUG_ERR(PROC_MGMT_ERR_SPAWND_EXISTS, "spawnd_state_exists");
36 // Bind with the spawnd.
37 struct spawn_binding *spawnb;
38 errval_t err = spawn_bind_iref(iref, &spawnb);
39 if (err_is_fail(err)) {
40 DEBUG_ERR(err, "spawn_bind_iref");
// Store the new binding in the per-core table (owned by spawnd_state).
44 err = spawnd_state_alloc(core_id, spawnb);
45 if (err_is_fail(err)) {
46 DEBUG_ERR(err, "spawnd_state_alloc");
49 debug_printf("Process manager bound with spawnd.%u on iref %u\n", core_id,
// add_spawnd received on a non-monitor binding: only the monitor is allowed
// to register spawnds, so the call is deliberately ignored (the debug
// message below is kept but disabled).
53 static void add_spawnd_handler_non_monitor(struct proc_mgmt_binding *b,
54 coreid_t core_id, iref_t iref)
56 // debug_printf("Ignoring add_spawnd call: %s\n",
57 // err_getstring(PROC_MGMT_ERR_NOT_MONITOR));
// Forward declarations for the spawnd reply handlers. Each request sender
// below installs one of these into the spawn binding's rx_vtbl before
// transmitting, so the matching *_reply message is routed back here.
60 static void spawn_reply_handler(struct spawn_binding *b,
61 struct capref domain_cap, errval_t spawn_err);
62 static void spawn_with_caps_reply_handler(struct spawn_binding *b,
63 struct capref domain_cap,
65 static void span_reply_handler(struct spawn_binding *b,
66 struct capref domain_cap, errval_t span_err);
67 static void kill_reply_handler(struct spawn_binding *b,
68 struct capref domain_cap, errval_t kill_err);
69 static void exit_reply_handler(struct spawn_binding *b,
70 struct capref domain_cap, errval_t exit_err);
71 static void cleanup_reply_handler(struct spawn_binding *b,
72 struct capref domain_cap,
73 errval_t cleanup_err);
// Event-mutex callback: forwards a queued spawn (or spawn_with_caps) request
// to the target core's spawnd. Runs while holding the spawn binding's event
// mutex; the mutex is released either here on failure, or in the reply
// handler once spawnd answers.
// NOTE(review): some lines (closing braces, the with_caps request's argument
// continuation) are not visible in this view.
75 static void spawn_request_sender(void *arg)
77 struct pending_spawn *spawn = (struct pending_spawn*) arg;
// Use the with-caps variant iff the client supplied at least one of the
// inherit/argument CNode caps.
80 bool with_caps = !(capref_is_null(spawn->inheritcn_cap) &&
81 capref_is_null(spawn->argcn_cap));
// Route the eventual reply to the matching handler before sending.
83 spawn->b->rx_vtbl.spawn_with_caps_reply = spawn_with_caps_reply_handler;
84 err = spawn->b->tx_vtbl.spawn_with_caps_request(spawn->b, NOP_CONT,
96 spawn->b->rx_vtbl.spawn_reply = spawn_reply_handler;
97 err = spawn->b->tx_vtbl.spawn_request(spawn->b, NOP_CONT, cap_procmng,
98 spawn->domain_cap, spawn->path,
99 spawn->argvbuf, spawn->argvbytes,
100 spawn->envbuf, spawn->envbytes,
103 if (err_is_ok(err)) {
// Channel busy: re-register this sender to run when TX space frees up.
106 if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
107 err = spawn->b->register_send(spawn->b, spawn->b->waitset,
108 MKCONT(spawn_request_sender, arg));
109 if (err_is_fail(err)) {
110 DEBUG_ERR(err, "registering for spawn request");
// Give up: drop the pending-client entry so the slot isn't leaked,
// and unlock the mutex taken when this sender was enqueued.
111 pending_clients_release(spawn->domain_cap,
112 with_caps ? ClientType_SpawnWithCaps
115 event_mutex_unlock(&spawn->b->mutex);
// Hard send failure (not TX_BUSY): same cleanup as above.
119 DEBUG_ERR(err, "sending spawn request");
120 pending_clients_release(spawn->domain_cap,
121 with_caps ? ClientType_SpawnWithCaps
124 event_mutex_unlock(&spawn->b->mutex);
130 static void span_request_sender(void *arg)
132 struct pending_span *span = (struct pending_span*) arg;
135 span->b->rx_vtbl.span_reply = span_reply_handler;
136 err = span->b->tx_vtbl.span_request(span->b, NOP_CONT, cap_procmng,
137 span->domain_cap, span->vroot,
139 if (err_is_ok(err)) {
140 err = domain_span(span->domain_cap, span->core_id);
141 if (err_is_fail(err)) {
142 DEBUG_ERR(err, "failed domain_span to core %u\n", span->core_id);
146 if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
147 err = span->b->register_send(span->b, span->b->waitset,
148 MKCONT(span_request_sender, arg));
149 if (err_is_fail(err)) {
150 DEBUG_ERR(err, "registering for span request");
151 pending_clients_release(span->domain_cap, ClientType_Span,
153 event_mutex_unlock(&span->b->mutex);
157 DEBUG_ERR(err, "sending span request");
158 pending_clients_release(span->domain_cap, ClientType_Span, NULL);
159 event_mutex_unlock(&span->b->mutex);
165 static void kill_request_sender(void *arg)
167 struct pending_kill_exit_cleanup *kill = (struct pending_kill_exit_cleanup*) arg;
170 kill->sb->rx_vtbl.kill_reply = kill_reply_handler;
171 err = kill->sb->tx_vtbl.kill_request(kill->sb, NOP_CONT, cap_procmng,
173 if (err_is_ok(err)) {
176 if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
177 err = kill->sb->register_send(kill->sb, kill->sb->waitset,
178 MKCONT(kill_request_sender, arg));
179 if (err_is_fail(err)) {
180 DEBUG_ERR(err, "registering for kill request");
182 struct pending_client *cl;
183 err = pending_clients_release_one(kill->domain_cap,
186 if (err_is_ok(err)) {
188 struct pending_client *tmp = cl;
194 event_mutex_unlock(&kill->sb->mutex);
198 DEBUG_ERR(err, "sending kill request");
200 struct pending_client *cl;
201 err = pending_clients_release_one(kill->domain_cap,
204 if (err_is_ok(err)) {
206 struct pending_client *tmp = cl;
212 event_mutex_unlock(&kill->sb->mutex);
218 static void exit_request_sender(void *arg)
220 struct pending_kill_exit_cleanup *exit = (struct pending_kill_exit_cleanup*) arg;
223 exit->sb->rx_vtbl.exit_reply = exit_reply_handler;
224 err = exit->sb->tx_vtbl.exit_request(exit->sb, NOP_CONT, cap_procmng,
226 if (err_is_ok(err)) {
229 if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
230 err = exit->sb->register_send(exit->sb, exit->sb->waitset,
231 MKCONT(exit_request_sender, arg));
232 if (err_is_fail(err)) {
233 DEBUG_ERR(err, "registering for exit request");
234 err = pending_clients_release(exit->domain_cap, ClientType_Exit,
236 event_mutex_unlock(&exit->sb->mutex);
240 DEBUG_ERR(err, "sending exit request");
241 err = pending_clients_release(exit->domain_cap, ClientType_Exit,
243 event_mutex_unlock(&exit->sb->mutex);
249 static void cleanup_request_sender(void *arg)
251 struct pending_kill_exit_cleanup *cleanup = (struct pending_kill_exit_cleanup*) arg;
254 cleanup->sb->rx_vtbl.cleanup_reply = cleanup_reply_handler;
255 err = cleanup->sb->tx_vtbl.cleanup_request(cleanup->sb, NOP_CONT, cap_procmng,
256 cleanup->domain_cap);
257 if (err_is_ok(err)) {
260 if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
261 err = cleanup->sb->register_send(cleanup->sb, cleanup->sb->waitset,
262 MKCONT(cleanup_request_sender, arg));
263 if (err_is_fail(err)) {
264 DEBUG_ERR(err, "registering for cleanup request");
265 pending_clients_release(cleanup->domain_cap, ClientType_Cleanup,
267 event_mutex_unlock(&cleanup->sb->mutex);
271 DEBUG_ERR(err, "sending cleanup request");
272 pending_clients_release(cleanup->domain_cap, ClientType_Cleanup,
274 event_mutex_unlock(&cleanup->sb->mutex);
// spawnd's reply to a plain spawn request: record the new domain on success
// and relay the (possibly failed) result to the waiting client.
280 static void spawn_reply_handler(struct spawn_binding *b,
281 struct capref domain_cap, errval_t spawn_err)
// Release the event mutex taken when the request was enqueued.
283 event_mutex_unlock(&b->mutex);
// Look up and remove the client that initiated this spawn.
285 struct pending_client *cl;
286 errval_t err = pending_clients_release(domain_cap, ClientType_Spawn, &cl);
287 if (err_is_fail(err)) {
288 DEBUG_ERR(err, "failed to retrieve pending spawn client based on domain"
// Only register the domain if spawnd actually created it.
294 if (err_is_ok(spawn_err)) {
295 err = domain_spawn(domain_cap, cl->core_id);
// Relay the outcome (spawn error or local bookkeeping error) to the client.
298 errval_t resp_err = cl->b->tx_vtbl.spawn_response(cl->b, NOP_CONT, err,
300 if (err_is_fail(resp_err)) {
301 DEBUG_ERR(resp_err, "failed to send spawn_response to client");
// spawnd's reply to a spawn_with_caps request; identical flow to
// spawn_reply_handler but keyed on ClientType_SpawnWithCaps and answered
// via spawn_with_caps_response.
307 static void spawn_with_caps_reply_handler(struct spawn_binding *b,
308 struct capref domain_cap,
// Release the event mutex taken when the request was enqueued.
311 event_mutex_unlock(&b->mutex);
313 struct pending_client *cl;
314 errval_t err = pending_clients_release(domain_cap, ClientType_SpawnWithCaps,
316 if (err_is_fail(err)) {
317 DEBUG_ERR(err, "failed to retrieve pending spawn_with_caps client based"
// Only register the domain if spawnd actually created it.
323 if (err_is_ok(spawn_err)) {
324 err = domain_spawn(domain_cap, cl->core_id);
327 errval_t resp_err = cl->b->tx_vtbl.spawn_with_caps_response(cl->b, NOP_CONT,
330 if (err_is_fail(resp_err)) {
331 DEBUG_ERR(resp_err, "failed to send spawn_with_caps_response to "
// spawnd's reply to a span request: relay the result to the client, unless
// the domain was stopped while the span was in flight (then nobody is
// listening anymore).
338 static void span_reply_handler(struct spawn_binding *b,
339 struct capref domain_cap, errval_t span_err)
// Release the event mutex taken when the request was enqueued.
341 event_mutex_unlock(&b->mutex);
343 struct pending_client *cl;
344 errval_t err = pending_clients_release(domain_cap, ClientType_Span, &cl);
345 if (err_is_fail(err)) {
346 DEBUG_ERR(err, "failed to retrieve pending span client based on domain"
// Re-validate the domain: it may have been killed meanwhile.
351 struct domain_entry *entry;
352 err = domain_get_by_cap(cl->domain_cap, &entry);
353 if (err_is_fail(err)) {
354 DEBUG_ERR(err, "failed to retrieve span client by domain cap");
358 if (entry->status != DOMAIN_STATUS_RUNNING) {
359 // Domain has been stopped while we were serving the request; there's
360 // no one to respond to.
365 err = cl->b->tx_vtbl.span_response(cl->b, NOP_CONT, span_err);
366 if (err_is_fail(err)) {
367 DEBUG_ERR(err, "failed to send span_response to client");
// spawnd's reply to a cleanup request. Cleanup is fanned out to one spawnd
// per core the domain ran on; this handler counts the acknowledgements down
// via entry->num_spawnds_resources and marks the domain CLEANED when the
// last one arrives. Until then, the pending-client entry is re-added so the
// remaining replies can still find it.
373 static void cleanup_reply_handler(struct spawn_binding *b,
374 struct capref domain_cap,
375 errval_t cleanup_err)
// Release the event mutex taken when the request was enqueued.
377 event_mutex_unlock(&b->mutex);
379 struct pending_client *cl;
380 errval_t err = pending_clients_release(domain_cap, ClientType_Cleanup, &cl);
381 if (err_is_fail(err)) {
382 DEBUG_ERR(err, "failed to retrieve pending cleanup client based on "
387 if (err_is_fail(cleanup_err)) {
388 // TODO(razvan): Here, spawnd has failed deleting its local cspace.
389 // Should we send another cleanup message, until it might succeed?
394 struct domain_entry *entry;
395 err = domain_get_by_cap(domain_cap, &entry);
396 if (err_is_fail(err)) {
397 DEBUG_ERR(err, "failed to retrieve domain by cap returned by spawnd "
// Sanity: at least one cleanup must still be outstanding, and a CLEANED
// domain should never receive further cleanup replies.
402 assert(entry->num_spawnds_resources > 0);
403 assert(entry->status != DOMAIN_STATUS_CLEANED);
405 --entry->num_spawnds_resources;
407 if (entry->num_spawnds_resources == 0) {
408 entry->status = DOMAIN_STATUS_CLEANED;
410 // At this point, the domain exists in state CLEANED for
411 // history reasons. For instance, if some other domain
412 // issues a wait call for this one, the process manager can
413 // return the exit status directly.
414 // At some point, however, we might want to just clean up
415 // the domain entry and recycle the domain cap.
417 // Expecting to receive further cleanup replies from other
418 // spawnds for the same domain cap, hence re-add the
420 err = pending_clients_add(domain_cap, cl->b,
421 ClientType_Cleanup, MAX_COREID);
422 if (err_is_fail(err)) {
423 DEBUG_ERR(err, "pending_clients_add in cleanup_reply_handler");
// spawnd's reply to a kill request. Each spawnd hosting a dispatcher of the
// victim domain replies once; this handler counts down
// entry->num_spawnds_running. When the last dispatcher is gone it:
//   1. marks the domain STOPPED with EXIT_STATUS_KILLED,
//   2. answers every queued kill client,
//   3. answers all wait()-ers with the exit status,
//   4. fans out cleanup requests to every involved spawnd.
// While replies are still outstanding, the kill client is re-added to the
// pending table so later replies can find it.
// NOTE(review): several lines (frees, response arguments, loop bodies'
// closing braces) are not visible in this view.
428 static void kill_reply_handler(struct spawn_binding *b,
429 struct capref domain_cap, errval_t kill_err)
// Release the event mutex taken when the request was enqueued.
431 event_mutex_unlock(&b->mutex);
433 struct pending_client *cl;
434 errval_t err = pending_clients_release(domain_cap, ClientType_Kill, &cl);
435 if (err_is_fail(err)) {
436 DEBUG_ERR(err, "failed to retrieve pending kill client based on domain "
442 if (err_is_fail(kill_err)) {
443 // TODO(razvan): Here, spawnd has failed deleting its local dispatcher.
444 // Should we send another kill message, until it might succeed?
// Report the spawnd-side failure back to the killing client.
446 resp_err = cl->b->tx_vtbl.kill_response(cl->b, NOP_CONT,
448 if (err_is_fail(resp_err)) {
449 DEBUG_ERR(resp_err, "failed to send kill_response to client");
451 struct pending_client *tmp = cl;
458 struct domain_entry *entry;
459 err = domain_get_by_cap(domain_cap, &entry);
460 if (err_is_fail(err)) {
461 DEBUG_ERR(err, "failed to retrieve domain by cap returned by spawnd "
// Sanity: a reply implies a dispatcher was still running.
466 assert(entry->num_spawnds_running > 0);
467 assert(entry->status != DOMAIN_STATUS_STOPPED);
469 --entry->num_spawnds_running;
471 if (entry->num_spawnds_running == 0) {
// Last dispatcher gone: the domain is now stopped by a kill.
472 entry->status = DOMAIN_STATUS_STOPPED;
473 entry->exit_status = EXIT_STATUS_KILLED;
// Pre-register a cleanup client so the upcoming cleanup replies
// have a pending entry to match against.
475 err = pending_clients_add(domain_cap, NULL, ClientType_Cleanup,
477 if (err_is_fail(err)) {
478 DEBUG_ERR(err, "pending_clients_add in kill_reply_handler");
481 // TODO(razvan): Might it be more sane if we respond back
482 // to the client after the domain has been cleaned up (i.e.
483 // the cspace root has been revoked for all dispatchers)?
485 resp_err = cl->b->tx_vtbl.kill_response(cl->b, NOP_CONT,
487 if (err_is_fail(resp_err)) {
488 DEBUG_ERR(resp_err, "failed to send kill_response to client");
490 struct pending_client *tmp = cl;
495 // TODO(razvan): Same problem applies to the waiters: would
496 // it be better if we sent them wait_responses after the
497 // cspace root has been revoked, too? (here and in the exit
// Wake up everyone blocked in wait() on this domain.
499 struct domain_waiter *waiter = entry->waiters;
500 while (waiter != NULL) {
501 waiter->b->tx_vtbl.wait_response(waiter->b, NOP_CONT,
504 struct domain_waiter *tmp = waiter;
505 waiter = waiter->next;
// Fan out a cleanup request to every spawnd that hosted a dispatcher
// of this domain (sparse array indexed by core id).
509 for (coreid_t i = 0; i < MAX_COREID; ++i) {
510 if (entry->spawnds[i] == NULL) {
514 struct spawn_binding *spb = entry->spawnds[i]->b;
516 struct pending_kill_exit_cleanup *cleanup = (struct pending_kill_exit_cleanup*) malloc(
517 sizeof(struct pending_kill_exit_cleanup));
// NOTE(review): malloc result is not checked before use here —
// confirm whether OOM is considered fatal in this service.
519 cleanup->domain_cap = domain_cap;
521 spb->rx_vtbl.cleanup_reply = cleanup_reply_handler;
522 event_mutex_enqueue_lock(&spb->mutex,
524 (struct event_closure) {
525 .handler = cleanup_request_sender,
// More kill replies still expected: re-add the kill client so the
// remaining spawnd replies can be matched.
529 err = pending_clients_add(domain_cap, cl->b, ClientType_Kill,
531 if (err_is_fail(err)) {
532 DEBUG_ERR(err, "pending_clients_add in kill_reply_handler");
// spawnd's reply to an exit request. Structurally the same count-down as
// kill_reply_handler, except there is no client to answer (the exiting
// domain is gone): when the last dispatcher stops, the waiters are notified
// and cleanup requests are fanned out to all involved spawnds.
537 static void exit_reply_handler(struct spawn_binding *b,
538 struct capref domain_cap, errval_t exit_err)
// Release the event mutex taken when the request was enqueued.
540 event_mutex_unlock(&b->mutex);
542 struct pending_client *cl;
543 errval_t err = pending_clients_release(domain_cap, ClientType_Exit, &cl);
544 if (err_is_fail(err)) {
545 DEBUG_ERR(err, "failed to retrieve pending exit client based on domain "
550 if (err_is_fail(exit_err)) {
551 // TODO(razvan): Here, spawnd has failed deleting its local dispatcher.
552 // Should we send another kill message, until it might succeed?
557 struct domain_entry *entry;
558 err = domain_get_by_cap(domain_cap, &entry);
559 if (err_is_fail(err)) {
560 DEBUG_ERR(err, "failed to retrieve domain by cap returned by spawnd "
565 assert(entry->num_spawnds_running > 0);
566 assert(entry->status != DOMAIN_STATUS_STOPPED);
568 --entry->num_spawnds_running;
570 if (entry->num_spawnds_running == 0) {
// Last dispatcher gone; exit_status was set earlier by the exit path.
571 entry->status = DOMAIN_STATUS_STOPPED;
575 // TODO(razvan): Same problem applies to the waiters: would
576 // it be better if we sent them wait_responses after the
577 // cspace root has been revoked, too? (here and in the exit
// Wake up everyone blocked in wait() on this domain.
579 struct domain_waiter *waiter = entry->waiters;
580 while (waiter != NULL) {
581 waiter->b->tx_vtbl.wait_response(waiter->b, NOP_CONT,
584 struct domain_waiter *tmp = waiter;
585 waiter = waiter->next;
// Fan out a cleanup request to every spawnd that hosted a dispatcher.
589 for (coreid_t i = 0; i < MAX_COREID; ++i) {
590 if (entry->spawnds[i] == NULL) {
594 struct spawn_binding *spb = entry->spawnds[i]->b;
596 struct pending_kill_exit_cleanup *cleanup = (struct pending_kill_exit_cleanup*) malloc(
597 sizeof(struct pending_kill_exit_cleanup));
599 cleanup->domain_cap = domain_cap;
601 spb->rx_vtbl.cleanup_reply = cleanup_reply_handler;
602 event_mutex_enqueue_lock(&spb->mutex,
604 (struct event_closure) {
605 .handler = cleanup_request_sender,
// More exit replies still expected: re-add the pending entry.
609 err = pending_clients_add(domain_cap, cl->b, ClientType_Exit,
611 if (err_is_fail(err)) {
// NOTE(review): message below says "kill_reply_handler" but this is
// exit_reply_handler — copy-paste; the string should be corrected.
612 DEBUG_ERR(err, "pending_clients_add in kill_reply_handler");
// Common path for spawn and spawn_with_caps: validates the target core,
// mints a fresh domain capability (retyped from the process manager's own
// cap), registers the requesting client as pending, then enqueues an
// asynchronous spawn_request_sender on the spawnd binding's event mutex.
// Returns an error synchronously only if setup fails; on SYS_ERR_OK the
// client is answered later from spawn_reply_handler.
617 static errval_t spawn_handler_common(struct proc_mgmt_binding *b,
618 enum ClientType type,
619 coreid_t core_id, const char *path,
620 const char *argvbuf, size_t argvbytes,
621 const char *envbuf, size_t envbytes,
622 struct capref inheritcn_cap,
623 struct capref argcn_cap, uint8_t flags)
// A spawnd must already be registered for the target core.
625 if (!spawnd_state_exists(core_id)) {
626 return PROC_MGMT_ERR_INVALID_SPAWND;
629 struct spawnd_state *state = spawnd_state_get(core_id);
630 assert(state != NULL);
631 struct spawn_binding *cl = state->b;
// Mint the new domain's identity capability.
634 struct capref domain_cap;
635 errval_t err = slot_alloc(&domain_cap);
636 if (err_is_fail(err)) {
637 DEBUG_ERR(err, "slot_alloc domain_cap");
638 return err_push(err, PROC_MGMT_ERR_CREATE_DOMAIN_CAP);
640 err = cap_retype(domain_cap, cap_procmng, 0, ObjType_Domain, 0, 1);
641 if (err_is_fail(err)) {
642 DEBUG_ERR(err, "cap_retype domain_cap");
643 return err_push(err, PROC_MGMT_ERR_CREATE_DOMAIN_CAP);
// Remember who asked, keyed by the new domain cap, so the reply
// handler can respond to the right client.
646 err = pending_clients_add(domain_cap, b, type, core_id);
647 if (err_is_fail(err)) {
648 DEBUG_ERR(err, "pending_clients_add");
// Package the request; ownership passes to spawn_request_sender.
// NOTE(review): malloc result is not checked before use — confirm
// whether OOM is considered fatal in this service.
652 struct pending_spawn *spawn = (struct pending_spawn*) malloc(
653 sizeof(struct pending_spawn));
654 spawn->domain_cap = domain_cap;
656 spawn->core_id = core_id;
658 spawn->argvbuf = argvbuf;
659 spawn->argvbytes = argvbytes;
660 spawn->envbuf = envbuf;
661 spawn->envbytes = envbytes;
662 spawn->inheritcn_cap = inheritcn_cap;
663 spawn->argcn_cap = argcn_cap;
664 spawn->flags = flags;
// Serialize requests on this spawnd binding via its event mutex.
666 event_mutex_enqueue_lock(&cl->mutex, &spawn->qn,
667 (struct event_closure) {
668 .handler = spawn_request_sender,
// Client-facing spawn handler: delegates to spawn_handler_common with
// ClientType_Spawn and NULL caps. On success the response is deferred until
// spawnd replies; on failure an error spawn_response is sent immediately.
674 static void spawn_handler(struct proc_mgmt_binding *b, coreid_t core_id,
675 const char *path, const char *argvbuf,
676 size_t argvbytes, const char *envbuf, size_t envbytes,
679 errval_t err, resp_err;
680 err = spawn_handler_common(b, ClientType_Spawn, core_id, path, argvbuf,
681 argvbytes, envbuf, envbytes, NULL_CAP, NULL_CAP,
683 if (err_is_ok(err)) {
684 // Will respond to client when we get the reply from spawnd.
// Setup failed: report the error back to the client right away.
688 resp_err = b->tx_vtbl.spawn_response(b, NOP_CONT, err, NULL_CAP);
689 if (err_is_fail(resp_err)) {
690 DEBUG_ERR(resp_err, "failed to send spawn_response");
// Client-facing spawn_with_caps handler: same flow as spawn_handler but
// forwards the inherit/argument CNode caps and answers with
// spawn_with_caps_response on setup failure.
694 static void spawn_with_caps_handler(struct proc_mgmt_binding *b,
695 coreid_t core_id, const char *path,
696 const char *argvbuf, size_t argvbytes,
697 const char *envbuf, size_t envbytes,
698 struct capref inheritcn_cap,
699 struct capref argcn_cap, uint8_t flags)
701 errval_t err, resp_err;
702 err = spawn_handler_common(b, ClientType_SpawnWithCaps, core_id, path,
703 argvbuf, argvbytes, envbuf, envbytes,
704 inheritcn_cap, argcn_cap, flags);
705 if (err_is_ok(err)) {
706 // Will respond to client when we get the reply from spawnd.
// Setup failed: report the error back to the client right away.
710 resp_err = b->tx_vtbl.spawn_with_caps_response(b, NOP_CONT, err,
712 if (err_is_fail(resp_err)) {
713 DEBUG_ERR(resp_err, "failed to send spawn_with_caps_response");
// Client-facing span handler: validates that the domain may span to the
// target core and that a spawnd exists there, registers the client as
// pending, then enqueues span_request_sender on the spawnd's event mutex.
// Response is deferred to span_reply_handler unless setup fails.
717 static void span_handler(struct proc_mgmt_binding *b, struct capref domain_cap,
718 coreid_t core_id, struct capref vroot,
719 struct capref dispframe)
721 errval_t err, resp_err;
722 err = domain_can_span(domain_cap, core_id);
723 if (err_is_fail(err)) {
724 goto respond_with_err;
727 if (!spawnd_state_exists(core_id)) {
728 err = PROC_MGMT_ERR_INVALID_SPAWND;
729 goto respond_with_err;
732 struct spawnd_state *state = spawnd_state_get(core_id);
733 assert(state != NULL);
734 struct spawn_binding *cl = state->b;
737 err = pending_clients_add(domain_cap, b, ClientType_Span, core_id);
738 if (err_is_fail(err)) {
739 goto respond_with_err;
// Package the request; ownership passes to span_request_sender.
// NOTE(review): malloc result is not checked before use — confirm
// whether OOM is considered fatal in this service.
742 struct pending_span *span = (struct pending_span*) malloc(
743 sizeof(struct pending_span));
744 span->domain_cap = domain_cap;
746 span->core_id = core_id;
748 span->dispframe = dispframe;
750 event_mutex_enqueue_lock(&cl->mutex, &span->qn,
751 (struct event_closure) {
752 .handler = span_request_sender,
// Synchronous-failure path (reached via respond_with_err).
756 resp_err = b->tx_vtbl.span_response(b, NOP_CONT, err);
757 if (err_is_fail(resp_err)) {
758 DEBUG_ERR(resp_err, "failed to send span_response");
// Common path for kill and (voluntary) exit: records the caller as a pending
// client, stamps the domain's exit status, flags it as stopping, then
// enqueues a kill_request_sender or exit_request_sender (per `type`) for
// every spawnd that hosts one of the domain's dispatchers.
762 static errval_t kill_handler_common(struct proc_mgmt_binding *b,
763 struct capref domain_cap,
764 enum ClientType type,
767 errval_t err = pending_clients_add(domain_cap, b, type, MAX_COREID);
768 if (err_is_fail(err)) {
772 struct domain_entry *entry;
773 err = domain_get_by_cap(domain_cap, &entry);
774 if (err_is_fail(err)) {
// Record why the domain is going away and mark it stop-pending so
// concurrent span requests can detect the shutdown.
778 entry->exit_status = exit_status;
779 domain_stop_pending(entry);
// One request per core that hosts a dispatcher of this domain.
781 for (coreid_t i = 0; i < MAX_COREID; ++i) {
782 if (entry->spawnds[i] == NULL) {
786 struct spawn_binding *spb = entry->spawnds[i]->b;
// NOTE(review): malloc result is not checked before use — confirm
// whether OOM is considered fatal in this service.
788 struct pending_kill_exit_cleanup *cmd = (struct pending_kill_exit_cleanup*) malloc(
789 sizeof(struct pending_kill_exit_cleanup));
790 cmd->domain_cap = domain_cap;
// Dispatch on client type: kill and exit use different senders.
794 case ClientType_Kill:
797 event_mutex_enqueue_lock(&spb->mutex,
799 (struct event_closure) {
800 .handler = kill_request_sender,
804 case ClientType_Exit:
805 event_mutex_enqueue_lock(&spb->mutex,
807 (struct event_closure) {
808 .handler = exit_request_sender,
812 USER_PANIC("invalid client type for kill: %u\n", type);
// Client-facing kill handler: delegates to kill_handler_common with
// ClientType_Kill. On success, the client is answered later from
// kill_reply_handler; on setup failure an error response is sent now.
819 static void kill_handler(struct proc_mgmt_binding *b,
820 struct capref victim_domain_cap)
822 errval_t err = kill_handler_common(b, victim_domain_cap, ClientType_Kill,
824 if (err_is_fail(err)) {
825 errval_t resp_err = b->tx_vtbl.kill_response(b, NOP_CONT, err);
826 if (err_is_fail(resp_err)) {
827 DEBUG_ERR(resp_err, "failed to send kill_response");
// Handler for a domain exiting voluntarily: same teardown as kill, but the
// requester IS the dying domain, so no response is ever sent — errors are
// only logged.
832 static void exit_handler(struct proc_mgmt_binding *b, struct capref domain_cap,
835 errval_t err = kill_handler_common(b, domain_cap, ClientType_Exit,
837 if (err_is_fail(err)) {
838 DEBUG_ERR(err, "processing exit_handler for requesting domain, exit "
839 "code %u", exit_status);
841 // Error or not, there's no client to respond to anymore.
// Client-facing wait handler: if the target domain is already stopped,
// reply immediately with its exit status; otherwise queue the caller on the
// domain's waiter list, to be answered from kill_reply_handler /
// exit_reply_handler when the domain stops.
844 static void wait_handler(struct proc_mgmt_binding *b, struct capref domain_cap)
846 errval_t err, resp_err;
847 struct domain_entry *entry;
848 err = domain_get_by_cap(domain_cap, &entry);
849 if (err_is_fail(err)) {
853 if (entry->status == DOMAIN_STATUS_STOPPED) {
854 // Domain has already been stopped, so just reply with exit status.
// Still running: push a waiter node onto the domain's list.
// NOTE(review): malloc result is not checked before use — confirm
// whether OOM is considered fatal in this service.
858 struct domain_waiter *waiter = (struct domain_waiter*) malloc(
859 sizeof(struct domain_waiter));
861 waiter->next = entry->waiters;
862 entry->waiters = waiter;
863 // Will respond when domain is stopped.
// Immediate-reply path for already-stopped domains.
867 resp_err = b->tx_vtbl.wait_response(b, NOP_CONT, err, entry->exit_status);
868 if (err_is_fail(resp_err)) {
869 DEBUG_ERR(resp_err, "failed to send wait_response");
// Receive vtable used on the monitor's binding: the monitor is the only
// client allowed to register spawnds (add_spawnd_handler).
873 static struct proc_mgmt_rx_vtbl monitor_vtbl = {
874 .add_spawnd = add_spawnd_handler,
875 .spawn_call = spawn_handler,
876 .spawn_with_caps_call = spawn_with_caps_handler,
877 .span_call = span_handler,
878 .kill_call = kill_handler,
879 .exit_call = exit_handler,
880 .wait_call = wait_handler
// Receive vtable for ordinary client domains: identical to monitor_vtbl
// except add_spawnd is routed to the ignoring stub.
883 static struct proc_mgmt_rx_vtbl non_monitor_vtbl = {
884 .add_spawnd = add_spawnd_handler_non_monitor,
885 .spawn_call = spawn_handler,
886 .spawn_with_caps_call = spawn_with_caps_handler,
887 .span_call = span_handler,
888 .kill_call = kill_handler,
889 .exit_call = exit_handler,
890 .wait_call = wait_handler
// Allocates an LMP endpoint dedicated to the local monitor and returns its
// capability in *ep. The binding uses monitor_vtbl, so add_spawnd calls
// arriving on it are honored.
893 static errval_t alloc_ep_for_monitor(struct capref *ep)
895 struct proc_mgmt_lmp_binding *lmpb =
896 malloc(sizeof(struct proc_mgmt_lmp_binding));
897 assert(lmpb != NULL);
899 // setup our end of the binding
900 errval_t err = proc_mgmt_client_lmp_accept(lmpb, get_default_waitset(),
901 DEFAULT_LMP_BUF_WORDS);
902 if (err_is_fail(err)) {
904 return err_push(err, LIB_ERR_PROC_MGMT_CLIENT_ACCEPT);
// Hand the local endpoint cap to the caller; monitor messages arriving
// on this binding use the privileged vtable.
907 *ep = lmpb->chan.local_cap;
908 lmpb->b.rx_vtbl = monitor_vtbl;
// Export callback: once the proc_mgmt service iref exists, hand a dedicated
// LMP endpoint to the local monitor (for add_spawnd notifications) and
// register the iref with the name service so arbitrary domains can connect.
// All failures here are fatal (USER_PANIC) — the service cannot run
// without them.
913 static void export_cb(void *st, errval_t err, iref_t iref)
915 if (err_is_fail(err)) {
916 USER_PANIC_ERR(err, "export failed");
919 // Allocate an endpoint for the local monitor, who will use it to inform
920 // us about new spawnd irefs on behalf of other monitors.
922 err = alloc_ep_for_monitor(&ep);
923 if (err_is_fail(err)) {
924 USER_PANIC_ERR(err, "failed to allocate LMP EP for local monitor");
927 // Send the endpoint to the monitor, so it can finish the handshake.
928 struct monitor_binding *mb = get_monitor_binding();
929 err = mb->tx_vtbl.set_proc_mgmt_ep_request(mb, NOP_CONT, ep);
930 if (err_is_fail(err)) {
931 USER_PANIC_ERR(err, "failed to send set_proc_mgmt_ep_request to "
935 // Also register this iref with the name service, for arbitrary client
936 // domains to use for spawn-related ops.
937 err = nameservice_register(SERVICE_BASENAME, iref);
938 if (err_is_fail(err)) {
939 USER_PANIC_ERR(err, "nameservice_register failed");
// Connect callback for new (non-monitor) clients: install the unprivileged
// vtable so add_spawnd calls from them are ignored.
943 static errval_t connect_cb(void *st, struct proc_mgmt_binding *b)
945 b->rx_vtbl = non_monitor_vtbl;
// Public entry point: exports the proc_mgmt interface on the default
// waitset; export_cb/connect_cb complete the setup asynchronously.
949 errval_t start_service(void)
951 return proc_mgmt_export(NULL, export_cb, connect_cb, get_default_waitset(),
952 IDC_EXPORT_FLAGS_DEFAULT);