/**
 * \file
 * \brief Process management service.
 */

/*
 * Copyright (c) 2017, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#include <barrelfish/barrelfish.h>
#include <barrelfish/nameservice_client.h>
#include <barrelfish/proc_mgmt_client.h>
#include <barrelfish/spawn_client.h>
#include <if/monitor_defs.h>
#include <if/proc_mgmt_defs.h>
#include <if/spawn_defs.h>

#include "pending_clients.h"
#include "spawnd_state.h"

static void add_spawnd_handler(struct proc_mgmt_binding *b, coreid_t core_id,
                               iref_t iref)
{
    if (spawnd_state_exists(core_id)) {
        DEBUG_ERR(PROC_MGMT_ERR_SPAWND_EXISTS, "spawnd_state_exists");
        return;
    }

    // Bind with the spawnd.
    struct spawn_binding *spawnb;
    errval_t err = spawn_bind_iref(iref, &spawnb);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "spawn_bind_iref");
        return;
    }

    err = spawnd_state_alloc(core_id, spawnb);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "spawnd_state_alloc");
        return;
    }

    debug_printf("Process manager bound with spawnd.%u on iref %u\n", core_id,
                 iref);
}

static void add_spawnd_handler_non_monitor(struct proc_mgmt_binding *b,
                                           coreid_t core_id, iref_t iref)
{
    // debug_printf("Ignoring add_spawnd call: %s\n",
    //              err_getstring(PROC_MGMT_ERR_NOT_MONITOR));
}

static void spawn_reply_handler(struct spawn_binding *b,
                                struct capref domain_cap, errval_t spawn_err);
static void spawn_with_caps_reply_handler(struct spawn_binding *b,
                                          struct capref domain_cap,
                                          errval_t spawn_err);
static void span_reply_handler(struct spawn_binding *b,
                               struct capref domain_cap, errval_t span_err);
static void kill_reply_handler(struct spawn_binding *b,
                               struct capref domain_cap, errval_t kill_err);
static void exit_reply_handler(struct spawn_binding *b,
                               struct capref domain_cap, errval_t exit_err);
static void cleanup_reply_handler(struct spawn_binding *b,
                                  struct capref domain_cap,
                                  errval_t cleanup_err);

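/*
 * The *_request_sender functions below all follow the same pattern: they run
 * as event closures on a spawnd binding's event mutex (enqueued via
 * event_mutex_enqueue_lock), send the corresponding request to spawnd, retry
 * by re-registering themselves if the channel reports FLOUNDER_ERR_TX_BUSY,
 * and release the pending client and the mutex on a hard send failure. On the
 * success path the mutex is released by the matching *_reply_handler.
 */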
static void spawn_request_sender(void *arg)
{
    struct pending_spawn *spawn = (struct pending_spawn*) arg;
    errval_t err;
    bool with_caps = !(capref_is_null(spawn->inheritcn_cap) &&
                       capref_is_null(spawn->argcn_cap));

    if (with_caps) {
        spawn->b->rx_vtbl.spawn_with_caps_reply = spawn_with_caps_reply_handler;
        err = spawn->b->tx_vtbl.spawn_with_caps_request(spawn->b, NOP_CONT,
                cap_procmng, spawn->domain_cap, spawn->path, spawn->argvbuf,
                spawn->argvbytes, spawn->envbuf, spawn->envbytes,
                spawn->inheritcn_cap, spawn->argcn_cap, spawn->flags);
    } else {
        spawn->b->rx_vtbl.spawn_reply = spawn_reply_handler;
        err = spawn->b->tx_vtbl.spawn_request(spawn->b, NOP_CONT, cap_procmng,
                                              spawn->domain_cap, spawn->path,
                                              spawn->argvbuf, spawn->argvbytes,
                                              spawn->envbuf, spawn->envbytes,
                                              spawn->flags);
    }

    if (err_is_ok(err)) {
        free(spawn);
    } else {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            err = spawn->b->register_send(spawn->b, spawn->b->waitset,
                                          MKCONT(spawn_request_sender, arg));
            if (err_is_fail(err)) {
                DEBUG_ERR(err, "registering for spawn request");
                pending_clients_release(spawn->domain_cap,
                                        with_caps ? ClientType_SpawnWithCaps
                                                  : ClientType_Spawn,
                                        NULL);
                event_mutex_unlock(&spawn->b->mutex);
                free(spawn);
            }
        } else {
            DEBUG_ERR(err, "sending spawn request");
            pending_clients_release(spawn->domain_cap,
                                    with_caps ? ClientType_SpawnWithCaps
                                              : ClientType_Spawn,
                                    NULL);
            event_mutex_unlock(&spawn->b->mutex);
            free(spawn);
        }
    }
}

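/**
 * \brief Sends a span request to the spawnd on the target core; on success it
 *        also records the new core in the domain entry via domain_span().
 */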
static void span_request_sender(void *arg)
{
    struct pending_span *span = (struct pending_span*) arg;
    errval_t err;

    span->b->rx_vtbl.span_reply = span_reply_handler;
    err = span->b->tx_vtbl.span_request(span->b, NOP_CONT, cap_procmng,
                                        span->domain_cap, span->vroot,
                                        span->dispframe);
    if (err_is_ok(err)) {
        err = domain_span(span->domain_cap, span->core_id);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "failed domain_span to core %u\n", span->core_id);
        }
        free(span);
    } else {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            err = span->b->register_send(span->b, span->b->waitset,
                                         MKCONT(span_request_sender, arg));
            if (err_is_fail(err)) {
                DEBUG_ERR(err, "registering for span request");
                pending_clients_release(span->domain_cap, ClientType_Span,
                                        NULL);
                event_mutex_unlock(&span->b->mutex);
                free(span);
            }
        } else {
            DEBUG_ERR(err, "sending span request");
            pending_clients_release(span->domain_cap, ClientType_Span, NULL);
            event_mutex_unlock(&span->b->mutex);
            free(span);
        }
    }
}

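/**
 * \brief Sends a kill request for a domain to one of its spawnds.
 */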
static void kill_request_sender(void *arg)
{
    struct pending_kill_exit_cleanup *kill = (struct pending_kill_exit_cleanup*) arg;
    errval_t err;

    kill->sb->rx_vtbl.kill_reply = kill_reply_handler;
    err = kill->sb->tx_vtbl.kill_request(kill->sb, NOP_CONT, cap_procmng,
                                         kill->domain_cap);
    if (err_is_ok(err)) {
        free(kill);
    } else {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            err = kill->sb->register_send(kill->sb, kill->sb->waitset,
                                          MKCONT(kill_request_sender, arg));
            if (err_is_fail(err)) {
                DEBUG_ERR(err, "registering for kill request");
                struct pending_client *cl;
                err = pending_clients_release_one(kill->domain_cap,
                                                  ClientType_Kill, &cl);
                if (err_is_ok(err)) {
                    while (cl != NULL) {
                        struct pending_client *tmp = cl;
                        cl = cl->next;
                        free(tmp);
                    }
                }
                event_mutex_unlock(&kill->sb->mutex);
                free(kill);
            }
        } else {
            DEBUG_ERR(err, "sending kill request");
            struct pending_client *cl;
            err = pending_clients_release_one(kill->domain_cap,
                                              ClientType_Kill, &cl);
            if (err_is_ok(err)) {
                while (cl != NULL) {
                    struct pending_client *tmp = cl;
                    cl = cl->next;
                    free(tmp);
                }
            }
            event_mutex_unlock(&kill->sb->mutex);
            free(kill);
        }
    }
}

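/**
 * \brief Sends an exit request for a domain to one of its spawnds.
 */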
static void exit_request_sender(void *arg)
{
    struct pending_kill_exit_cleanup *exit = (struct pending_kill_exit_cleanup*) arg;
    errval_t err;

    exit->sb->rx_vtbl.exit_reply = exit_reply_handler;
    err = exit->sb->tx_vtbl.exit_request(exit->sb, NOP_CONT, cap_procmng,
                                         exit->domain_cap);
    if (err_is_ok(err)) {
        free(exit);
    } else {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            err = exit->sb->register_send(exit->sb, exit->sb->waitset,
                                          MKCONT(exit_request_sender, arg));
            if (err_is_fail(err)) {
                DEBUG_ERR(err, "registering for exit request");
                err = pending_clients_release(exit->domain_cap, ClientType_Exit,
                                              NULL);
                event_mutex_unlock(&exit->sb->mutex);
                free(exit);
            }
        } else {
            DEBUG_ERR(err, "sending exit request");
            err = pending_clients_release(exit->domain_cap, ClientType_Exit,
                                          NULL);
            event_mutex_unlock(&exit->sb->mutex);
            free(exit);
        }
    }
}

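/**
 * \brief Asks a spawnd to clean up its local state (cspace) for a stopped
 *        domain.
 */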
static void cleanup_request_sender(void *arg)
{
    struct pending_kill_exit_cleanup *cleanup = (struct pending_kill_exit_cleanup*) arg;
    errval_t err;

    cleanup->sb->rx_vtbl.cleanup_reply = cleanup_reply_handler;
    err = cleanup->sb->tx_vtbl.cleanup_request(cleanup->sb, NOP_CONT, cap_procmng,
                                               cleanup->domain_cap);
    if (err_is_ok(err)) {
        free(cleanup);
    } else {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            err = cleanup->sb->register_send(cleanup->sb, cleanup->sb->waitset,
                                             MKCONT(cleanup_request_sender, arg));
            if (err_is_fail(err)) {
                DEBUG_ERR(err, "registering for cleanup request");
                pending_clients_release(cleanup->domain_cap, ClientType_Cleanup,
                                        NULL);
                event_mutex_unlock(&cleanup->sb->mutex);
                free(cleanup);
            }
        } else {
            DEBUG_ERR(err, "sending cleanup request");
            pending_clients_release(cleanup->domain_cap, ClientType_Cleanup,
                                    NULL);
            event_mutex_unlock(&cleanup->sb->mutex);
            free(cleanup);
        }
    }
}

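/**
 * \brief Handles spawnd's reply to a plain spawn request: updates the domain
 *        state via domain_spawn() and forwards the result, together with the
 *        domain cap, to the client that requested the spawn.
 */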
static void spawn_reply_handler(struct spawn_binding *b,
                                struct capref domain_cap, errval_t spawn_err)
{
    event_mutex_unlock(&b->mutex);

    struct pending_client *cl;
    errval_t err = pending_clients_release(domain_cap, ClientType_Spawn, &cl);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to retrieve pending spawn client based on domain"
                  " cap");
        return;
    }

    err = spawn_err;
    if (err_is_ok(spawn_err)) {
        err = domain_spawn(domain_cap, cl->core_id);
    }

    errval_t resp_err = cl->b->tx_vtbl.spawn_response(cl->b, NOP_CONT, err,
                                                      domain_cap);
    if (err_is_fail(resp_err)) {
        DEBUG_ERR(resp_err, "failed to send spawn_response to client");
    }

    free(cl);
}

static void spawn_with_caps_reply_handler(struct spawn_binding *b,
                                          struct capref domain_cap,
                                          errval_t spawn_err)
{
    event_mutex_unlock(&b->mutex);

    struct pending_client *cl;
    errval_t err = pending_clients_release(domain_cap, ClientType_SpawnWithCaps,
                                           &cl);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to retrieve pending spawn_with_caps client based"
                  " on domain cap");
        return;
    }

    err = spawn_err;
    if (err_is_ok(spawn_err)) {
        err = domain_spawn(domain_cap, cl->core_id);
    }

    errval_t resp_err = cl->b->tx_vtbl.spawn_with_caps_response(cl->b, NOP_CONT,
                                                                err, domain_cap);
    if (err_is_fail(resp_err)) {
        DEBUG_ERR(resp_err, "failed to send spawn_with_caps_response to "
                  "client");
    }

    free(cl);
}

static void span_reply_handler(struct spawn_binding *b,
                               struct capref domain_cap, errval_t span_err)
{
    event_mutex_unlock(&b->mutex);

    struct pending_client *cl;
    errval_t err = pending_clients_release(domain_cap, ClientType_Span, &cl);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to retrieve pending span client based on domain"
                  " cap");
        return;
    }

    struct domain_entry *entry;
    err = domain_get_by_cap(cl->domain_cap, &entry);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to retrieve span client by domain cap");
        return;
    }

    if (entry->status != DOMAIN_STATUS_RUNNING) {
        // Domain has been stopped while we were serving the request; there's
        // no one to respond to.
        return;
    }

    err = cl->b->tx_vtbl.span_response(cl->b, NOP_CONT, span_err);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to send span_response to client");
    }

    free(cl);
}

static void cleanup_reply_handler(struct spawn_binding *b,
                                  struct capref domain_cap,
                                  errval_t cleanup_err)
{
    event_mutex_unlock(&b->mutex);

    struct pending_client *cl;
    errval_t err = pending_clients_release(domain_cap, ClientType_Cleanup, &cl);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to retrieve pending cleanup client based on "
                  "domain cap");
        return;
    }

    if (err_is_fail(cleanup_err)) {
        // TODO(razvan): Here, spawnd has failed deleting its local cspace.
        // Should we send another cleanup message, until it might succeed?
        return;
    }

    struct domain_entry *entry;
    err = domain_get_by_cap(domain_cap, &entry);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to retrieve domain by cap returned by spawnd "
                  "for cleanup");
        return;
    }

    assert(entry->num_spawnds_resources > 0);
    assert(entry->status != DOMAIN_STATUS_CLEANED);

    --entry->num_spawnds_resources;

    if (entry->num_spawnds_resources == 0) {
        entry->status = DOMAIN_STATUS_CLEANED;

        // At this point, the domain exists in state CLEANED for
        // history reasons. For instance, if some other domain
        // issues a wait call for this one, the process manager can
        // return the exit status directly.
        // At some point, however, we might want to just clean up
        // the domain entry and recycle the domain cap.
    } else {
        // Expecting to receive further cleanup replies from other
        // spawnds for the same domain cap, hence re-add the
        // pending client.
        err = pending_clients_add(domain_cap, cl->b,
                                  ClientType_Cleanup, MAX_COREID);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "pending_clients_add in cleanup_reply_handler");
        }
    }
}

static void kill_reply_handler(struct spawn_binding *b,
                               struct capref domain_cap, errval_t kill_err)
{
    event_mutex_unlock(&b->mutex);

    struct pending_client *cl;
    errval_t err = pending_clients_release(domain_cap, ClientType_Kill, &cl);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to retrieve pending kill client based on domain "
                  "cap");
        return;
    }

    errval_t resp_err;
    if (err_is_fail(kill_err)) {
        // TODO(razvan): Here, spawnd has failed deleting its local dispatcher.
        // Should we send another kill message, until it might succeed?
        while (cl != NULL) {
            resp_err = cl->b->tx_vtbl.kill_response(cl->b, NOP_CONT,
                                                    kill_err);
            if (err_is_fail(resp_err)) {
                DEBUG_ERR(resp_err, "failed to send kill_response to client");
            }
            struct pending_client *tmp = cl;
            cl = cl->next;
            free(tmp);
        }
        return;
    }

    struct domain_entry *entry;
    err = domain_get_by_cap(domain_cap, &entry);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to retrieve domain by cap returned by spawnd "
                  "for kill");
        return;
    }

    assert(entry->num_spawnds_running > 0);
    assert(entry->status != DOMAIN_STATUS_STOPPED);

    --entry->num_spawnds_running;

    if (entry->num_spawnds_running == 0) {
        entry->status = DOMAIN_STATUS_STOPPED;
        entry->exit_status = EXIT_STATUS_KILLED;

        err = pending_clients_add(domain_cap, NULL, ClientType_Cleanup,
                                  MAX_COREID);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "pending_clients_add in kill_reply_handler");
        }

        // TODO(razvan): Might it be more sane if we respond back
        // to the client after the domain has been cleaned up (i.e.
        // the cspace root has been revoked for all dispatchers)?
        while (cl != NULL) {
            resp_err = cl->b->tx_vtbl.kill_response(cl->b, NOP_CONT,
                                                    kill_err);
            if (err_is_fail(resp_err)) {
                DEBUG_ERR(resp_err, "failed to send kill_response to client");
            }
            struct pending_client *tmp = cl;
            cl = cl->next;
            free(tmp);
        }

        // TODO(razvan): Same problem applies to the waiters: would
        // it be better if we sent them wait_responses after the
        // cspace root has been revoked, too? (here and in the exit
        // case.)
        struct domain_waiter *waiter = entry->waiters;
        while (waiter != NULL) {
            waiter->b->tx_vtbl.wait_response(waiter->b, NOP_CONT,
                                             SYS_ERR_OK,
                                             entry->exit_status);
            struct domain_waiter *tmp = waiter;
            waiter = waiter->next;
            free(tmp);
        }

        for (coreid_t i = 0; i < MAX_COREID; ++i) {
            if (entry->spawnds[i] == NULL) {
                continue;
            }

            struct spawn_binding *spb = entry->spawnds[i]->b;

            struct pending_kill_exit_cleanup *cleanup = (struct pending_kill_exit_cleanup*) malloc(
                    sizeof(struct pending_kill_exit_cleanup));
            cleanup->sb = spb;
            cleanup->domain_cap = domain_cap;

            spb->rx_vtbl.cleanup_reply = cleanup_reply_handler;
            event_mutex_enqueue_lock(&spb->mutex,
                                     &cleanup->qn,
                                     (struct event_closure) {
                                         .handler = cleanup_request_sender,
                                         .arg = cleanup });
        }
    } else {
        err = pending_clients_add(domain_cap, cl->b, ClientType_Kill,
                                  MAX_COREID);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "pending_clients_add in kill_reply_handler");
        }
    }
}

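/**
 * \brief Handles spawnd's reply to an exit request. Mirrors the kill path,
 *        except that there is no requesting client left to notify: only
 *        waiters receive a wait_response once the domain is fully stopped.
 */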
static void exit_reply_handler(struct spawn_binding *b,
                               struct capref domain_cap, errval_t exit_err)
{
    event_mutex_unlock(&b->mutex);

    struct pending_client *cl;
    errval_t err = pending_clients_release(domain_cap, ClientType_Exit, &cl);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to retrieve pending exit client based on domain "
                  "cap");
        return;
    }

    if (err_is_fail(exit_err)) {
        // TODO(razvan): Here, spawnd has failed deleting its local dispatcher.
        // Should we send another kill message, until it might succeed?
        return;
    }

    struct domain_entry *entry;
    err = domain_get_by_cap(domain_cap, &entry);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to retrieve domain by cap returned by spawnd "
                  "for exit");
        return;
    }

    assert(entry->num_spawnds_running > 0);
    assert(entry->status != DOMAIN_STATUS_STOPPED);

    --entry->num_spawnds_running;

    if (entry->num_spawnds_running == 0) {
        entry->status = DOMAIN_STATUS_STOPPED;

        err = pending_clients_add(domain_cap, NULL, ClientType_Cleanup,
                                  MAX_COREID);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "pending_clients_add in exit_reply_handler");
        }

        // TODO(razvan): Same problem applies to the waiters: would
        // it be better if we sent them wait_responses after the
        // cspace root has been revoked, too? (here and in the exit
        // case.)
        struct domain_waiter *waiter = entry->waiters;
        while (waiter != NULL) {
            waiter->b->tx_vtbl.wait_response(waiter->b, NOP_CONT,
                                             SYS_ERR_OK,
                                             entry->exit_status);
            struct domain_waiter *tmp = waiter;
            waiter = waiter->next;
            free(tmp);
        }

        for (coreid_t i = 0; i < MAX_COREID; ++i) {
            if (entry->spawnds[i] == NULL) {
                continue;
            }

            struct spawn_binding *spb = entry->spawnds[i]->b;

            struct pending_kill_exit_cleanup *cleanup = (struct pending_kill_exit_cleanup*) malloc(
                    sizeof(struct pending_kill_exit_cleanup));
            cleanup->sb = spb;
            cleanup->domain_cap = domain_cap;

            spb->rx_vtbl.cleanup_reply = cleanup_reply_handler;
            event_mutex_enqueue_lock(&spb->mutex,
                                     &cleanup->qn,
                                     (struct event_closure) {
                                         .handler = cleanup_request_sender,
                                         .arg = cleanup });
        }
    } else {
        err = pending_clients_add(domain_cap, cl->b, ClientType_Exit,
                                  MAX_COREID);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "pending_clients_add in exit_reply_handler");
        }
    }
}

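/**
 * \brief Common path for spawn and spawn_with_caps calls: allocates and
 *        retypes a new domain cap, registers the calling client as pending,
 *        and enqueues a spawn request on the target core's spawnd binding.
 */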
static errval_t spawn_handler_common(struct proc_mgmt_binding *b,
                                     enum ClientType type,
                                     coreid_t core_id, const char *path,
                                     const char *argvbuf, size_t argvbytes,
                                     const char *envbuf, size_t envbytes,
                                     struct capref inheritcn_cap,
                                     struct capref argcn_cap, uint8_t flags)
{
    if (!spawnd_state_exists(core_id)) {
        return PROC_MGMT_ERR_INVALID_SPAWND;
    }

    struct spawnd_state *state = spawnd_state_get(core_id);
    assert(state != NULL);
    struct spawn_binding *cl = state->b;
    assert(cl != NULL);

    struct capref domain_cap;
    errval_t err = slot_alloc(&domain_cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "slot_alloc domain_cap");
        return err_push(err, PROC_MGMT_ERR_CREATE_DOMAIN_CAP);
    }
    err = cap_retype(domain_cap, cap_procmng, 0, ObjType_Domain, 0, 1);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "cap_retype domain_cap");
        return err_push(err, PROC_MGMT_ERR_CREATE_DOMAIN_CAP);
    }

    err = pending_clients_add(domain_cap, b, type, core_id);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "pending_clients_add");
        return err;
    }

    struct pending_spawn *spawn = (struct pending_spawn*) malloc(
            sizeof(struct pending_spawn));
    spawn->domain_cap = domain_cap;
    spawn->b = cl;
    spawn->core_id = core_id;
    spawn->path = path;
    spawn->argvbuf = argvbuf;
    spawn->argvbytes = argvbytes;
    spawn->envbuf = envbuf;
    spawn->envbytes = envbytes;
    spawn->inheritcn_cap = inheritcn_cap;
    spawn->argcn_cap = argcn_cap;
    spawn->flags = flags;

    event_mutex_enqueue_lock(&cl->mutex, &spawn->qn,
                             (struct event_closure) {
                                 .handler = spawn_request_sender,
                                 .arg = spawn });

    return SYS_ERR_OK;
}

static void spawn_handler(struct proc_mgmt_binding *b, coreid_t core_id,
                          const char *path, const char *argvbuf,
                          size_t argvbytes, const char *envbuf, size_t envbytes,
                          uint8_t flags)
{
    errval_t err, resp_err;
    err = spawn_handler_common(b, ClientType_Spawn, core_id, path, argvbuf,
                               argvbytes, envbuf, envbytes, NULL_CAP, NULL_CAP,
                               flags);
    if (err_is_ok(err)) {
        // Will respond to client when we get the reply from spawnd.
        return;
    }

    resp_err = b->tx_vtbl.spawn_response(b, NOP_CONT, err, NULL_CAP);
    if (err_is_fail(resp_err)) {
        DEBUG_ERR(resp_err, "failed to send spawn_response");
    }
}

static void spawn_with_caps_handler(struct proc_mgmt_binding *b,
                                    coreid_t core_id, const char *path,
                                    const char *argvbuf, size_t argvbytes,
                                    const char *envbuf, size_t envbytes,
                                    struct capref inheritcn_cap,
                                    struct capref argcn_cap, uint8_t flags)
{
    errval_t err, resp_err;
    err = spawn_handler_common(b, ClientType_SpawnWithCaps, core_id, path,
                               argvbuf, argvbytes, envbuf, envbytes,
                               inheritcn_cap, argcn_cap, flags);
    if (err_is_ok(err)) {
        // Will respond to client when we get the reply from spawnd.
        return;
    }

    resp_err = b->tx_vtbl.spawn_with_caps_response(b, NOP_CONT, err,
                                                   NULL_CAP);
    if (err_is_fail(resp_err)) {
        DEBUG_ERR(resp_err, "failed to send spawn_with_caps_response");
    }
}

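/**
 * \brief Handler for span calls: validates the request, registers the client
 *        as pending and enqueues a span request to the spawnd on the target
 *        core.
 */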
static void span_handler(struct proc_mgmt_binding *b, struct capref domain_cap,
                         coreid_t core_id, struct capref vroot,
                         struct capref dispframe)
{
    errval_t err, resp_err;
    err = domain_can_span(domain_cap, core_id);
    if (err_is_fail(err)) {
        goto respond_with_err;
    }

    if (!spawnd_state_exists(core_id)) {
        err = PROC_MGMT_ERR_INVALID_SPAWND;
        goto respond_with_err;
    }

    struct spawnd_state *state = spawnd_state_get(core_id);
    assert(state != NULL);
    struct spawn_binding *cl = state->b;
    assert(cl != NULL);

    err = pending_clients_add(domain_cap, b, ClientType_Span, core_id);
    if (err_is_fail(err)) {
        goto respond_with_err;
    }

    struct pending_span *span = (struct pending_span*) malloc(
            sizeof(struct pending_span));
    span->domain_cap = domain_cap;
    span->b = cl;
    span->core_id = core_id;
    span->vroot = vroot;
    span->dispframe = dispframe;

    event_mutex_enqueue_lock(&cl->mutex, &span->qn,
                             (struct event_closure) {
                                 .handler = span_request_sender,
                                 .arg = span });
    return;

respond_with_err:
    resp_err = b->tx_vtbl.span_response(b, NOP_CONT, err);
    if (err_is_fail(resp_err)) {
        DEBUG_ERR(resp_err, "failed to send span_response");
    }
}

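/**
 * \brief Common path for kill and exit: records the exit status, marks the
 *        domain as stopping, and enqueues a kill or exit request to every
 *        spawnd on which the domain has dispatchers.
 */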
static errval_t kill_handler_common(struct proc_mgmt_binding *b,
                                    struct capref domain_cap,
                                    enum ClientType type,
                                    uint8_t exit_status)
{
    errval_t err = pending_clients_add(domain_cap, b, type, MAX_COREID);
    if (err_is_fail(err)) {
        return err;
    }

    struct domain_entry *entry;
    err = domain_get_by_cap(domain_cap, &entry);
    if (err_is_fail(err)) {
        return err;
    }

    entry->exit_status = exit_status;
    domain_stop_pending(entry);

    for (coreid_t i = 0; i < MAX_COREID; ++i) {
        if (entry->spawnds[i] == NULL) {
            continue;
        }

        struct spawn_binding *spb = entry->spawnds[i]->b;

        struct pending_kill_exit_cleanup *cmd = (struct pending_kill_exit_cleanup*) malloc(
                sizeof(struct pending_kill_exit_cleanup));
        cmd->domain_cap = domain_cap;
        cmd->sb = spb;

        switch (type) {
            case ClientType_Kill:
                event_mutex_enqueue_lock(&spb->mutex,
                                         &cmd->qn,
                                         (struct event_closure) {
                                             .handler = kill_request_sender,
                                             .arg = cmd });
                break;
            case ClientType_Exit:
                event_mutex_enqueue_lock(&spb->mutex,
                                         &cmd->qn,
                                         (struct event_closure) {
                                             .handler = exit_request_sender,
                                             .arg = cmd });
                break;
            default:
                USER_PANIC("invalid client type for kill: %u\n", type);
        }
    }

    return SYS_ERR_OK;
}

static void kill_handler(struct proc_mgmt_binding *b,
                         struct capref victim_domain_cap)
{
    errval_t err = kill_handler_common(b, victim_domain_cap, ClientType_Kill,
                                       EXIT_STATUS_KILLED);
    if (err_is_fail(err)) {
        errval_t resp_err = b->tx_vtbl.kill_response(b, NOP_CONT, err);
        if (err_is_fail(resp_err)) {
            DEBUG_ERR(resp_err, "failed to send kill_response");
        }
    }
}

static void exit_handler(struct proc_mgmt_binding *b, struct capref domain_cap,
                         uint8_t exit_status)
{
    errval_t err = kill_handler_common(b, domain_cap, ClientType_Exit,
                                       exit_status);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "processing exit_handler for requesting domain, exit "
                  "code %u", exit_status);
    }
    // Error or not, there's no client to respond to anymore.
}

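/**
 * \brief Handler for wait calls: replies immediately if the domain has
 *        already stopped, otherwise queues the caller on the domain's waiter
 *        list until kill/exit processing completes.
 */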
static void wait_handler(struct proc_mgmt_binding *b, struct capref domain_cap)
{
    errval_t err, resp_err;
    struct domain_entry *entry;
    err = domain_get_by_cap(domain_cap, &entry);
    if (err_is_fail(err)) {
        goto respond;
    }

    if (entry->status == DOMAIN_STATUS_STOPPED) {
        // Domain has already been stopped, so just reply with exit status.
        goto respond;
    }

    struct domain_waiter *waiter = (struct domain_waiter*) malloc(
            sizeof(struct domain_waiter));
    waiter->b = b;
    waiter->next = entry->waiters;
    entry->waiters = waiter;
    // Will respond when domain is stopped.
    return;

respond:
    resp_err = b->tx_vtbl.wait_response(b, NOP_CONT, err, entry->exit_status);
    if (err_is_fail(resp_err)) {
        DEBUG_ERR(resp_err, "failed to send wait_response");
    }
}

static struct proc_mgmt_rx_vtbl monitor_vtbl = {
    .add_spawnd           = add_spawnd_handler,
    .spawn_call           = spawn_handler,
    .spawn_with_caps_call = spawn_with_caps_handler,
    .span_call            = span_handler,
    .kill_call            = kill_handler,
    .exit_call            = exit_handler,
    .wait_call            = wait_handler
};

static struct proc_mgmt_rx_vtbl non_monitor_vtbl = {
    .add_spawnd           = add_spawnd_handler_non_monitor,
    .spawn_call           = spawn_handler,
    .spawn_with_caps_call = spawn_with_caps_handler,
    .span_call            = span_handler,
    .kill_call            = kill_handler,
    .exit_call            = exit_handler,
    .wait_call            = wait_handler
};

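/**
 * \brief Allocates an LMP endpoint over which the local monitor can invoke
 *        the monitor-only part of the interface (add_spawnd).
 */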
static errval_t alloc_ep_for_monitor(struct capref *ep)
{
    struct proc_mgmt_lmp_binding *lmpb =
            malloc(sizeof(struct proc_mgmt_lmp_binding));
    assert(lmpb != NULL);

    // setup our end of the binding
    errval_t err = proc_mgmt_client_lmp_accept(lmpb, get_default_waitset(),
                                               DEFAULT_LMP_BUF_WORDS);
    if (err_is_fail(err)) {
        free(lmpb);
        return err_push(err, LIB_ERR_PROC_MGMT_CLIENT_ACCEPT);
    }

    *ep = lmpb->chan.local_cap;
    lmpb->b.rx_vtbl = monitor_vtbl;

    return SYS_ERR_OK;
}

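/**
 * \brief Export callback: hands the monitor its LMP endpoint and registers
 *        the service iref with the nameservice for regular clients.
 */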
static void export_cb(void *st, errval_t err, iref_t iref)
{
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "export failed");
    }

    // Allocate an endpoint for the local monitor, who will use it to inform
    // us about new spawnd irefs on behalf of other monitors.
    struct capref ep;
    err = alloc_ep_for_monitor(&ep);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to allocate LMP EP for local monitor");
    }

    // Send the endpoint to the monitor, so it can finish the handshake.
    struct monitor_binding *mb = get_monitor_binding();
    err = mb->tx_vtbl.set_proc_mgmt_ep_request(mb, NOP_CONT, ep);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to send set_proc_mgmt_ep_request to "
                       "monitor");
    }

    // Also register this iref with the name service, for arbitrary client
    // domains to use for spawn-related ops.
    err = nameservice_register(SERVICE_BASENAME, iref);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "nameservice_register failed");
    }
}

static errval_t connect_cb(void *st, struct proc_mgmt_binding *b)
{
    b->rx_vtbl = non_monitor_vtbl;
    return SYS_ERR_OK;
}

errval_t start_service(void)
{
    return proc_mgmt_export(NULL, export_cb, connect_cb, get_default_waitset(),
                            IDC_EXPORT_FLAGS_DEFAULT);
}