/*
 * Copyright (c) 2010, 2011, 2012, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <barrelfish/barrelfish.h>
#include <spawndomain/spawndomain.h>
#include <barrelfish/monitor_client.h>
#include <barrelfish/nameservice_client.h>
#include <barrelfish/cpu_arch.h>
#include <vfs/vfs.h>
#include <vfs/vfs_path.h>
#include <dist/barrier.h>
#include <if/spawn_defs.h>
#include <if/monitor_defs.h>
#include <if/monitor_blocking_defs.h>
#include <barrelfish/dispatcher_arch.h>
#include <barrelfish/invocations_arch.h>

// local spawnd definitions: process table (ps.h) and service glue (internal.h)
#include "internal.h"
#include "ps.h"
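
/*
 * spawnd service: loads ELF images from the VFS, creates and runs new
 * dispatchers, and tracks them in a local process table (struct ps_entry).
 * The binding exposes two sets of entry points (see the vtables at the end
 * of this file): asynchronous requests driven by the process manager
 * (spawn_request/span_request/kill_request/...) and the older RPC interface
 * used by legacy clients (spawn_domain_call, wait_call, status_call, ...).
 */

/**
 * \brief Load an image from \p path and start it as a new domain.
 *
 * Reads the file via VFS, loads it with spawn_load_image(), wires up a
 * monitor endpoint and the perfmon/domain-ID capabilities in the child's
 * task CNode, runs the dispatcher and records it in the process table.
 */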
static errval_t spawn(struct capref domain_cap, const char *path,
                      char *const argv[], const char *argbuf, size_t argbytes,
                      char *const envp[], struct capref inheritcn_cap,
                      struct capref argcn_cap, uint8_t flags,
                      domainid_t *domainid)
{
    errval_t err, msgerr;

    /* read file into memory */
    vfs_handle_t fh;
    err = vfs_open(path, &fh);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_LOAD);
    }

    struct vfs_fileinfo info;
    err = vfs_stat(fh, &info);
    if (err_is_fail(err)) {
        vfs_close(fh);
        return err_push(err, SPAWN_ERR_LOAD);
    }

    assert(info.type == VFS_FILE);
    uint8_t *image = malloc(info.size);
    if (image == NULL) {
        vfs_close(fh);
        return err_push(err, SPAWN_ERR_LOAD);
    }

    size_t pos = 0, readlen;
    do {
        err = vfs_read(fh, &image[pos], info.size - pos, &readlen);
        if (err_is_fail(err)) {
            vfs_close(fh);
            free(image);
            return err_push(err, SPAWN_ERR_LOAD);
        } else if (readlen == 0) {
            vfs_close(fh);
            free(image);
            return SPAWN_ERR_LOAD; // XXX: unexpected end of file
        } else {
            pos += readlen;
        }
    } while (err_is_ok(err) && readlen > 0 && pos < info.size);

    err = vfs_close(fh);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to close file %s", path);
    }

    // find short name (last part of path)
    const char *name = strrchr(path, VFS_PATH_SEP);
    if (name == NULL) {
        name = path;
    } else {
        name++;
    }

    /* spawn the image */
    struct spawninfo si;
    err = spawn_load_image(&si, (lvaddr_t)image, info.size, CURRENT_CPU_TYPE,
                           name, my_core_id, argv, envp, inheritcn_cap,
                           argcn_cap);
    if (err_is_fail(err)) {
        free(image);
        return err;
    }

    free(image);

    /* request connection from monitor */
    struct monitor_blocking_binding *mrpc = get_monitor_blocking_binding();
    struct capref monep;
    err = slot_alloc(&monep);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MONEP_SLOT_ALLOC);
    }
    err = mrpc->rpc_tx_vtbl.alloc_monitor_ep(mrpc, &msgerr, &monep);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MONITOR_CLIENT);
    } else if (err_is_fail(msgerr)) {
        return err_push(msgerr, SPAWN_ERR_MONITOR_CLIENT);
    }

    /* copy connection into the new domain */
    struct capref destep = {
        .cnode = si.taskcn,
        .slot = TASKCN_SLOT_MONITOREP,
    };
    err = cap_copy(destep, monep);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MONITOR_CLIENT);
    }

    err = cap_destroy(monep);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MONITOR_CLIENT);
    }

    debug_printf("spawning %s on core %u\n", path, my_core_id);

    /* give the perfmon capability */
    struct capref dest, src;
    dest.cnode = si.taskcn;
    dest.slot = TASKCN_SLOT_PERF_MON;
    src.cnode = cnode_task;
    src.slot = TASKCN_SLOT_PERF_MON;
    err = cap_copy(dest, src);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_COPY_PERF_MON);
    }

    if (!capref_is_null(domain_cap)) {
        // Pass over the domain cap.
        dest.cnode = si.taskcn;
        dest.slot = TASKCN_SLOT_DOMAINID;
        err = cap_copy(dest, domain_cap);
        if (err_is_fail(err)) {
            return err_push(err, SPAWN_ERR_COPY_DOMAIN_CAP);
        }
    }

    /* run the domain */
    err = spawn_run(&si);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_RUN);
    }

    // Allocate domain id
    struct ps_entry *pe = malloc(sizeof(struct ps_entry));
    assert(pe != NULL);
    memset(pe, 0, sizeof(struct ps_entry));
    memcpy(pe->argv, argv, MAX_CMDLINE_ARGS*sizeof(*argv));
    pe->argbuf = memdup(argbuf, argbytes);
    pe->argbytes = argbytes;
    /*
     * NB: It's important to keep a copy of the DCB *and* the root
     * CNode around. We need to revoke both (in the right order, see
     * kill_domain() below), so that we ensure no one else is
     * referring to the domain's CSpace anymore. Especially the loop
     * created by placing rootcn into its own address space becomes a
     * problem here.
     */
    err = slot_alloc(&pe->rootcn_cap);
    assert(err_is_ok(err));
    err = cap_copy(pe->rootcn_cap, si.rootcn_cap);
    pe->rootcn = si.rootcn;
    assert(err_is_ok(err));
    err = slot_alloc(&pe->dcb);
    assert(err_is_ok(err));
    err = cap_copy(pe->dcb, si.dcb);
    assert(err_is_ok(err));
    pe->status = PS_STATUS_RUNNING;

    if (!capref_is_null(domain_cap)) {
        err = ps_hash_domain(pe, domain_cap);
        if (err_is_fail(err)) {
            free(pe);
            return err_push(err, SPAWN_ERR_DOMAIN_CAP_HASH);
        }
    }

    err = ps_allocate(pe, domainid);
    if(err_is_fail(err)) {
        free(pe);
        return err;
    }

    // Store in target dispatcher frame
    struct dispatcher_generic *dg = get_dispatcher_generic(si.handle);
    dg->domain_id = *domainid;

    err = spawn_free(&si);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_FREE);
    }

    return SYS_ERR_OK;
}
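
/**
 * \brief Reply to a use_local_memserv request, retrying if the channel
 *        is busy.
 *
 * retry_use_local_memserv_response() is registered as a send continuation by
 * use_local_memserv_handler() when the first send attempt returns
 * FLOUNDER_ERR_TX_BUSY.
 */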
static void retry_use_local_memserv_response(void *a)
{
    errval_t err;
    struct spawn_binding *b = (struct spawn_binding*)a;

    err = b->tx_vtbl.use_local_memserv_response(b, NOP_CONT);

    if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        // still busy: re-register ourselves to try again later
        err = b->register_send(b, get_default_waitset(),
                               MKCONT(retry_use_local_memserv_response,a));
    }
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "error sending use_local_memserv reply\n");
    }
}

static void use_local_memserv_handler(struct spawn_binding *b)
{
    errval_t err;

    err = b->tx_vtbl.use_local_memserv_response(b, NOP_CONT);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "error sending use_local_memserv reply");
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            err = b->register_send(b, get_default_waitset(),
                                   MKCONT(retry_use_local_memserv_response, b));
            if (err_is_fail(err)) {
                // note that only one continuation may be registered at a time
                DEBUG_ERR(err, "register_send failed!");
            }
        }
    }
}

struct pending_spawn_response {
    struct spawn_binding *b;
};
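
/**
 * \brief Common path for all spawn requests.
 *
 * Unpacks the NUL-separated argument and environment buffers into argv/envp
 * arrays, normalises the binary path and calls spawn().  The inherit and
 * argument CNode capabilities are deleted again once spawn() returns.
 */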
static errval_t spawn_with_caps_common(struct capref domain_cap,
                                       const char *path, const char *argbuf,
                                       size_t argbytes, const char *envbuf,
                                       size_t envbytes,
                                       struct capref inheritcn_cap,
                                       struct capref argcn_cap, uint8_t flags,
                                       domainid_t *domainid)
{
    errval_t err = SYS_ERR_OK;

    /* extract arguments from buffer */
    char *argv[MAX_CMDLINE_ARGS + 1];
    int i = 0;
    size_t pos = 0;
    while (pos < argbytes && i < MAX_CMDLINE_ARGS) {
        argv[i++] = (CONST_CAST)argbuf + pos;
        char *end = memchr(&argbuf[pos], '\0', argbytes - pos);
        if (end == NULL) {
            err = SPAWN_ERR_GET_CMDLINE_ARGS;
            goto finish;
        }
        pos = end - argbuf + 1;
    }
    assert(i <= MAX_CMDLINE_ARGS);
    argv[i] = NULL;

    /* extract environment from buffer */
    char *envp[MAX_CMDLINE_ARGS + 1];
    i = 0;
    pos = 0;
    while (pos < envbytes && i < MAX_CMDLINE_ARGS) {
        envp[i++] = (CONST_CAST)envbuf + pos;
        char *end = memchr(&envbuf[pos], '\0', envbytes - pos);
        if (end == NULL) {
            err = SPAWN_ERR_GET_CMDLINE_ARGS;
            goto finish;
        }
        pos = end - envbuf + 1;
    }
    assert(i <= MAX_CMDLINE_ARGS);
    envp[i] = NULL;

    /* normalise the path */
    char *npath;
    npath = alloca(strlen(path) + 1);   // +1 for the terminating NUL
    strcpy(npath, path);
    vfs_path_normalise(npath);

    err = spawn(domain_cap, npath, argv, argbuf, argbytes, envp, inheritcn_cap,
                argcn_cap, flags, domainid);
    // XXX: do we really want to delete the inheritcn and the argcn here? iaw:
    // do we copy these somewhere? -SG
    if (!capref_is_null(inheritcn_cap)) {
        errval_t err2;
        err2 = cap_delete(inheritcn_cap);
        assert(err_is_ok(err2));
    }
    if (!capref_is_null(argcn_cap)) {
        errval_t err2;
        err2 = cap_delete(argcn_cap);
        assert(err_is_ok(err2));
    }

finish:
    if(err_is_fail(err)) {
        DEBUG_ERR(err, "spawn");
    }

    return err;
}
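
/* Legacy RPC entry points (spawn_domain_call / spawn_domain_with_caps_call):
 * thin wrappers that call spawn_with_caps_common() with a NULL domain cap and
 * return the resulting error and domain ID to the caller. */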
static errval_t spawn_with_caps_handler(struct spawn_binding *b, const char *path,
    const char *argvbuf, size_t argvbytes, const char *envbuf, size_t envbytes,
    struct capref inheritcn_cap, struct capref argcn_cap, uint8_t flags,
    errval_t *err, spawn_domainid_t *domain_id)
{
    *err = spawn_with_caps_common(NULL_CAP, path, argvbuf, argvbytes, envbuf,
                                  envbytes, inheritcn_cap, argcn_cap, flags,
                                  domain_id);
    return SYS_ERR_OK;
}

static errval_t spawn_handler(struct spawn_binding *b, const char *path,
    const char *argvbuf, size_t argvbytes, const char *envbuf, size_t envbytes,
    uint8_t flags, errval_t *err, spawn_domainid_t *domain_id)
{
    *err = spawn_with_caps_common(NULL_CAP, path, argvbuf, argvbytes, envbuf,
                                  envbytes, NULL_CAP, NULL_CAP, flags,
                                  domain_id);
    return SYS_ERR_OK;
}
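
/*
 * Process-manager driven entry points.  Each request carries the process
 * manager's own capability, which is identified via the monitor and checked
 * against ObjType_ProcessManager before anything is spawned, spanned or
 * killed on its behalf; the outcome is sent back as an asynchronous *_reply
 * message.
 */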
static void spawn_with_caps_request_handler(struct spawn_binding *b,
                                            struct capref procmng_cap,
                                            struct capref domain_cap,
                                            const char *path,
                                            const char *argvbuf,
                                            size_t argvbytes,
                                            const char *envbuf,
                                            size_t envbytes,
                                            struct capref inheritcn_cap,
                                            struct capref argcn_cap,
                                            uint8_t flags)
{
    errval_t err, reply_err;
    struct capability ret;
    err = monitor_cap_identify_remote(procmng_cap, &ret);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_IDENTIFY_PROC_MNGR_CAP);
        goto reply;
    }

    if (ret.type != ObjType_ProcessManager) {
        err = SPAWN_ERR_NOT_PROC_MNGR;
        goto reply;
    }

    spawn_domainid_t dummy_domain_id;
    err = spawn_with_caps_common(domain_cap, path, argvbuf, argvbytes, envbuf,
                                 envbytes, inheritcn_cap, argcn_cap, flags,
                                 &dummy_domain_id);

reply:
    reply_err = b->tx_vtbl.spawn_with_caps_reply(b, NOP_CONT, domain_cap, err);
    if (err_is_fail(reply_err)) {
        DEBUG_ERR(reply_err, "failed to send spawn_with_caps_reply");
    }
}

static void spawn_request_handler(struct spawn_binding *b,
                                  struct capref procmng_cap,
                                  struct capref domain_cap, const char *path,
                                  const char *argvbuf, size_t argvbytes,
                                  const char *envbuf, size_t envbytes,
                                  uint8_t flags)
{
    errval_t err, reply_err;
    struct capability ret;
    err = monitor_cap_identify_remote(procmng_cap, &ret);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_IDENTIFY_PROC_MNGR_CAP);
        goto reply;
    }

    if (ret.type != ObjType_ProcessManager) {
        err = SPAWN_ERR_NOT_PROC_MNGR;
        goto reply;
    }

    spawn_domainid_t dummy_domain_id;
    err = spawn_with_caps_common(domain_cap, path, argvbuf, argvbytes, envbuf,
                                 envbytes, NULL_CAP, NULL_CAP, flags,
                                 &dummy_domain_id);

reply:
    reply_err = b->tx_vtbl.spawn_reply(b, NOP_CONT, domain_cap, err);
    if (err_is_fail(reply_err)) {
        DEBUG_ERR(reply_err, "failed to send spawn_reply");
    }
}
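
/**
 * \brief Span an existing domain onto this core.
 *
 * Builds a new dispatcher for the caller-supplied vspace root and dispatcher
 * frame via spawn_span_domain(), wires up the monitor endpoint, perfmon and
 * domain-ID capabilities just as spawn() does, runs the dispatcher, records
 * it in the process table and replies to the process manager.
 */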
static void span_request_handler(struct spawn_binding *b,
                                 struct capref procmng_cap,
                                 struct capref domain_cap, struct capref vroot,
                                 struct capref dispframe)
{
    errval_t err, mon_err, reply_err;
    struct capability ret;
    err = monitor_cap_identify_remote(procmng_cap, &ret);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_IDENTIFY_PROC_MNGR_CAP);
        goto reply;
    }

    if (ret.type != ObjType_ProcessManager) {
        err = SPAWN_ERR_NOT_PROC_MNGR;
        goto reply;
    }

    struct spawninfo si;
    memset(&si, 0, sizeof(si));

    debug_printf("Spanning domain to core %d\n", disp_get_core_id());

    err = spawn_span_domain(&si, vroot, dispframe);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_SPAN);
        goto reply;
    }

    // Set connection to monitor.
    struct monitor_blocking_binding *mrpc = get_monitor_blocking_binding();
    struct capref monep;
    err = slot_alloc(&monep);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_MONEP_SLOT_ALLOC);
        goto reply;
    }
    err = mrpc->rpc_tx_vtbl.alloc_monitor_ep(mrpc, &mon_err, &monep);
    if (err_is_ok(err)) {
        err = mon_err;
    }
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_MONITOR_CLIENT);
        goto reply;
    }

    /* copy connection into the new domain */
    struct capref destep = {
        .cnode = si.taskcn,
        .slot = TASKCN_SLOT_MONITOREP,
    };
    err = cap_copy(destep, monep);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_MONITOR_CLIENT);
        goto reply;
    }

    err = cap_destroy(monep);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_MONITOR_CLIENT);
        goto reply;
    }

    /* give the perfmon capability */
    struct capref dest, src;
    dest.cnode = si.taskcn;
    dest.slot = TASKCN_SLOT_PERF_MON;
    src.cnode = cnode_task;
    src.slot = TASKCN_SLOT_PERF_MON;
    err = cap_copy(dest, src);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_COPY_PERF_MON);
        goto reply;
    }

    // Pass over the domain cap.
    dest.cnode = si.taskcn;
    dest.slot = TASKCN_SLOT_DOMAINID;
    err = cap_copy(dest, domain_cap);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_COPY_DOMAIN_CAP);
        goto reply;
    }

    /* run the dispatcher */
    err = spawn_run(&si);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_RUN);
        goto reply;
    }

    // Allocate an id for this dispatcher.
    struct ps_entry *pe = malloc(sizeof(struct ps_entry));
    assert(pe != NULL);
    memset(pe, 0, sizeof(struct ps_entry));
    /*
     * NB: It's important to keep a copy of the DCB *and* the root
     * CNode around. We need to revoke both (in the right order, see
     * kill_domain() below), so that we ensure no one else is
     * referring to the domain's CSpace anymore. Especially the loop
     * created by placing rootcn into its own address space becomes a
     * problem here.
     */
    // TODO(razvan): The following code is here to comply with spawn().
    err = slot_alloc(&pe->rootcn_cap);
    assert(err_is_ok(err));
    err = cap_copy(pe->rootcn_cap, si.rootcn_cap);
    pe->rootcn = si.rootcn;
    assert(err_is_ok(err));
    err = slot_alloc(&pe->dcb);
    assert(err_is_ok(err));
    err = cap_copy(pe->dcb, si.dcb);
    assert(err_is_ok(err));
    pe->status = PS_STATUS_RUNNING;

    err = ps_hash_domain(pe, domain_cap);
    if (err_is_fail(err)) {
        free(pe);
        err = err_push(err, SPAWN_ERR_DOMAIN_CAP_HASH);
        goto reply;
    }

    domainid_t domainid;
    err = ps_allocate(pe, &domainid);
    if(err_is_fail(err)) {
        free(pe);
        goto reply;
    }

    err = spawn_free(&si);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_FREE);
    }

reply:
    reply_err = b->tx_vtbl.span_reply(b, NOP_CONT, domain_cap, err);
    if (err_is_fail(reply_err)) {
        DEBUG_ERR(reply_err, "failed to send span_reply");
    }
}
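
/**
 * \brief Revoke and destroy a capability, logging (but otherwise ignoring)
 *        any errors.
 */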
static void cleanup_cap(struct capref cap)
{
    errval_t err;

    err = cap_revoke(cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "cap_revoke");
    }
    err = cap_destroy(cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "cap_destroy");
    }
}
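
/**
 * \brief Common kill/exit path for process-manager requests: verify the
 *        caller is the process manager, look up the domain by its domain
 *        capability and revoke its DCB so the dispatcher stops running.
 */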
static errval_t kill_handler_common(struct capref procmng_cap,
                                    struct capref domain_cap)
{
    struct capability ret;
    errval_t err = monitor_cap_identify_remote(procmng_cap, &ret);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_IDENTIFY_PROC_MNGR_CAP);
    }

    if (ret.type != ObjType_ProcessManager) {
        return SPAWN_ERR_NOT_PROC_MNGR;
    }

    struct ps_entry *pe;
    err = ps_get_domain(domain_cap, &pe, NULL);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_DOMAIN_NOTFOUND);
    }

    cleanup_cap(pe->dcb);

    return SYS_ERR_OK;
}

static void kill_request_handler(struct spawn_binding *b,
                                 struct capref procmng_cap,
                                 struct capref victim_domain_cap)
{
    errval_t err = kill_handler_common(procmng_cap, victim_domain_cap);

    errval_t reply_err = b->tx_vtbl.kill_reply(b, NOP_CONT, victim_domain_cap,
                                               err);
    if (err_is_fail(reply_err)) {
        DEBUG_ERR(reply_err, "failed to send kill_reply");
    }
}

static void exit_request_handler(struct spawn_binding *b,
                                 struct capref procmng_cap,
                                 struct capref domain_cap)
{
    errval_t err = kill_handler_common(procmng_cap, domain_cap);

    errval_t reply_err = b->tx_vtbl.exit_reply(b, NOP_CONT, domain_cap, err);
    if (err_is_fail(reply_err)) {
        DEBUG_ERR(reply_err, "failed to send exit_reply");
    }
}
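
/**
 * \brief Release all spawnd-side state for a domain the process manager has
 *        finished with: revoke the saved root CNode and drop the
 *        corresponding ps_entry from the process table.
 */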
static void cleanup_request_handler(struct spawn_binding *b,
                                    struct capref procmng_cap,
                                    struct capref domain_cap)
{
    errval_t err, reply_err;
    struct capability ret;
    err = monitor_cap_identify_remote(procmng_cap, &ret);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_IDENTIFY_PROC_MNGR_CAP);
        goto reply;
    }

    if (ret.type != ObjType_ProcessManager) {
        err = SPAWN_ERR_NOT_PROC_MNGR;
        goto reply;
    }

    struct ps_entry *pe;
    err = ps_release_domain(domain_cap, &pe);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_DOMAIN_NOTFOUND);
        goto reply;
    }

    cleanup_cap(pe->rootcn_cap);

    // Cleanup struct ps_entry. Note that waiters will be handled by the process
    // manager, as opposed to the old protocol of handling them here.
    free(pe->argbuf);
    ps_remove(pe->domain_id);
    free(pe);

reply:
    reply_err = b->tx_vtbl.cleanup_reply(b, NOP_CONT, domain_cap, err);
    if (err_is_fail(reply_err)) {
        DEBUG_ERR(reply_err, "failed to send cleanup_reply");
    }
}

/**
 * \brief Removes a zombie domain.
 */
static void cleanup_domain(domainid_t domainid)
{
    errval_t err;
    struct ps_entry *ps = ps_get(domainid);
    assert(ps != NULL);

    // Tell all waiters of exit and free list as we go
    for(struct ps_waiter *w = ps->waiters; w != NULL;) {
        err = w->binding->tx_vtbl.wait_response
            (w->binding, NOP_CONT, ps->exitcode, SYS_ERR_OK);
        if(err_is_fail(err)) {
            DEBUG_ERR(err, "wait_response");
        }

        struct ps_waiter *oldw = w;
        w = w->next;
        free(oldw);
    }
    ps->waiters = NULL;

    // Cleanup rest of ps entry
    free(ps->argbuf);

    ps_remove(domainid);
}
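
/**
 * \brief Kill a domain on behalf of a legacy client.
 *
 * Marks the ps_entry as a zombie, records the exit code, revokes the DCB
 * (descheduling the dispatcher) and then the root CNode, and, if there are
 * waiters, informs them via cleanup_domain().
 */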
static errval_t kill_domain(domainid_t domainid, uint8_t exitcode)
{
    struct ps_entry *ps = ps_get(domainid);

    if(ps == NULL) {
        return SPAWN_ERR_DOMAIN_NOTFOUND;
    }

    ps->status = PS_STATUS_ZOMBIE;
    ps->exitcode = exitcode;

    // Garbage collect victim's capabilities
    cleanup_cap(ps->dcb); // Deschedule dispatcher (do this first!)
    cleanup_cap(ps->rootcn_cap);

    // XXX: why only when waiters exist? -SG
    if(ps->waiters != NULL) {
        // Cleanup local data structures and inform waiters
        cleanup_domain(domainid);
    }

    return SYS_ERR_OK;
}

static void kill_handler(struct spawn_binding *b, domainid_t domainid)
{
    errval_t err = kill_domain(domainid, 0);

    err = b->tx_vtbl.kill_response(b, NOP_CONT, err);
    if(err_is_fail(err)) {
        DEBUG_ERR(err, "kill_response");
    }
}

static void exit_handler(struct spawn_binding *b, domainid_t domainid,
                         uint8_t exitcode)
{
    errval_t err = kill_domain(domainid, exitcode);
    struct ps_entry *ps = ps_get(domainid);

    if(err_is_fail(err)) {
        DEBUG_ERR(err, "kill_domain");
    }

    if(ps == NULL) {
        // XXX: Can't do nothing
        return;
    }

    // May never return anything to client
}
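
/**
 * \brief Handle a wait_call from a legacy client.
 *
 * Either enqueues the caller as a waiter on the domain (the reply is sent
 * from cleanup_domain() once the domain exits, or immediately if it is
 * already a zombie), or, with nohang set and the domain still running,
 * answers straight away with SPAWN_ERR_DOMAIN_RUNNING.
 */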
static void wait_handler(struct spawn_binding *b, domainid_t domainid,
                         bool nohang)
{
    errval_t err;
    struct ps_entry *ps = ps_get(domainid);

    if(ps == NULL) {
        err = b->tx_vtbl.wait_response(b, NOP_CONT, 0, SPAWN_ERR_DOMAIN_NOTFOUND);
        if(err_is_fail(err)) {
            DEBUG_ERR(err, "wait_response");
        }
        return;
    }

    if(!nohang || ps->status == PS_STATUS_ZOMBIE) {
        // Enqueue the waiter
        struct ps_waiter *waiter = malloc(sizeof(struct ps_waiter));
        assert(waiter != NULL);
        waiter->next = ps->waiters;
        waiter->binding = b;
        ps->waiters = waiter;
    } else {
        // nohang and no zombie, return error
        err = b->tx_vtbl.wait_response(b, NOP_CONT, 0, SPAWN_ERR_DOMAIN_RUNNING);
        if(err_is_fail(err)) {
            DEBUG_ERR(err, "wait_response");
        }
    }

    // Cleanup if zombie (will send the reply)
    if(ps->status == PS_STATUS_ZOMBIE) {
        cleanup_domain(domainid);
    }
}
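
/* get_domainlist: walk the process table and send back the list of live
 * domain IDs; get_domainlist_sent() frees the buffer once the message has
 * gone out. */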
static void get_domainlist_sent(void *arg)
{
    free(arg);
}

static void get_domainlist_handler(struct spawn_binding *b)
{
    errval_t err;
    size_t len = 0;
    uint8_t *domains = calloc(sizeof(uint8_t), MAX_DOMAINS);

    // XXX: Very inefficient
    for(domainid_t i = 0; i < MAX_DOMAINS; i++) {
        if(ps_get(i) != NULL) {
            domains[len++] = i;
        }
    }

    err = b->tx_vtbl.get_domainlist_response
        (b, MKCLOSURE(get_domainlist_sent, domains), domains, len);
    if(err_is_fail(err)) {
        DEBUG_ERR(err, "get_domainlist_response");
        free(domains);
    }
}
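
/* status_call: report a domain's status and its argument buffer back to the
 * client, or SPAWN_ERR_DOMAIN_NOTFOUND if there is no such ps_entry. */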
static void status_handler(struct spawn_binding *b, domainid_t domainid)
{
    errval_t err;
    struct ps_entry *ps = ps_get(domainid);
    spawn_ps_entry_t pse;

    memset(&pse, 0, sizeof(pse));

    if(ps == NULL) {
        err = b->tx_vtbl.status_response(b, NOP_CONT, pse, NULL, 0,
                                         SPAWN_ERR_DOMAIN_NOTFOUND);
        if(err_is_fail(err)) {
            DEBUG_ERR(err, "status_response");
        }
        return;
    }

    pse.status = ps->status;

    err = b->tx_vtbl.status_response(b, NOP_CONT, pse, ps->argbuf, ps->argbytes,
                                     SYS_ERR_OK);
    if(err_is_fail(err)) {
        DEBUG_ERR(err, "status_response");
    }
}

static errval_t dump_capabilities(domainid_t domainid) {
    struct ps_entry *ps = ps_get(domainid);

    if(ps == NULL) {
        return SPAWN_ERR_DOMAIN_NOTFOUND;
    }

    return invoke_dispatcher_dump_capabilities(ps->dcb);
}

static void dump_capabilities_handler(struct spawn_binding *b, domainid_t domainid) {
    errval_t err = dump_capabilities(domainid);

    err = b->tx_vtbl.dump_capabilities_response(b, NOP_CONT, err);
    if(err_is_fail(err)) {
        DEBUG_ERR(err, "dump_capabilities_response");
    }
}

static struct spawn_rx_vtbl rx_vtbl = {
    // .spawn_domain_call = spawn_handler,
    // .spawn_domain_with_caps_call = spawn_with_caps_handler,

    // Async messages for the process manager.
    .spawn_request = spawn_request_handler,
    .spawn_with_caps_request = spawn_with_caps_request_handler,
    .span_request = span_request_handler,
    .kill_request = kill_request_handler,
    .exit_request = exit_request_handler,
    .cleanup_request = cleanup_request_handler,

    .use_local_memserv_call = use_local_memserv_handler,
    .kill_call = kill_handler,
    .exit_call = exit_handler,
    .wait_call = wait_handler,
    .get_domainlist_call = get_domainlist_handler,
    .status_call = status_handler,
    .dump_capabilities_call = dump_capabilities_handler
};

static struct spawn_rpc_rx_vtbl rpc_rx_vtbl = {
    .spawn_domain_call = spawn_handler,
    .spawn_domain_with_caps_call = spawn_with_caps_handler,
    // .use_local_memserv_call = use_local_memserv_handler,
    // .kill_call = kill_handler,
    // .exit_call = exit_handler,
    // .wait_call = wait_handler,
    // .get_domainlist_call = get_domainlist_handler,
    // .status_call = status_handler,
    // .dump_capabilities_call = dump_capabilities_handler
};
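
/*
 * Service export: once the spawn interface has been exported, the iref is
 * handed to the local monitor (which forwards it to the process manager) and
 * also registered with the name service as "<SERVICE_BASENAME>.<core id>" so
 * legacy clients can find the per-core spawnd.
 */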
static void export_cb(void *st, errval_t err, iref_t iref)
{
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "export failed");
    }

    // Send iref back to monitor, which will forward it to the process manager.
    struct monitor_binding *mb = get_monitor_binding();
    err = mb->tx_vtbl.set_spawn_iref_request(mb, NOP_CONT, iref);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to send set_spawn_iref_request to "
                            "monitor");
    }

    // construct the per-core service name "<SERVICE_BASENAME>.<core id>"
    char namebuf[32];
    size_t len = snprintf(namebuf, sizeof(namebuf), "%s.%d", SERVICE_BASENAME,
                          my_core_id);
    assert(len < sizeof(namebuf));
    namebuf[sizeof(namebuf) - 1] = '\0';

    // register this iref with the name service
    err = nameservice_register(namebuf, iref);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "nameservice_register failed");
    }
}

static errval_t connect_cb(void *st, struct spawn_binding *b)
{
    // copy my message receive handler vtable to the binding
    b->rx_vtbl = rx_vtbl;
    b->rpc_rx_vtbl = rpc_rx_vtbl;
    return SYS_ERR_OK;
}

errval_t start_service(void)
{
    return spawn_export(NULL, export_cb, connect_cb, get_default_waitset(),
                        IDC_EXPORT_FLAGS_DEFAULT);
}
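
/*
 * Client-side usage (illustrative sketch only, not part of this file):
 * applications normally go through libbarrelfish's spawn client rather than
 * talking to this binding directly.  Assuming the spawn_program() helper
 * from <spawndomain/spawndomain.h> (exact signature and flag names may
 * differ between trees), starting a program on core 0 looks roughly like:
 *
 *     domainid_t id;
 *     char *argv[] = { "hello", NULL };
 *     errval_t err = spawn_program(0, "/armv7/sbin/hello",
 *                                  argv, NULL, SPAWN_FLAGS_DEFAULT, &id);
 *     if (err_is_fail(err)) {
 *         USER_PANIC_ERR(err, "spawn_program");
 *     }
 *
 * The helper locates the per-core spawnd service registered by export_cb()
 * above, binds to it and issues the spawn_domain RPC handled by
 * spawn_handler() in this file.
 */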