/*
 * Copyright (c) 2010, 2011, 2012, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#include <barrelfish/barrelfish.h>
#include <spawndomain/spawndomain.h>
#include <barrelfish/monitor_client.h>
#include <barrelfish/nameservice_client.h>
#include <barrelfish/cpu_arch.h>
#include <vfs/vfs.h>
#include <vfs/vfs_path.h>
#include <dist/barrier.h>
#include <if/spawn_defs.h>
#include <if/monitor_defs.h>
#include <if/monitor_blocking_defs.h>
#include <barrelfish/dispatcher_arch.h>
#include <barrelfish/invocations_arch.h>

// Local spawnd declarations (ps_* bookkeeping, my_core_id, SERVICE_BASENAME).
#include "internal.h"
#include "ps.h"

// NB: assumed fallback definition; the original CONST_CAST helper (used to
// strip const from the marshalled argument/environment buffers) is defined
// elsewhere and not shown in this excerpt.
#ifndef CONST_CAST
#define CONST_CAST char *
#endif

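/**
 * \brief Load and start a new domain from the image at 'path'.
 *
 * Reads the binary via VFS, loads it with spawn_load_image(), installs a
 * monitor endpoint, the perfmon capability and (if given) the domain
 * capability in the new task CNode, starts the dispatcher and records the
 * domain in the local process table.
 */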
static errval_t spawn(struct capref domain_cap, const char *path,
                      char *const argv[], const char *argbuf, size_t argbytes,
                      char *const envp[], struct capref inheritcn_cap,
                      struct capref argcn_cap, uint8_t flags,
                      domainid_t *domainid)
{
    errval_t err, msgerr;

    /* read file into memory */
    vfs_handle_t fh;
    err = vfs_open(path, &fh);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_LOAD);
    }

    struct vfs_fileinfo info;
    err = vfs_stat(fh, &info);
    if (err_is_fail(err)) {
        vfs_close(fh);
        return err_push(err, SPAWN_ERR_LOAD);
    }

    assert(info.type == VFS_FILE);
    uint8_t *image = malloc(info.size);
    if (image == NULL) {
        vfs_close(fh);
        return err_push(err, SPAWN_ERR_LOAD);
    }

    // read the whole file, coping with short reads
    size_t pos = 0, readlen;
    do {
        err = vfs_read(fh, &image[pos], info.size - pos, &readlen);
        if (err_is_fail(err)) {
            vfs_close(fh);
            free(image);
            return err_push(err, SPAWN_ERR_LOAD);
        } else if (readlen == 0) {
            vfs_close(fh);
            free(image);
            return SPAWN_ERR_LOAD; // XXX: unexpected end of file
        } else {
            pos += readlen;
        }
    } while (err_is_ok(err) && readlen > 0 && pos < info.size);

    err = vfs_close(fh);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to close file %s", path);
    }

    // find short name (last part of path)
    const char *name = strrchr(path, VFS_PATH_SEP);
    if (name == NULL) {
        name = path;
    } else {
        name++;
    }

    /* spawn the image */
    struct spawninfo si;
    err = spawn_load_image(&si, (lvaddr_t)image, info.size, CURRENT_CPU_TYPE,
                           name, my_core_id, argv, envp, inheritcn_cap,
                           argcn_cap);
    if (err_is_fail(err)) {
        free(image);
        return err;
    }
    free(image);

    /* request connection from monitor */
    struct monitor_blocking_binding *mrpc = get_monitor_blocking_binding();
    struct capref monep;
    err = slot_alloc(&monep);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MONEP_SLOT_ALLOC);
    }
    err = mrpc->rpc_tx_vtbl.alloc_monitor_ep(mrpc, &msgerr, &monep);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MONITOR_CLIENT);
    } else if (err_is_fail(msgerr)) {
        return err_push(msgerr, SPAWN_ERR_MONITOR_CLIENT);
    }

    /* copy connection into the new domain */
    struct capref destep = {
        .cnode = si.taskcn,
        .slot  = TASKCN_SLOT_MONITOREP,
    };
    err = cap_copy(destep, monep);
    if (err_is_fail(err)) {
        spawn_free(&si);
        cap_destroy(monep);
        return err_push(err, SPAWN_ERR_MONITOR_CLIENT);
    }

    err = cap_destroy(monep);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_MONITOR_CLIENT);
    }

    debug_printf("spawning %s on core %u\n", path, my_core_id);

    /* give the perfmon capability */
    struct capref dest, src;
    dest.cnode = si.taskcn;
    dest.slot = TASKCN_SLOT_PERF_MON;
    src.cnode = cnode_task;
    src.slot = TASKCN_SLOT_PERF_MON;
    err = cap_copy(dest, src);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_COPY_PERF_MON);
    }

    if (!capref_is_null(domain_cap)) {
        // Pass over the domain cap.
        dest.cnode = si.taskcn;
        dest.slot = TASKCN_SLOT_DOMAINID;
        err = cap_copy(dest, domain_cap);
        if (err_is_fail(err)) {
            return err_push(err, SPAWN_ERR_COPY_DOMAIN_CAP);
        }
    }

    /* run the domain */
    err = spawn_run(&si);
    if (err_is_fail(err)) {
        spawn_free(&si);
        return err_push(err, SPAWN_ERR_RUN);
    }

    // Allocate domain id
    struct ps_entry *pe = malloc(sizeof(struct ps_entry));
    assert(pe != NULL);
    memset(pe, 0, sizeof(struct ps_entry));
    memcpy(pe->argv, argv, MAX_CMDLINE_ARGS * sizeof(*argv));
    pe->argbuf = memdup(argbuf, argbytes);
    pe->argbytes = argbytes;
    /*
     * NB: It's important to keep a copy of the DCB *and* the root
     * CNode around. We need to revoke both (in the right order, see
     * kill_domain() below), so that we ensure no one else is
     * referring to the domain's CSpace anymore. Especially the loop
     * created by placing rootcn into its own address space becomes a
     * problem here.
     */
    err = slot_alloc(&pe->rootcn_cap);
    assert(err_is_ok(err));
    err = cap_copy(pe->rootcn_cap, si.rootcn_cap);
    assert(err_is_ok(err));
    pe->rootcn = si.rootcn;
    err = slot_alloc(&pe->dcb);
    assert(err_is_ok(err));
    err = cap_copy(pe->dcb, si.dcb);
    assert(err_is_ok(err));
    pe->status = PS_STATUS_RUNNING;

    if (!capref_is_null(domain_cap)) {
        err = ps_hash_domain(pe, domain_cap);
        if (err_is_fail(err)) {
            free(pe);
            spawn_free(&si);
            return err_push(err, SPAWN_ERR_DOMAIN_CAP_HASH);
        }
    }

    err = ps_allocate(pe, domainid);
    if (err_is_fail(err)) {
        free(pe);
    }

    // Store the domain id in the target dispatcher frame
    struct dispatcher_generic *dg = get_dispatcher_generic(si.handle);
    dg->domain_id = *domainid;

    /* cleanup */
    err = spawn_free(&si);
    if (err_is_fail(err)) {
        return err_push(err, SPAWN_ERR_FREE);
    }

    return SYS_ERR_OK;
}

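/*
 * use_local_memserv: acknowledge the client's request; if the binding is
 * busy, the reply is retried via register_send().
 */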
static void retry_use_local_memserv_response(void *a)
{
    errval_t err;

    struct spawn_binding *b = (struct spawn_binding *)a;

    err = b->tx_vtbl.use_local_memserv_response(b, NOP_CONT);

    if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        // binding still busy: re-register and try again later
        err = b->register_send(b, get_default_waitset(),
                               MKCONT(retry_use_local_memserv_response, a));
    }
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "error sending use_local_memserv reply\n");
    }
}

static void use_local_memserv_handler(struct spawn_binding *b)
{
    errval_t err;

    err = b->tx_vtbl.use_local_memserv_response(b, NOP_CONT);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "error sending use_local_memserv reply");
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            err = b->register_send(b, get_default_waitset(),
                                   MKCONT(retry_use_local_memserv_response, b));
            if (err_is_fail(err)) {
                // note that only one continuation may be registered at a time
                DEBUG_ERR(err, "register_send failed!");
            }
        }
    }
}

struct pending_spawn_response {
    struct spawn_binding *b;
};

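/**
 * \brief Common spawn path: unpack the argument and environment buffers into
 *        argv/envp arrays, normalise the path, and call spawn(). Any
 *        inherit/arg CNode caps passed by the caller are deleted afterwards.
 */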
static errval_t spawn_with_caps_common(struct capref domain_cap,
                                       const char *path, const char *argbuf,
                                       size_t argbytes, const char *envbuf,
                                       size_t envbytes,
                                       struct capref inheritcn_cap,
                                       struct capref argcn_cap, uint8_t flags,
                                       domainid_t *domainid)
{
    errval_t err = SYS_ERR_OK;

    /* extract arguments from buffer */
    char *argv[MAX_CMDLINE_ARGS + 1];
    int i = 0;
    size_t pos = 0;
    while (pos < argbytes && i < MAX_CMDLINE_ARGS) {
        argv[i++] = (CONST_CAST)argbuf + pos;
        char *end = memchr(&argbuf[pos], '\0', argbytes - pos);
        if (end == NULL) {
            err = SPAWN_ERR_GET_CMDLINE_ARGS;
            goto out;
        }
        pos = end - argbuf + 1;
    }
    assert(i <= MAX_CMDLINE_ARGS);
    argv[i] = NULL;

    /* extract environment from buffer */
    char *envp[MAX_CMDLINE_ARGS + 1];
    i = 0;
    pos = 0;
    while (pos < envbytes && i < MAX_CMDLINE_ARGS) {
        envp[i++] = (CONST_CAST)envbuf + pos;
        char *end = memchr(&envbuf[pos], '\0', envbytes - pos);
        if (end == NULL) {
            err = SPAWN_ERR_GET_CMDLINE_ARGS;
            goto out;
        }
        pos = end - envbuf + 1;
    }
    assert(i <= MAX_CMDLINE_ARGS);
    envp[i] = NULL;

    /* normalise the path */
    char *npath;
    npath = alloca(strlen(path) + 1);
    strcpy(npath, path);
    vfs_path_normalise(npath);

    err = spawn(domain_cap, npath, argv, argbuf, argbytes, envp, inheritcn_cap,
                argcn_cap, flags, domainid);

out:
    // XXX: do we really want to delete the inheritcn and the argcn here? iaw:
    // do we copy these somewhere? -SG
    if (!capref_is_null(inheritcn_cap)) {
        errval_t err2;
        err2 = cap_delete(inheritcn_cap);
        assert(err_is_ok(err2));
    }
    if (!capref_is_null(argcn_cap)) {
        errval_t err2;
        err2 = cap_delete(argcn_cap);
        assert(err_is_ok(err2));
    }

    if (err_is_fail(err)) {
        DEBUG_ERR(err, "spawn");
    }

    return err;
}

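/*
 * RPC-style handlers (rpc_rx_vtbl): spawn synchronously on behalf of a direct
 * client and return the error plus domain id in the reply.
 */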
static errval_t spawn_with_caps_handler(struct spawn_binding *b, const char *path,
    const char *argvbuf, size_t argvbytes, const char *envbuf, size_t envbytes,
    struct capref inheritcn_cap, struct capref argcn_cap, uint8_t flags,
    errval_t *err, spawn_domainid_t *domain_id)
{
    *err = spawn_with_caps_common(NULL_CAP, path, argvbuf, argvbytes, envbuf,
                                  envbytes, inheritcn_cap, argcn_cap, flags,
                                  domain_id);
    return SYS_ERR_OK;
}

static errval_t spawn_handler(struct spawn_binding *b, const char *path,
    const char *argvbuf, size_t argvbytes, const char *envbuf, size_t envbytes,
    uint8_t flags, errval_t *err, spawn_domainid_t *domain_id)
{
    *err = spawn_with_caps_common(NULL_CAP, path, argvbuf, argvbytes, envbuf,
                                  envbytes, NULL_CAP, NULL_CAP, flags,
                                  domain_id);
    return SYS_ERR_OK;
}

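/*
 * Asynchronous request handlers used by the process manager. Each request
 * carries the process manager's capability, which is verified via
 * monitor_cap_identify_remote() before acting; the outcome is returned with
 * spawn_reply().
 */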
static void spawn_with_caps_request_handler(struct spawn_binding *b,
                                            struct capref procmng_cap,
                                            struct capref domain_cap,
                                            const char *path,
                                            const char *argvbuf,
                                            size_t argvbytes,
                                            const char *envbuf,
                                            size_t envbytes,
                                            struct capref inheritcn_cap,
                                            struct capref argcn_cap,
                                            uint8_t flags)
{
    errval_t err, reply_err;
    struct capability ret;
    err = monitor_cap_identify_remote(procmng_cap, &ret);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_IDENTIFY_PROC_MNGR_CAP);
        goto reply;
    }

    if (ret.type != ObjType_ProcessManager) {
        err = SPAWN_ERR_NOT_PROC_MNGR;
        goto reply;
    }

    spawn_domainid_t dummy_domain_id;
    err = spawn_with_caps_common(domain_cap, path, argvbuf, argvbytes, envbuf,
                                 envbytes, inheritcn_cap, argcn_cap, flags,
                                 &dummy_domain_id);

reply:
    reply_err = b->tx_vtbl.spawn_reply(b, NOP_CONT, err);
    if (err_is_fail(reply_err)) {
        DEBUG_ERR(reply_err, "failed to send spawn_with_caps_reply");
    }
}

static void spawn_request_handler(struct spawn_binding *b,
                                  struct capref procmng_cap,
                                  struct capref domain_cap, const char *path,
                                  const char *argvbuf, size_t argvbytes,
                                  const char *envbuf, size_t envbytes,
                                  uint8_t flags)
{
    errval_t err, reply_err;
    struct capability ret;
    err = monitor_cap_identify_remote(procmng_cap, &ret);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_IDENTIFY_PROC_MNGR_CAP);
        goto reply;
    }

    if (ret.type != ObjType_ProcessManager) {
        err = SPAWN_ERR_NOT_PROC_MNGR;
        goto reply;
    }

    spawn_domainid_t dummy_domain_id;
    err = spawn_with_caps_common(domain_cap, path, argvbuf, argvbytes, envbuf,
                                 envbytes, NULL_CAP, NULL_CAP, flags,
                                 &dummy_domain_id);

reply:
    reply_err = b->tx_vtbl.spawn_reply(b, NOP_CONT, err);
    if (err_is_fail(reply_err)) {
        DEBUG_ERR(reply_err, "failed to send spawn_reply");
    }
}

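/**
 * \brief Span an existing domain onto this core: create a new dispatcher that
 *        shares the domain's vspace (vroot) using the provided dispatcher
 *        frame, then register it locally like a freshly spawned domain.
 */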
static void span_request_handler(struct spawn_binding *b,
                                 struct capref procmng_cap,
                                 struct capref domain_cap, struct capref vroot,
                                 struct capref dispframe)
{
    errval_t err, mon_err, reply_err;
    struct capability ret;
    err = monitor_cap_identify_remote(procmng_cap, &ret);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_IDENTIFY_PROC_MNGR_CAP);
        goto reply;
    }

    if (ret.type != ObjType_ProcessManager) {
        err = SPAWN_ERR_NOT_PROC_MNGR;
        goto reply;
    }

    struct spawninfo si;
    memset(&si, 0, sizeof(si));

    debug_printf("Spanning domain to core %d\n", disp_get_core_id());

    err = spawn_span_domain(&si, vroot, dispframe);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_SPAN);
        goto reply;
    }

    // Set connection to monitor.
    struct monitor_blocking_binding *mrpc = get_monitor_blocking_binding();
    struct capref monep;
    err = slot_alloc(&monep);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_MONEP_SLOT_ALLOC);
        goto reply;
    }
    err = mrpc->rpc_tx_vtbl.alloc_monitor_ep(mrpc, &mon_err, &monep);
    if (err_is_ok(err)) {
        err = mon_err;
    }
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_MONITOR_CLIENT);
        goto reply;
    }

    /* copy connection into the new domain */
    struct capref destep = {
        .cnode = si.taskcn,
        .slot  = TASKCN_SLOT_MONITOREP,
    };
    err = cap_copy(destep, monep);
    if (err_is_fail(err)) {
        spawn_free(&si);
        cap_destroy(monep);
        err = err_push(err, SPAWN_ERR_MONITOR_CLIENT);
        goto reply;
    }

    err = cap_destroy(monep);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_MONITOR_CLIENT);
        goto reply;
    }

    /* give the perfmon capability */
    struct capref dest, src;
    dest.cnode = si.taskcn;
    dest.slot = TASKCN_SLOT_PERF_MON;
    src.cnode = cnode_task;
    src.slot = TASKCN_SLOT_PERF_MON;
    err = cap_copy(dest, src);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_COPY_PERF_MON);
        goto reply;
    }

    // Pass over the domain cap.
    dest.cnode = si.taskcn;
    dest.slot = TASKCN_SLOT_DOMAINID;
    err = cap_copy(dest, domain_cap);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_COPY_DOMAIN_CAP);
        goto reply;
    }

    /* run the new dispatcher */
    err = spawn_run(&si);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_RUN);
        goto reply;
    }

    // Allocate an id for this dispatcher.
    struct ps_entry *pe = malloc(sizeof(struct ps_entry));
    assert(pe != NULL);
    memset(pe, 0, sizeof(struct ps_entry));
    /*
     * NB: It's important to keep a copy of the DCB *and* the root
     * CNode around. We need to revoke both (in the right order, see
     * kill_domain() below), so that we ensure no one else is
     * referring to the domain's CSpace anymore. Especially the loop
     * created by placing rootcn into its own address space becomes a
     * problem here.
     */
    // TODO(razvan): The following code is here to comply with spawn().
    err = slot_alloc(&pe->rootcn_cap);
    assert(err_is_ok(err));
    err = cap_copy(pe->rootcn_cap, si.rootcn_cap);
    assert(err_is_ok(err));
    pe->rootcn = si.rootcn;
    err = slot_alloc(&pe->dcb);
    assert(err_is_ok(err));
    err = cap_copy(pe->dcb, si.dcb);
    assert(err_is_ok(err));
    pe->status = PS_STATUS_RUNNING;

    err = ps_hash_domain(pe, domain_cap);
    if (err_is_fail(err)) {
        free(pe);
        err = err_push(err, SPAWN_ERR_DOMAIN_CAP_HASH);
        goto reply;
    }

    domainid_t domainid;
    err = ps_allocate(pe, &domainid);
    if (err_is_fail(err)) {
        free(pe);
        goto reply;
    }

    err = spawn_free(&si);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_FREE);
    }

reply:
    reply_err = b->tx_vtbl.spawn_reply(b, NOP_CONT, err);
    if (err_is_fail(reply_err)) {
        DEBUG_ERR(reply_err, "failed to send span_reply");
    }
}

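/**
 * \brief Revoke and destroy a capability, logging (but ignoring) failures.
 */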
static void cleanup_cap(struct capref cap)
{
    errval_t err;

    err = cap_revoke(cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "cap_revoke");
    }
    err = cap_destroy(cap);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "cap_destroy");
    }
}

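/**
 * \brief Kill request from the process manager: look up the victim by its
 *        domain cap and revoke/destroy its DCB, which stops the dispatcher.
 */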
static void kill_request_handler(struct spawn_binding *b,
                                 struct capref procmng_cap,
                                 struct capref victim_domain_cap)
{
    errval_t err, reply_err;
    struct capability ret;
    err = monitor_cap_identify_remote(procmng_cap, &ret);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_IDENTIFY_PROC_MNGR_CAP);
        goto reply;
    }

    if (ret.type != ObjType_ProcessManager) {
        err = SPAWN_ERR_NOT_PROC_MNGR;
        goto reply;
    }

    struct ps_entry *pe;
    err = ps_get_domain(victim_domain_cap, &pe, NULL);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_DOMAIN_NOTFOUND);
        goto reply;
    }

    cleanup_cap(pe->dcb);

reply:
    reply_err = b->tx_vtbl.spawn_reply(b, NOP_CONT, err);
    if (err_is_fail(reply_err)) {
        DEBUG_ERR(reply_err, "failed to send kill_reply");
    }
}

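/**
 * \brief Cleanup request from the process manager: release the domain's root
 *        CNode and local ps_entry; waiters are notified by the process
 *        manager, not here.
 */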
static void cleanup_request_handler(struct spawn_binding *b,
                                    struct capref procmng_cap,
                                    struct capref domain_cap)
{
    errval_t err, reply_err;
    struct capability ret;
    err = monitor_cap_identify_remote(procmng_cap, &ret);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_IDENTIFY_PROC_MNGR_CAP);
        goto reply;
    }

    if (ret.type != ObjType_ProcessManager) {
        err = SPAWN_ERR_NOT_PROC_MNGR;
        goto reply;
    }

    struct ps_entry *pe;
    err = ps_release_domain(domain_cap, &pe);
    if (err_is_fail(err)) {
        err = err_push(err, SPAWN_ERR_DOMAIN_NOTFOUND);
        goto reply;
    }

    cleanup_cap(pe->rootcn_cap);

    // Cleanup struct ps_entry. Note that waiters will be handled by the process
    // manager, as opposed to the old protocol of handling them here.
    ps_remove(pe->domain_id);
    free(pe);

reply:
    reply_err = b->tx_vtbl.spawn_reply(b, NOP_CONT, err);
    if (err_is_fail(reply_err)) {
        DEBUG_ERR(reply_err, "failed to send cleanup_reply");
    }
}

/**
 * \brief Removes a zombie domain.
 */
static void cleanup_domain(domainid_t domainid)
{
    errval_t err;
    struct ps_entry *ps = ps_get(domainid);
    assert(ps != NULL);

    // Tell all waiters of exit and free list as we go
    for (struct ps_waiter *w = ps->waiters; w != NULL;) {
        err = w->binding->tx_vtbl.wait_response
            (w->binding, NOP_CONT, ps->exitcode, SYS_ERR_OK);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "wait_response");
        }

        struct ps_waiter *oldw = w;
        w = w->next;
        free(oldw);
    }
    ps->waiters = NULL;

    // Cleanup rest of ps entry
    free(ps->argbuf);
    ps_remove(domainid);
}

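/**
 * \brief Mark a domain as a zombie and garbage-collect its capabilities
 *        (DCB first, to deschedule the dispatcher, then the root CNode).
 */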
static errval_t kill_domain(domainid_t domainid, uint8_t exitcode)
{
    struct ps_entry *ps = ps_get(domainid);

    if (ps == NULL) {
        return SPAWN_ERR_DOMAIN_NOTFOUND;
    }

    ps->status = PS_STATUS_ZOMBIE;
    ps->exitcode = exitcode;

    // Garbage collect victim's capabilities
    cleanup_cap(ps->dcb); // Deschedule dispatcher (do this first!)
    cleanup_cap(ps->rootcn_cap);

    // XXX: why only when waiters exist? -SG
    if (ps->waiters != NULL) {
        // Cleanup local data structures and inform waiters
        cleanup_domain(domainid);
    }

    return SYS_ERR_OK;
}

static void kill_handler(struct spawn_binding *b, domainid_t domainid)
{
    errval_t err = kill_domain(domainid, 0);

    err = b->tx_vtbl.kill_response(b, NOP_CONT, err);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "kill_response");
    }
}

static void exit_handler(struct spawn_binding *b, domainid_t domainid,
                         uint8_t exitcode)
{
    errval_t err = kill_domain(domainid, exitcode);
    struct ps_entry *ps = ps_get(domainid);

    if (err_is_fail(err)) {
        DEBUG_ERR(err, "kill_domain");
    }

    if (ps == NULL) {
        // XXX: Can't do anything
        return;
    }

    // May never return anything to client
}

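/**
 * \brief Wait for a domain to exit. With nohang set, replies immediately:
 *        either with the exit code (if the domain is already a zombie) or
 *        with SPAWN_ERR_DOMAIN_RUNNING. Otherwise the caller is queued and
 *        answered from cleanup_domain() once the domain dies.
 */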
static void wait_handler(struct spawn_binding *b, domainid_t domainid,
                         bool nohang)
{
    errval_t err;
    struct ps_entry *ps = ps_get(domainid);

    if (ps == NULL) {
        err = b->tx_vtbl.wait_response(b, NOP_CONT, 0, SPAWN_ERR_DOMAIN_NOTFOUND);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "wait_response");
        }
        return;
    }

    if (!nohang || ps->status == PS_STATUS_ZOMBIE) {
        // Enqueue the waiter
        struct ps_waiter *waiter = malloc(sizeof(struct ps_waiter));
        assert(waiter != NULL);
        waiter->next = ps->waiters;
        waiter->binding = b;
        ps->waiters = waiter;
    } else {
        // nohang and no zombie, return error
        err = b->tx_vtbl.wait_response(b, NOP_CONT, 0, SPAWN_ERR_DOMAIN_RUNNING);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "wait_response");
        }
    }

    // Cleanup if zombie (will send the reply)
    if (ps->status == PS_STATUS_ZOMBIE) {
        cleanup_domain(domainid);
    }
}

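/**
 * \brief Send the list of live domain ids to the client; the buffer is freed
 *        once the reply has been transmitted.
 */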
static void get_domainlist_sent(void *arg)
{
    // Free the domain list buffer once it has been sent.
    free(arg);
}

static void get_domainlist_handler(struct spawn_binding *b)
{
    errval_t err;
    size_t len = 0;
    uint8_t *domains = calloc(sizeof(uint8_t), MAX_DOMAINS);

    // XXX: Very inefficient
    for (domainid_t i = 0; i < MAX_DOMAINS; i++) {
        if (ps_get(i) != NULL) {
            domains[len++] = i;
        }
    }

    err = b->tx_vtbl.get_domainlist_response
        (b, MKCLOSURE(get_domainlist_sent, domains), domains, len);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "get_domainlist_response");
    }
}

static void status_handler(struct spawn_binding *b, domainid_t domainid)
{
    errval_t err;
    struct ps_entry *ps = ps_get(domainid);
    spawn_ps_entry_t pse;

    memset(&pse, 0, sizeof(pse));

    if (ps == NULL) {
        err = b->tx_vtbl.status_response(b, NOP_CONT, pse, NULL, 0,
                                         SPAWN_ERR_DOMAIN_NOTFOUND);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "status_response");
        }
        return;
    }

    pse.status = ps->status;

    err = b->tx_vtbl.status_response(b, NOP_CONT, pse, ps->argbuf, ps->argbytes,
                                     SYS_ERR_OK);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "status_response");
    }
}

static errval_t dump_capabilities(domainid_t domainid) {
    struct ps_entry *ps = ps_get(domainid);

    if (ps == NULL) {
        return SPAWN_ERR_DOMAIN_NOTFOUND;
    }

    return invoke_dispatcher_dump_capabilities(ps->dcb);
}

static void dump_capabilities_handler(struct spawn_binding *b, domainid_t domainid) {
    errval_t err = dump_capabilities(domainid);

    err = b->tx_vtbl.dump_capabilities_response(b, NOP_CONT, err);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "dump_capabilities_response");
    }
}

static struct spawn_rx_vtbl rx_vtbl = {
    // .spawn_domain_call = spawn_handler,
    // .spawn_domain_with_caps_call = spawn_with_caps_handler,

    // Async messages for the process manager.
    .spawn_request = spawn_request_handler,
    .spawn_with_caps_request = spawn_with_caps_request_handler,
    .span_request = span_request_handler,
    .kill_request = kill_request_handler,
    .cleanup_request = cleanup_request_handler,

    .use_local_memserv_call = use_local_memserv_handler,
    .kill_call = kill_handler,
    .exit_call = exit_handler,
    .wait_call = wait_handler,
    .get_domainlist_call = get_domainlist_handler,
    .status_call = status_handler,
    .dump_capabilities_call = dump_capabilities_handler
};

static struct spawn_rpc_rx_vtbl rpc_rx_vtbl = {
    .spawn_domain_call = spawn_handler,
    .spawn_domain_with_caps_call = spawn_with_caps_handler,
    // .use_local_memserv_call = use_local_memserv_handler,
    // .kill_call = kill_handler,
    // .exit_call = exit_handler,
    // .wait_call = wait_handler,
    // .get_domainlist_call = get_domainlist_handler,
    // .status_call = status_handler,
    // .dump_capabilities_call = dump_capabilities_handler
};

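/*
 * Service setup: export the spawn interface, hand the resulting iref to the
 * monitor (which forwards it to the process manager) and register it with
 * the name service under a per-core name.
 */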
static void export_cb(void *st, errval_t err, iref_t iref)
{
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "export failed");
    }

    // Send iref back to monitor, which will forward it to the process manager.
    struct monitor_binding *mb = get_monitor_binding();
    err = mb->tx_vtbl.set_spawn_iref_request(mb, NOP_CONT, iref);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "failed to send set_spawn_iref_request to "
                            "monitor");
    }

    // construct the per-core service name (buffer size assumed)
    char namebuf[32];
    size_t len = snprintf(namebuf, sizeof(namebuf), "%s.%d", SERVICE_BASENAME,
                          my_core_id);
    assert(len < sizeof(namebuf));
    namebuf[sizeof(namebuf) - 1] = '\0';

    // register this iref with the name service
    err = nameservice_register(namebuf, iref);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "nameservice_register failed");
    }
}

static errval_t connect_cb(void *st, struct spawn_binding *b)
{
    // copy my message receive handler vtable to the binding
    b->rx_vtbl = rx_vtbl;
    b->rpc_rx_vtbl = rpc_rx_vtbl;
    return SYS_ERR_OK;
}

errval_t start_service(void)
{
    return spawn_export(NULL, export_cb, connect_cb, get_default_waitset(),
                        IDC_EXPORT_FLAGS_DEFAULT);
}