};
cap IPI is_always_copy {};
+
+cap ProcessManager is_always_copy {
+ // Capability to act as process manager, i.e. create new domain caps.
+};
+
+cap Domain from ProcessManager {
+ eq coreid coreid; /* Core where the domain was created. */
+ eq uint32 core_local_id; /* Core-local ID of the domain. */
+};
\ No newline at end of file
// ID capability
failure ID_SPACE_EXHAUSTED "ID space exhausted",
+ // Domain capability
+ failure DOMAIN_SPACE_EXHAUSTED "Domain space exhausted",
+
// I2C driver
// XXX: should not be in kernel
failure I2C_UNINITIALIZED "Trying to use uninitialized i2c controller",
failure SEGBASE_OVER_4G_LIMIT "Segment base address is above 32-bit boundary",
failure LDT_FULL "LDT is out of space",
failure LDT_SELECTOR_INVALID "Segment selector is invalid for LDT",
+
+ // Process management client library
+ failure PROC_MGMT_CLIENT_ACCEPT "Error in proc_mgmt_client_lmp_accept()",
};
// errors in Flounder-generated bindings
failure COPY_PERF_MON "Error copying performance monitoring cap",
failure COPY_KERNEL_CAP "Error copying Kernel cap",
failure COPY_INHERITCN_CAP "Error copying capability from inherited cnode",
+ failure COPY_PROC_MNG_CAP "Error copying ProcessManager cap",
+ failure COPY_DOMAIN_CAP "Error copying domain cap",
// make_runnable
failure DISPATCHER_SETUP "Dispatcher setup",
failure DOMAIN_ALLOCATE "No more domain descriptors",
failure DOMAIN_NOTFOUND "Domain not found",
failure DOMAIN_RUNNING "Domain is running",
+
+ failure IDENTIFY_PROC_MNGR_CAP "Failed to identify process manager cap",
+ failure NOT_PROC_MNGR "Request did not come from the process manager",
+
+ failure CREATE_DOMAIN_TABLE "Failed to create domain hash table",
+ failure DOMAIN_CAP_HASH "Failed to compute hash code for domain cap",
+ failure DOMAIN_TABLE_FIND "Failed to find requested domain in domain table",
failure FIND_SPAWNDS "Unable to find spawn daemons",
failure MALFORMED_SPAWND_RECORD "Spawn record without ID found?",
};
+// errors related to the process manager
+errors proc_mgmt PROC_MGMT_ERR_ {
+ failure NOT_MONITOR "Received monitor-only request from non-monitor domain",
+ failure SPAWND_EXISTS "Requested spawnd slot already exists",
+ failure INVALID_SPAWND "Not connected to spawnd on the requested core",
+ failure CREATE_DOMAIN_CAP "Failed to create new domain cap",
+ failure SPAWND_REQUEST "Failed to send request to spawnd",
+ failure DOMAIN_CAP_HASH "Failed to compute hash code for domain cap",
+ failure CREATE_CLIENTS_TABLE "Failed to create pending clients hash table",
+ failure CLIENTS_TABLE_FIND "Failed to find requested client in pending clients table",
+ failure CREATE_DOMAIN_TABLE "Failed to create domain hash table",
+ failure DOMAIN_TABLE_FIND "Failed to find requested domain in domain table",
+ failure DOMAIN_NOT_RUNNING "Domain is not currently running",
+ failure ALREADY_SPANNED "Domain has already been spanned to the given core",
+ failure KILL "Failed to kill requested domain",
+};
+
// errors from ELF library
errors libelf ELF_ERR_ {
failure FILESZ "Invalid file size",
failure COPY_KERNEL_CAP "Failed to copy kernel cap to monitor",
failure COPY_BSP_KCB "Error copying BSP KernelControlBlock",
failure COPY_IPI "Failed to copy IPI cap to monitor",
+ failure COPY_PROC_MNG_CAP "Failed to copy ProcessManager cap to monitor",
failure COPY_PERF_MON "Failed to copy performance monitoring cap to monitor",
failure COPY_MODULECN_CAP "Failed to copy module CNode cap to monitor",
failure COPY_PACN_CAP "Failed to copy phys addr CNode cap to monitor",
module /x86_64/sbin/kaluga boot
module /x86_64/sbin/acpi boot
module /x86_64/sbin/spawnd boot
+module /x86_64/sbin/proc_mgmt boot
#bootapic-x86_64=1-15
module /x86_64/sbin/startd boot
module /x86_64/sbin/routing_setup boot
"pci",
"ping_pong",
"pixels",
+ "proc_mgmt",
"rcce",
"replay",
"routing",
uint64 framebytes);
message span_domain_reply(state_id state_id, errval err);
+ // Inform the monitor connected to the process manager about the spawnd
+ // that the caller just spawned.
+ message add_spawnd(iref iref);
+
// Resource control
message rsrc_join(rsrcid id, coreid coreid);
message rsrc_join_complete(rsrcid id);
message get_ramfs_iref_reply(iref iref, uintptr st);
message set_ramfs_iref_request(iref iref);
+ message set_proc_mgmt_ep_request(cap ep);
+
+ message set_spawn_iref_request(iref iref);
+
message set_mem_iref_request(iref iref);
message set_name_iref_request(iref iref);
--- /dev/null
+/*
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+interface proc_mgmt "Process management service" {
+
+ // Register a newly started spawnd, running on the given core, with the
+ message add_spawnd(coreid core, iref iref);
+
+ // Spawn a new domain on the given core, returning its domain cap on success.
+ rpc spawn(in coreid core,
+ in String path[2048],
+ in char argvbuf[argvbytes, 2048],
+ in char envbuf[envbytes, 2048],
+ in uint8 flags,
+ out errval err,
+ out cap domain_cap);
+
+ // Like spawn, but also hands the child an inherit cnode and an argument cnode.
+ rpc spawn_with_caps(in coreid core,
+ in String path[2048],
+ in char argvbuf[argvbytes, 2048],
+ in char envbuf[envbytes, 2048],
+ in cap inheritcn_cap,
+ in cap argcn_cap,
+ in uint8 flags,
+ out errval err,
+ out cap domain_cap);
+
+ // Span a new core for a given domain, based on provided vroot and dispframe.
+ rpc span(in cap domain_cap, in coreid core, in cap vroot, in cap dispframe,
+ out errval err);
+
+ // Kill a domain for which the caller has a domain cap.
+ rpc kill(in cap domain_cap, out errval err);
+
+ // Let the process manager know the caller has finished execution.
+ message exit(cap domain_cap, uint8 status);
+ //rpc exit(in cap domain_cap, in uint8 status);
+
+ // Block until the given domain terminates, returning its exit status.
+ rpc wait(in cap domain_cap, out errval err, out uint8 status);
+};
in cap argcn_cap, in uint8 flags, out errval err,
out domainid domain_id);
+ // Messages for the async interface with the process manager.
+ message spawn_request(cap procmng_cap,
+ cap domain_cap,
+ String path[2048],
+ char argvbuf[argvbytes, 2048],
+ char envbuf[envbytes, 2048],
+ uint8 flags);
+
+ message spawn_with_caps_request(cap procmng_cap,
+ cap domain_cap,
+ String path[2048],
+ char argvbuf[argvbytes, 2048],
+ char envbuf[envbytes, 2048],
+ cap inheritcn_cap,
+ cap argcn_cap,
+ uint8 flags);
+
+ message span_request(cap procmng_cap, cap domain_cap, cap vroot,
+ cap dispframe);
+
+ message kill_request(cap procmng_cap, cap domain_cap);
+
+ message cleanup_request(cap procmng_cap, cap domain_cap);
+
+ message spawn_reply(errval err);
+
+ rpc spawn_proc_mgmt_domain(in cap domain_cap,
+ in String path[2048],
+ in char argvbuf[argvbytes, 2048],
+ in char envbuf[envbytes, 2048],
+ in uint8 flags,
+ out errval err);
+
+ rpc spawn_proc_mgmt_domain_with_caps(in cap domain_cap,
+ in String path[2048],
+ in char argvbuf[argvbytes, 2048],
+ in char envbuf[envbytes, 2048],
+ in cap inheritcn_cap,
+ in cap argcn_cap,
+ in uint8 flags,
+ out errval err);
+
+ rpc span(in cap domain_cap, in cap vroot, in cap dispframe, out errval err);
+
rpc use_local_memserv();
rpc kill(in domainid domain_id, out errval err);
/* well-known capabilities */
extern struct capref cap_root, cap_monitorep, cap_irq, cap_io, cap_dispatcher,
cap_selfep, cap_kernel, cap_initep, cap_perfmon, cap_dispframe,
- cap_sessionid, cap_ipi, cap_vroot, cap_argcn;
+ cap_sessionid, cap_ipi, cap_vroot, cap_argcn, cap_procmng,
+ cap_domainid;
/**
* \brief Returns the depth in the CSpace address of a cap
struct mem_binding;
struct spawn_binding;
struct arrakis_binding;
+struct proc_mgmt_binding;
struct core_state_generic {
struct waitset default_waitset;
struct ram_alloc_state ram_alloc_state;
struct octopus_binding *octopus_binding;
struct spawn_binding *spawn_bindings[MAX_CPUS];
+ struct proc_mgmt_binding *proc_mgmt_binding;
struct arrakis_binding *arrakis_bindings[MAX_CPUS];
struct terminal_state *terminal_state;
struct domain_state *domain_state;
#define BARRELFISH_DOMAIN_H
#include <sys/cdefs.h>
+#include <barrelfish/event_queue.h>
#include <barrelfish/threads.h>
__BEGIN_DECLS
typedef void (*domain_spanned_callback_t)(void *arg, errval_t err);
+/// State tracked while spanning a domain to a remote core (state machine).
+struct span_domain_state {
+ struct thread *thread; ///< Thread to run on remote core
+ uint8_t core_id; ///< Id of the remote core
+ errval_t err; ///< To propagate error value
+ domain_spanned_callback_t callback; ///< Callback for when domain has spanned
+ void *callback_arg; ///< Optional argument to pass with callback
+ struct capref frame; ///< Dispatcher frame
+ struct capref vroot; ///< VRoot cap
+ struct event_queue_node event_qnode; ///< Event queue node
+ struct waitset_chanstate initev; ///< Dispatcher initialized event
+ bool initialized; ///< True if remote initialized
+};
+
struct mem_binding;
struct octopus_binding;
struct monitor_binding;
struct waitset;
struct spawn_binding;
struct arrakis_binding;
+struct proc_mgmt_binding;
struct waitset *get_default_waitset(void);
void disp_set_core_id(coreid_t core_id);
void set_spawn_state(struct spawn_state *st);
struct slot_alloc_state *get_slot_alloc_state(void);
struct skb_state *get_skb_state(void);
+struct proc_mgmt_binding *get_proc_mgmt_binding(void);
+void set_proc_mgmt_binding(struct proc_mgmt_binding *st);
errval_t domain_init(void);
errval_t domain_new_dispatcher(coreid_t core_id,
domain_spanned_callback_t callback,
void *callback_arg);
+errval_t domain_new_dispatcher_setup_only(coreid_t core_id,
+ struct span_domain_state **ret_state);
errval_t domain_thread_create_on(coreid_t core_id, thread_func_t start_func,
void *arg, struct thread **newthread);
errval_t domain_thread_create_on_varstack(coreid_t core_id,
struct thread *thread,
dispatcher_handle_t mydisp);
errval_t domain_thread_move_to(struct thread *thread, coreid_t core_id);
+errval_t domain_cap_hash(struct capref domain_cap, uint64_t *ret_hash);
__END_DECLS
errval_t monitor_debug_print_cababilities(void);
+errval_t monitor_cap_identify_remote(struct capref cap, struct capability *ret);
+
__END_DECLS
#endif // BARRELFISH_MONITOR_CLIENT_H
--- /dev/null
+/**
+ * \file
+ * \brief Client for interacting with the process management server.
+ */
+
+/*
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef BARRELFISH_PROC_MGMT_CLIENT_H
+#define BARRELFISH_PROC_MGMT_CLIENT_H
+
+#include <if/proc_mgmt_defs.h>
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+
+struct proc_mgmt_lmp_binding;
+
+errval_t proc_mgmt_client_lmp_accept(struct proc_mgmt_lmp_binding *lmpb,
+ struct waitset *ws,
+ size_t lmp_buflen_words);
+errval_t proc_mgmt_client_lmp_bind(struct proc_mgmt_lmp_binding *lmpb,
+ struct capref ep,
+ proc_mgmt_bind_continuation_fn *cont,
+ void *st,
+ struct waitset *ws,
+ size_t lmp_buflen_words);
+errval_t proc_mgmt_bind_client(void);
+
+errval_t proc_mgmt_add_spawnd(iref_t iref, coreid_t core_id);
+errval_t proc_mgmt_spawn_program(coreid_t core_id, const char *path,
+ char *const argv[], char *const envp[],
+ uint8_t flags, struct capref *ret_domain_cap);
+errval_t proc_mgmt_spawn_program_with_caps(coreid_t core_id, const char *path,
+ char *const argv[], char *const envp[],
+ struct capref inheritcn_cap,
+ struct capref argcn_cap, uint8_t flags,
+ struct capref *ret_domain_cap);
+errval_t proc_mgmt_span(coreid_t core_id);
+errval_t proc_mgmt_kill(struct capref domain_cap);
+errval_t proc_mgmt_exit(uint8_t status);
+errval_t proc_mgmt_wait(struct capref domain_cap, uint8_t *status);
+
+__END_DECLS
+
+#endif // BARRELFISH_PROC_MGMT_CLIENT_H
errval_t spawn_wait_core(coreid_t coreid, domainid_t domainid,
uint8_t *exitcode, bool nohang);
errval_t spawn_binding(coreid_t coreid, struct spawn_binding **ret_client);
+errval_t spawn_bind_iref(iref_t iref, struct spawn_binding **ret_client);
errval_t spawn_get_domain_list(uint8_t **domains, size_t *len);
errval_t spawn_get_status(uint8_t domain, struct spawn_ps_entry *pse,
char **argbuf, size_t *arglen, errval_t *reterr);
static inline bool type_is_vnode(enum objtype type)
{
- STATIC_ASSERT(48 == ObjType_Num, "Check VNode definitions");
+ STATIC_ASSERT(50 == ObjType_Num, "Check VNode definitions");
return (type == ObjType_VNode_x86_64_pml4 ||
type == ObjType_VNode_x86_64_pdpt ||
static inline bool type_is_vroot(enum objtype type)
{
- STATIC_ASSERT(48 == ObjType_Num, "Check VNode definitions");
+ STATIC_ASSERT(50 == ObjType_Num, "Check VNode definitions");
return (type == ObjType_VNode_x86_64_pml4 ||
#ifdef CONFIG_PAE
static inline size_t vnode_objbits(enum objtype type)
{
// This function should be emitted by hamlet or somesuch.
- STATIC_ASSERT(48 == ObjType_Num, "Check VNode definitions");
+ STATIC_ASSERT(50 == ObjType_Num, "Check VNode definitions");
if (type == ObjType_VNode_x86_64_pml4 ||
type == ObjType_VNode_x86_64_pdpt ||
static inline size_t vnode_objsize(enum objtype type)
{
// This function should be emitted by hamlet or somesuch.
- STATIC_ASSERT(48 == ObjType_Num, "Check VNode definitions");
+ STATIC_ASSERT(50 == ObjType_Num, "Check VNode definitions");
if (type == ObjType_VNode_x86_64_pml4 ||
type == ObjType_VNode_x86_64_pdpt ||
*/
static inline size_t vnode_entry_bits(enum objtype type) {
// This function should be emitted by hamlet or somesuch.
- STATIC_ASSERT(48 == ObjType_Num, "Check VNode definitions");
+ STATIC_ASSERT(50 == ObjType_Num, "Check VNode definitions");
if (type == ObjType_VNode_x86_64_pml4 ||
type == ObjType_VNode_x86_64_pdpt ||
* @return Number of page table entries in bits
*/
static inline size_t cnode_get_slots(struct capability *cnode) {
- STATIC_ASSERT(48 == ObjType_Num, "Check CNode definitions");
+ STATIC_ASSERT(50 == ObjType_Num, "Check CNode definitions");
switch (cnode->type) {
case ObjType_L1CNode:
static inline enum objtype get_mapping_type(enum objtype captype)
{
- STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all mapping types");
+ STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all mapping types");
switch (captype) {
case ObjType_Frame:
static inline bool type_is_mapping(enum objtype type)
{
- STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all mapping types");
+ STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all mapping types");
switch (type) {
case ObjType_Frame_Mapping:
static inline bool type_is_mappable(enum objtype type)
{
- STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all mappable types");
+ STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all mappable types");
switch (type) {
case ObjType_Frame:
* Predicates related to sharing capabilities
*/
-STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
+STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all cap types");
static inline bool
distcap_needs_locality(enum objtype type)
{
}
}
-STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
+STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all cap types");
static inline bool
distcap_is_moveable(enum objtype type)
{
#define TASKCN_SLOT_SYSMEM 15 ///< ???
#define TASKCN_SLOT_COREBOOT 16 ///< Copy of realmode section used to bootstrap a core
#define TASKCN_SLOT_IPI 17 ///< Copy of IPI cap
-#define TASKCN_SLOTS_USER 18 ///< First free slot in taskcn for user
+#define TASKCN_SLOT_PROC_MNG 18 ///< Cap for the process manager
+#define TASKCN_SLOT_DOMAINID 19 ///< Domain ID cap
+#define TASKCN_SLOTS_USER 20 ///< First free slot in taskcn for user
/* Page CNode */
#define PAGECN_SLOT_VROOT 0 ///< First slot of page cnode is root page table
#define PRIxRSRCID PRIx32
/* Domain ID */
-typedef uint32_t domainid_t;
+typedef uint32_t domainid_t;
+#define MAX_DOMAINID 0xffffffff
#define PRIuDOMAINID PRIu32
#define PRIxDOMAINID PRIx32
/**
* \brief Cleanup the last cap copy for an object and the object itself
*/
-STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all RAM-backed cap types");
+STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all RAM-backed cap types");
static errval_t
cleanup_last(struct cte *cte, struct cte *ret_ram_cap)
{
struct capability monitor_ep;
-STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
+STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all cap types");
int sprint_cap(char *buf, size_t len, struct capability *cap)
{
switch (cap->type) {
return snprintf(buf, len, "ID capability (coreid 0x%" PRIxCOREID
" core_local_id 0x%" PRIx32 ")", cap->u.id.coreid,
cap->u.id.core_local_id);
+ case ObjType_ProcessManager:
+ return snprintf(buf, len, "Process manager capability");
+
+ case ObjType_Domain:
+ return snprintf(buf, len, "Domain capability (coreid 0x%" PRIxCOREID
+ " core_local_id 0x%" PRIx32 ")", cap->u.domain.coreid,
+ cap->u.domain.core_local_id);
case ObjType_PerfMon:
return snprintf(buf, len, "PerfMon cap");
static uint32_t id_cap_counter = 1;
/**
+ * Domain capability core_local_id counter.
+ */
+static uint32_t domain_cap_counter = 1;
+
+/**
* Sets #dest equal to #src
*
* #dest cannot be in use.
// If you create more capability types you need to deal with them
// in the table below.
-STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
+STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all cap types");
static size_t caps_max_numobjs(enum objtype type, gensize_t srcsize, gensize_t objsize)
{
switch(type) {
return srcsize / OBJSIZE_KCB;
}
+ case ObjType_Domain:
+ return L2_CNODE_SLOTS;
+
case ObjType_Kernel:
case ObjType_IRQTable:
case ObjType_IRQDest:
case ObjType_Notify_IPI:
case ObjType_PerfMon:
case ObjType_IPI:
+ case ObjType_ProcessManager:
case ObjType_VNode_ARM_l1_Mapping:
case ObjType_VNode_ARM_l2_Mapping:
case ObjType_VNode_AARCH64_l0_Mapping:
*
* For the meaning of the parameters, see the 'caps_create' function.
*/
-STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
+STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all cap types");
static errval_t caps_zero_objects(enum objtype type, lpaddr_t lpaddr,
gensize_t objsize, size_t count)
*/
// If you create more capability types you need to deal with them
// in the table below.
-STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
+STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all cap types");
static errval_t caps_create(enum objtype type, lpaddr_t lpaddr, gensize_t size,
gensize_t objsize, size_t count, coreid_t owner,
err = set_cap(&dest_caps->cap, &temp_cap);
break;
+ case ObjType_Domain:
+ // Domain type does not refer to a kernel object
+ assert(lpaddr == 0);
+ assert(size == 0);
+ assert(objsize == 0);
+ assert(count <= L2_CNODE_SLOTS);
+
+ // Prevent wrap around
+ if (domain_cap_counter + count >= UINT32_MAX) {
+ return SYS_ERR_DOMAIN_SPACE_EXHAUSTED;
+ }
+
+ for(size_t i = 0; i < count; i++) {
+ // Initialize type specific fields
+ temp_cap.u.domain.coreid = my_core_id;
+ temp_cap.u.domain.core_local_id = domain_cap_counter++;
+ // Insert the capability
+ err = set_cap(&dest_caps[i].cap, &temp_cap);
+ if (err_is_fail(err)) {
+ break;
+ }
+ }
+ break;
case ObjType_IO:
temp_cap.u.io.start = 0;
temp_cap.u.io.end = 65535;
case ObjType_EndPoint:
case ObjType_Notify_IPI:
case ObjType_PerfMon:
+ case ObjType_ProcessManager:
// These types do not refer to a kernel object
assert(lpaddr == 0);
assert(size == 0);
//{{{1 Capability creation
/// check arguments, return true iff ok
-STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
+STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all cap types");
#ifndef NDEBUG
static bool check_caps_create_arguments(enum objtype type,
size_t bytes, size_t objsize,
return SYS_ERR_OK;
}
-STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
+STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all cap types");
/// Retype caps
/// Create `count` new caps of `type` from `offset` in src, and put them in
/// `dest_cnode` starting at `dest_slot`.
src_cap->type == ObjType_Dispatcher ||
src_cap->type == ObjType_Frame ||
src_cap->type == ObjType_DevFrame ||
- src_cap->type == ObjType_IRQSrc);
+ src_cap->type == ObjType_IRQSrc ||
+ src_cap->type == ObjType_ProcessManager);
if (src_cap->type != ObjType_Dispatcher && src_cap->type != ObjType_IRQSrc) {
base = get_address(src_cap);
/* check that we can create `count` objs from `offset` in source, and
* update base accordingly */
- if (src_cap->type != ObjType_Dispatcher && src_cap->type != ObjType_IRQSrc) {
+ if (src_cap->type != ObjType_Dispatcher && src_cap->type != ObjType_IRQSrc
+ && src_cap->type != ObjType_Domain) {
// TODO: convince ourselves that this is the only condition on offset
if (offset + count * objsize > get_size(src_cap)) {
debug(SUBSYS_CAPS, "caps_retype: cannot create all %zu objects"
}
/// Create copies to a cte
-STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
+STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all cap types");
errval_t caps_copy_to_cte(struct cte *dest_cte, struct cte *src_cte, bool mint,
uintptr_t param1, uintptr_t param2)
{
static inline size_t caps_get_mapping_offset(struct capability *cap) {
// This function should be emitted by hamlet or somesuch.
- STATIC_ASSERT(48 == ObjType_Num, "Check Mapping definitions");
+ STATIC_ASSERT(50 == ObjType_Num, "Check Mapping definitions");
switch (cap->type) {
case ObjType_VNode_AARCH64_l3_Mapping:
* Cap tracing
*/
#ifdef TRACE_PMEM_CAPS
-STATIC_ASSERT(48 == ObjType_Num, "knowledge of all cap types");
+STATIC_ASSERT(50 == ObjType_Num, "knowledge of all cap types");
STATIC_ASSERT(64 >= ObjType_Num, "cap types fit in uint64_t bitfield");
#define MAPPING_TYPES \
((1ull<<ObjType_VNode_x86_64_pml4_Mapping) | \
err = caps_create_new(ObjType_IPI, 0, 0, 0, my_core_id, ipicap_cte);
assert(err_is_ok(err));
+ // Create process manager capability
+ struct cte *procmngcap_cte = caps_locate_slot(CNODE(st->taskcn),
+ TASKCN_SLOT_PROC_MNG);
+ err = caps_create_new(ObjType_ProcessManager, 0, 0, 0, my_core_id,
+ procmngcap_cte);
+ assert(err_is_ok(err));
+
/* Initialize dispatcher */
dispatcher_handle_t init_handle
= local_phys_to_mem(init_dispframe_cte->cap.u.frame.base);
let common_srcs = [ "capabilities.c", "init.c", "dispatch.c", "threads.c",
"thread_once.c", "thread_sync.c", "slab.c", "domain.c", "idc.c",
"waitset.c", "event_queue.c", "event_mutex.c",
- "idc_export.c", "nameservice_client.c", "msgbuf.c",
+ "idc_export.c", "nameservice_client.c", "msgbuf.c", "proc_mgmt_client.c",
"monitor_client.c", "flounder_support.c", "flounder_glue_binding.c",
"flounder_txqueue.c","morecore.c", "debug.c", "heap.c",
"ram_alloc.c", "terminal.c", "spawn_client.c", "vspace/vspace.c",
++ common_srcs ++ idc_srcs,
assemblyFiles = arch_assembly (archFamily arch),
flounderBindings = [ "mem", "octopus", "interdisp", "spawn",
- "terminal", "arrakis", "terminal_config" ],
+ "proc_mgmt", "terminal", "arrakis",
+ "terminal_config" ],
-- only makes sense to compile monitor binding for lmp
flounderTHCStubs = [ "octopus" ],
flounderExtraBindings = [ ("monitor", ["lmp"]),
("mem", ["rpcclient"]),
("octopus", ["rpcclient"]),
("spawn", ["rpcclient"]),
+ ("proc_mgmt", ["rpcclient"]),
("arrakis", ["rpcclient"])],
addCFlags = [ "-DMORECORE_PAGESIZE="++(morecore_pagesize arch) ],
addIncludes = [ "include", "include" </> arch_dir, (arch_include arch) ],
assemblyFiles = arch_assembly (archFamily arch),
addCFlags = [ "-DARRAKIS", "-DMORECORE_PAGESIZE="++(morecore_pagesize arch) ],
flounderBindings = [ "mem", "octopus", "interdisp", "spawn", "arrakis",
- "terminal", "terminal_config", "terminal_session" ],
+ "proc_mgmt", "terminal", "terminal_config",
+ "terminal_session" ],
-- only makes sense to compile monitor binding for lmp
flounderTHCStubs = [ "octopus" ],
flounderExtraBindings = [ ("monitor", ["lmp"]),
("mem", ["rpcclient"]),
("octopus", ["rpcclient"]),
("spawn", ["rpcclient"]),
+ ("proc_mgmt", ["rpcclient"]),
("arrakis", ["rpcclient"])],
addIncludes = [ "include", "include" </> arch_dir, (arch_include arch) ],
addGeneratedDependencies = [ "/include/asmoffsets.h" ]
.slot = TASKCN_SLOT_SESSIONID
};
+/// Process manager cap; authorizes acting as the process manager (e.g. creating domain caps).
+struct capref cap_procmng = {
+ .cnode = TASK_CNODE_INIT,
+ .slot = TASKCN_SLOT_PROC_MNG
+};
+
+/// Domain ID cap; identifies this domain to the process manager.
+struct capref cap_domainid = {
+ .cnode = TASK_CNODE_INIT,
+ .slot = TASKCN_SLOT_DOMAINID
+};
+
/// Root PML4 VNode
struct capref cap_vroot = {
.cnode = PAGE_CNODE_INIT,
#include <barrelfish/barrelfish.h>
#include <barrelfish/caddr.h>
#include <barrelfish/debug.h>
+#include <barrelfish/monitor_client.h>
#include <barrelfish/sys_debug.h>
#include <barrelfish/dispatch.h>
-#include <if/monitor_blocking_defs.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
errval_t debug_cap_identify(struct capref cap, struct capability *ret)
{
- errval_t err, msgerr;
-
if (get_cap_addr(cap) == 0) {
return SYS_ERR_CAP_NOT_FOUND;
}
uint8_t level = get_cap_level(cap);
capaddr_t caddr = get_cap_addr(cap);
- err = invoke_kernel_identify_cap(caddr, level, ret);
+ errval_t err = invoke_kernel_identify_cap(caddr, level, ret);
if (err_is_ok(err)) {
// we have kernel cap, return result;
return SYS_ERR_OK;
}
- // Direct invocation failed, try via monitor
- union {
- monitor_blocking_caprep_t caprep;
- struct capability capability;
- } u;
-
- struct monitor_blocking_binding *r = get_monitor_blocking_binding();
- if (!r) {
- return LIB_ERR_MONITOR_RPC_NULL;
- }
- err = r->rpc_tx_vtbl.cap_identify(r, cap, &msgerr, &u.caprep);
- if (err_is_fail(err)){
- return err;
- } else if (err_is_fail(msgerr)) {
- return msgerr;
- }
-
- assert(ret != NULL);
- *ret = u.capability;
-
- return msgerr;
+ return monitor_cap_identify_remote(cap, ret);
}
/**
/**
* \brief Function to do the actual printing based on the type of capability
*/
-STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
+STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all cap types");
int debug_print_cap(char *buf, size_t len, struct capability *cap)
{
switch (cap->type) {
" core_local_id 0x%" PRIx32 ")", cap->u.id.coreid,
cap->u.id.core_local_id);
+ case ObjType_ProcessManager:
+ return snprintf(buf, len, "Process manager capability");
+
+ case ObjType_Domain:
+ return snprintf(buf, len, "Domain capability (coreid 0x%" PRIxCOREID
+ " core_local_id 0x%" PRIx32 ")", cap->u.domain.coreid,
+ cap->u.domain.core_local_id);
+
case ObjType_PerfMon:
return snprintf(buf, len, "PerfMon cap");
* Attn: Systems Group.
*/
+#include <limits.h>
#include <stdio.h>
#include <barrelfish/barrelfish.h>
#include <barrelfish/curdispatcher_arch.h>
#include <barrelfish/dispatcher_arch.h>
+#include <barrelfish/monitor_client.h>
#include <barrelfish/waitset_chan.h>
#include <barrelfish_kpi/domain_params.h>
#include <arch/registers.h>
size_t pagesize; ///< the pagesize to be used for the heap
};
-///< Struct for spanning domains state machine
-struct span_domain_state {
- struct thread *thread; ///< Thread to run on remote core
- uint8_t core_id; ///< Id of the remote core
- errval_t err; ///< To propagate error value
- domain_spanned_callback_t callback; ///< Callback for when domain has spanned
- void *callback_arg; ///< Optional argument to pass with callback
- struct capref frame; ///< Dispatcher frame
- struct capref vroot; ///< VRoot cap
- struct event_queue_node event_qnode; ///< Event queue node
- struct waitset_chanstate initev; ///< Dispatcher initialized event
- bool initialized; ///< True if remote initialized
-};
-
///< Array of all interdisp IREFs in the domain
static iref_t allirefs[MAX_CPUS];
st->default_waitset_handler = thread_create(span_slave_thread, NULL);
assert(st->default_waitset_handler != NULL);
-
-
return interdisp_msg_handler(&st->interdisp_ws);
}
} else { /* Use debug_err if no callback registered */
DEBUG_ERR(msgerr, "Failure in span_domain_reply");
}
- free(span_domain_state);
}
static void span_domain_request_sender(void *arg)
*/
static errval_t domain_new_dispatcher_varstack(coreid_t core_id,
domain_spanned_callback_t callback,
- void *callback_arg, size_t stack_size)
+ void *callback_arg, size_t stack_size,
+ struct span_domain_state **ret_span_state)
{
assert(core_id != disp_get_core_id());
errval_t err;
struct domain_state *domain_state = get_domain_state();
- struct monitor_binding *mb = get_monitor_binding();
assert(domain_state != NULL);
- /* Set reply handler */
- mb->rx_vtbl.span_domain_reply = span_domain_reply;
-
while(domain_state->iref == 0) { /* If not initialized, wait */
messages_wait_and_handle_next();
}
assert(domain_state->default_waitset_handler != NULL);
}
#endif
+
+ if (ret_span_state != NULL) {
+ *ret_span_state = span_domain_state;
+ }
+
+ return SYS_ERR_OK;
+}
+
+/**
+ * \brief Creates a dispatcher on a remote core
+ *
+ * \param core_id Id of the core to create the dispatcher on
+ * \param callback Callback to use when new dispatcher is created
+ *
+ * The new dispatcher is created with the same vroot, sharing the same vspace.
+ * The new dispatcher also has a urpc connection to the core that created it.
+ */
+errval_t domain_new_dispatcher(coreid_t core_id,
+ domain_spanned_callback_t callback,
+ void *callback_arg)
+{
+ struct span_domain_state *span_domain_state;
+ errval_t err = domain_new_dispatcher_varstack(core_id,
+ callback, callback_arg,
+ THREADS_DEFAULT_STACK_BYTES,
+ &span_domain_state);
+ if (err_is_fail(err)) {
+ return err;
+ }
+
/* Wait to use the monitor binding */
struct monitor_binding *mcb = get_monitor_binding();
+ /* Set reply handler */
+ mcb->rx_vtbl.span_domain_reply = span_domain_reply;
event_mutex_enqueue_lock(&mcb->mutex, &span_domain_state->event_qnode,
(struct event_closure) {
.handler = span_domain_request_sender_wrapper,
event_dispatch(get_default_waitset());
}
- /* Free state */
free(span_domain_state);
return SYS_ERR_OK;
}
/**
- * \brief Creates a dispatcher on a remote core
+ * \brief Creates a dispatcher for a remote core, without running it.
*
* \param core_id Id of the core to create the dispatcher on
- * \param callback Callback to use when new dispatcher is created
+ * \param ret_state If non-null, will contain the spanned domain state, which
+ * can be used to retrieve the vroot and dispframe, as well as
+ * to check when the new dispatcher is up
*
* The new dispatcher is created with the same vroot, sharing the same vspace.
* The new dispatcher also has a urpc connection to the core that created it.
*/
-errval_t domain_new_dispatcher(coreid_t core_id,
- domain_spanned_callback_t callback,
- void *callback_arg)
+errval_t domain_new_dispatcher_setup_only(coreid_t core_id,
+ struct span_domain_state **ret_state)
{
- return domain_new_dispatcher_varstack(core_id, callback, callback_arg,
- THREADS_DEFAULT_STACK_BYTES);
+ assert(ret_state != NULL);
+ return domain_new_dispatcher_varstack(core_id, NULL, NULL,
+ THREADS_DEFAULT_STACK_BYTES,
+ ret_state);
}
errval_t domain_send_cap(coreid_t core_id, struct capref cap)
assert(core < MAX_CPUS);
disp->core_state.c.spawn_bindings[core] = c;
}
+/**
+ * \brief Returns the proc_mgmt binding stored in the dispatcher's core-local state
+ */
+struct proc_mgmt_binding *get_proc_mgmt_binding(void)
+{
+ dispatcher_handle_t handle = curdispatcher();
+ struct dispatcher_generic* disp = get_dispatcher_generic(handle);
+ return disp->core_state.c.proc_mgmt_binding;
+}
+
+/**
+ * \brief Sets the proc_mgmt binding in the dispatcher's core-local state
+ */
+void set_proc_mgmt_binding(struct proc_mgmt_binding *c)
+{
+ dispatcher_handle_t handle = curdispatcher();
+ struct dispatcher_generic* disp = get_dispatcher_generic(handle);
+ disp->core_state.c.proc_mgmt_binding = c;
+}
struct arrakis_binding *get_arrakis_binding(coreid_t core)
{
struct dispatcher_generic* disp = get_dispatcher_generic(handle);
return &disp->core_state.c.slot_alloc_state;
}
+
+/**
+ * \brief Returns a 64-bit hash code for a given domain cap.
+ *
+ * Identifies the cap via the monitor, checks that it is a Domain cap, and
+ * combines its (coreid, core_local_id) pair into a single integer.
+ *
+ * \param domain_cap Capability expected to be of type ObjType_Domain.
+ * \param ret_hash   Filled in with the hash code on success.
+ *
+ * \return SYS_ERR_OK on success, or PROC_MGMT_ERR_DOMAIN_CAP_HASH if the
+ *         cap could not be identified or is not a Domain cap.
+ */
+errval_t domain_cap_hash(struct capref domain_cap, uint64_t *ret_hash)
+{
+ assert(ret_hash != NULL);
+
+ struct capability ret_cap;
+ errval_t err = monitor_cap_identify_remote(domain_cap, &ret_cap);
+ if (err_is_fail(err)) {
+ return err_push(err, PROC_MGMT_ERR_DOMAIN_CAP_HASH);
+ }
+ if (ret_cap.type != ObjType_Domain) {
+ return PROC_MGMT_ERR_DOMAIN_CAP_HASH;
+ }
+
+ // base exceeds any core_local_id contribution from coreid, so the hash
+ // is collision-free assuming coreid <= MAX_DOMAINID — TODO confirm.
+ static uint64_t base = 1 + (uint64_t) MAX_DOMAINID;
+ *ret_hash = base * ret_cap.u.domain.core_local_id + ret_cap.u.domain.coreid;
+
+ return SYS_ERR_OK;
+}
\ No newline at end of file
#include <barrelfish/morecore.h>
#include <barrelfish/monitor_client.h>
#include <barrelfish/nameservice_client.h>
+#include <barrelfish/proc_mgmt_client.h>
#include <barrelfish/spawn_client.h>
#include <barrelfish/systime.h>
#include <barrelfish_kpi/domain_params.h>
// XXX: Leak all other domain allocations
} else {
- err = spawn_exit(status);
- if(err_is_fail(err)) {
- DEBUG_ERR(err, "spawn_exit");
+ err = proc_mgmt_exit(status);
+ if (err_is_fail(err)) {
+ // Maybe we have not been spawned through the process manager, but
+ // through spawnd directly (we're some bootstrap domain).
+ err = spawn_exit(status);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "spawn_exit");
+ }
}
}
return err;
}
+
+/**
+ * \brief Ask the monitor to remotely identify the given cap.
+ *
+ * \param cap Capability to identify.
+ * \param ret Filled in with the identified capability representation.
+ *
+ * \return SYS_ERR_OK on success; the transport error or the monitor's
+ *         reply error on failure.
+ */
+errval_t monitor_cap_identify_remote(struct capref cap, struct capability *ret)
+{
+ errval_t err, msgerr;
+
+ // Union reinterprets the flounder wire representation (caprep) as a
+ // struct capability without a pointer-cast.
+ union {
+ monitor_blocking_caprep_t caprep;
+ struct capability capability;
+ } u;
+
+ struct monitor_blocking_binding *r = get_monitor_blocking_binding();
+ if (!r) {
+ return LIB_ERR_MONITOR_RPC_NULL;
+ }
+ err = r->rpc_tx_vtbl.cap_identify(r, cap, &msgerr, &u.caprep);
+ if (err_is_fail(err)){
+ return err;
+ } else if (err_is_fail(msgerr)) {
+ return msgerr;
+ }
+
+ assert(ret != NULL);
+ *ret = u.capability;
+
+ // msgerr is necessarily a success value at this point.
+ return msgerr;
+}
\ No newline at end of file
--- /dev/null
+/**
+ * \file
+ * \brief Client for interacting with the process management server.
+ */
+
+/*
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+#include <barrelfish/nameservice_client.h>
+#include <barrelfish/proc_mgmt_client.h>
+#include <if/octopus_defs.h>
+#include <if/proc_mgmt_defs.h>
+#include <vfs/vfs_path.h>
+
+// State shared with the bind continuation while waiting for a proc_mgmt
+// bind to complete.
+struct proc_mgmt_bind_retst {
+ errval_t err; // Outcome reported by the bind continuation.
+ struct proc_mgmt_binding *b; // Resulting binding (valid if err is ok).
+ bool present; // Set true once the continuation has run.
+};
+
+extern char **environ;
+
+// Default error handler for proc_mgmt bindings: logs the error (with a
+// short return-address backtrace on x86) and aborts the domain.
+static void error_handler(struct proc_mgmt_binding *b, errval_t err)
+{
+#if defined(__x86_64__) || defined(__i386__)
+ debug_printf("%p %p %p %p\n",
+ __builtin_return_address(0),
+ __builtin_return_address(1),
+ __builtin_return_address(2),
+ __builtin_return_address(3));
+#endif
+ debug_err(__FILE__, __func__, __LINE__, err,
+ "asynchronous error in proc_mgmt binding");
+ abort();
+}
+
+// Bind continuation: records the bind outcome in the caller-provided
+// retst structure, waking the busy-wait loop in proc_mgmt_bind_client().
+static void proc_mgmt_bind_cont(void *st, errval_t err,
+ struct proc_mgmt_binding *b)
+{
+ struct proc_mgmt_bind_retst *retst = (struct proc_mgmt_bind_retst*) st;
+ assert(retst != NULL);
+ assert(!retst->present);
+ retst->err = err;
+ retst->b = b;
+ retst->present = true;
+}
+
+/**
+ * \brief Receive handler used while accepting a new LMP connection:
+ *        expects the client's endpoint cap, then completes the handshake.
+ */
+static void proc_mgmt_accept_recv_handler(void *arg)
+{
+ struct proc_mgmt_lmp_binding *b = arg;
+ struct lmp_recv_msg msg = LMP_RECV_MSG_INIT;
+ struct capref cap;
+ errval_t err;
+
+ // try to retrieve a message from the channel
+ err = lmp_chan_recv(&b->chan, &msg, &cap);
+ if (err_is_fail(err)) {
+ if (err_no(err) == LIB_ERR_NO_LMP_MSG) {
+ // nothing there, re-register
+ struct event_closure recv_handler = {
+ .handler = proc_mgmt_accept_recv_handler,
+ .arg = b,
+ };
+ err = lmp_chan_register_recv(&b->chan, b->b.waitset, recv_handler);
+ if (err_is_fail(err)) {
+ // Only report a genuine re-registration failure; calling the
+ // error handler unconditionally would abort on every empty poll.
+ b->b.error_handler(&b->b, err_push(err, LIB_ERR_CHAN_REGISTER_RECV));
+ }
+ } else {
+ // real error, report to user
+ b->b.error_handler(&b->b, err_push(err, LIB_ERR_LMP_CHAN_RECV));
+ }
+ return;
+ }
+
+ // TODO(razvan): LMP_PROC_MGMT_ACCEPT ?
+ assert(b->chan.connstate == LMP_MONITOR_ACCEPT);
+ assert(!capref_is_null(cap));
+ b->chan.remote_cap = cap;
+ b->chan.connstate = LMP_CONNECTED;
+
+ /* allocate a new receive slot */
+ err = lmp_chan_alloc_recv_slot(&b->chan);
+ if (err_is_fail(err)) {
+ // XXX: report the error, but continue
+ b->b.error_handler(&b->b, err_push(err, LIB_ERR_LMP_ALLOC_RECV_SLOT));
+ }
+
+ /* Run the RX handler; has a side-effect of registering for receive events */
+ proc_mgmt_lmp_rx_handler(b);
+}
+
+/**
+ * \brief Common setup for a proc_mgmt LMP binding.
+ *
+ * Allocates the local endpoint and an initial receive slot, and installs
+ * the default (aborting) error handler.
+ *
+ * \param lmpb         Storage for the binding state.
+ * \param ws           Waitset used for the channel's events.
+ * \param buflen_words Endpoint buffer size, in words.
+ */
+static errval_t init_lmp_binding(struct proc_mgmt_lmp_binding *lmpb,
+ struct waitset *ws,
+ size_t buflen_words)
+{
+ errval_t err;
+
+ proc_mgmt_lmp_init(lmpb, ws);
+
+ /* allocate a cap slot for the new endpoint cap */
+ err = slot_alloc(&lmpb->chan.local_cap);
+ if (err_is_fail(err)) {
+ return err_push(err, LIB_ERR_SLOT_ALLOC);
+ }
+
+ /* allocate a local endpoint */
+ err = lmp_endpoint_create_in_slot(buflen_words, lmpb->chan.local_cap,
+ &lmpb->chan.endpoint);
+ if (err_is_fail(err)) {
+ // Don't leak the slot allocated above on a failed bind attempt.
+ slot_free(lmpb->chan.local_cap);
+ return err_push(err, LIB_ERR_ENDPOINT_CREATE);
+ }
+
+ /* allocate an initial receive slot */
+ err = lmp_chan_alloc_recv_slot(&lmpb->chan);
+ if (err_is_fail(err)) {
+ // NOTE(review): the endpoint and its cap slot are not torn down on
+ // this path; harmless for the current bind-once callers, but should
+ // be cleaned up if this is ever retried.
+ return err;
+ }
+
+ /* setup error handler */
+ lmpb->b.error_handler = error_handler;
+
+ // connect handlers
+ lmpb->b.change_waitset(&lmpb->b, lmpb->b.waitset);
+ return SYS_ERR_OK;
+}
+
+/**
+ * \brief Accept a new LMP binding to a proc mgmt client.
+ *
+ * Should only be used in the process manager.
+ *
+ * \param lmpb Storage for binding state
+ * \param ws Waitset for handling incoming messages
+ * \param lmp_buflen_words Size of incoming buffer, in number of words
+ *
+ * The channel is left in accepting state; the client's endpoint cap is
+ * received asynchronously by proc_mgmt_accept_recv_handler().
+ */
+errval_t proc_mgmt_client_lmp_accept(struct proc_mgmt_lmp_binding *lmpb,
+ struct waitset *ws,
+ size_t lmp_buflen_words)
+{
+ errval_t err = init_lmp_binding(lmpb, ws, lmp_buflen_words);
+ if (err_is_fail(err)) {
+ return err;
+ }
+
+ lmpb->chan.connstate = LMP_MONITOR_ACCEPT; // TODO(razvan): LMP_PROC_MGMT_ACCEPT?
+ lmpb->chan.remote_cap = NULL_CAP; // will be sent to us by the client
+
+ /* Register for receive notification on our special handler */
+ struct event_closure receive_handler = {
+ .handler = proc_mgmt_accept_recv_handler,
+ .arg = lmpb,
+ };
+ err = lmp_chan_register_recv(&lmpb->chan, ws, receive_handler);
+ if (err_is_fail(err)) {
+ return err; // TODO(razvan): cleanup?
+ }
+
+ return SYS_ERR_OK;
+}
+
+
+/**
+ * \brief Initiate a new LMP binding to the process manager
+ *
+ * To be used by the monitor for setting up the privileged channel used for
+ * spawnd discovery.
+ * Requires an explicit remote endpoint cap allocated by the process manager.
+ *
+ * \param lmpb Storage for binding state
+ * \param ep Remote endpoint of the process manager
+ * \param cont Continuation for when binding completes or fails
+ * \param st State passed to continuation function
+ * \param ws Waitset for handling incoming messages
+ * \param lmp_buflen_words Size of incoming buffer, in number of words
+ */
+errval_t proc_mgmt_client_lmp_bind(struct proc_mgmt_lmp_binding *lmpb,
+ struct capref ep,
+ proc_mgmt_bind_continuation_fn *cont,
+ void *st,
+ struct waitset *ws,
+ size_t lmp_buflen_words)
+{
+ errval_t err = init_lmp_binding(lmpb, ws, lmp_buflen_words);
+ if (err_is_fail(err)) {
+ return err;
+ }
+
+ lmpb->chan.remote_cap = ep;
+
+ // Send the local endpoint cap to the process manager.
+ lmpb->chan.connstate = LMP_CONNECTED; /* pre-established */
+ err = lmp_chan_send0(&lmpb->chan, 0, lmpb->chan.local_cap);
+ if (err_is_fail(err)) {
+ // TODO(razvan): This, below.
+ /* XXX: I'm lazily assuming this can never fail with a transient error,
+ * since we only do it once at dispatcher startup. If not, we need to
+ * register and retry here */
+ assert(!lmp_err_is_transient(err));
+ return err;
+ }
+
+ /* Run the RX handler; has a side-effect of registering for receive events */
+ proc_mgmt_lmp_rx_handler(lmpb);
+
+ /* Run the continuation synchronously; the channel is already connected. */
+ cont(st, SYS_ERR_OK, &lmpb->b);
+
+ return SYS_ERR_OK;
+}
+
+/**
+ * \brief Ensure this dispatcher has an RPC binding to the process manager.
+ *
+ * Idempotent: returns immediately if a binding already exists. Otherwise
+ * looks up the "proc_mgmt" iref via the nameserver, binds, and blocks
+ * until the bind completes.
+ */
+errval_t proc_mgmt_bind_client(void)
+{
+ struct proc_mgmt_binding *b = get_proc_mgmt_binding();
+ if (b != NULL) {
+ return SYS_ERR_OK;
+ }
+
+ errval_t err;
+ iref_t iref;
+ // Try using nameserver to retrieve the proc mgmt iref.
+ err = nameservice_blocking_lookup("proc_mgmt", &iref);
+ if (err_is_fail(err)) {
+ return err;
+ }
+
+ // Initiate bind.
+ struct proc_mgmt_bind_retst bindst = {
+ .present = false
+ };
+
+ err = proc_mgmt_bind(iref, proc_mgmt_bind_cont, &bindst,
+ get_default_waitset(), /*IDC_BIND_FLAG_RPC_CAP_TRANSFER*/IDC_BIND_FLAGS_DEFAULT);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "proc_mgmt_bind");
+ }
+
+ // Wait for bind completion.
+ while (!bindst.present) {
+ messages_wait_and_handle_next();
+ }
+
+ if (err_is_fail(bindst.err)) {
+ return bindst.err;
+ }
+
+ proc_mgmt_rpc_client_init(bindst.b);
+
+ set_proc_mgmt_binding(bindst.b);
+
+ return SYS_ERR_OK;
+}
+
+/**
+ * \brief Inform the process manager about a newly available spawnd.
+ *
+ * \param iref    Iref of the spawnd service.
+ * \param core_id Core on which that spawnd runs.
+ *
+ * Fire-and-forget: the message is sent with NOP_CONT and no reply is
+ * awaited.
+ */
+errval_t proc_mgmt_add_spawnd(iref_t iref, coreid_t core_id)
+{
+ errval_t err = proc_mgmt_bind_client();
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "proc_mgmt_bind_client");
+ return err;
+ }
+
+ struct proc_mgmt_binding *b = get_proc_mgmt_binding();
+ assert(b != NULL);
+
+ err = b->tx_vtbl.add_spawnd(b, NOP_CONT, core_id, iref);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "add_spawnd");
+ }
+
+ return err;
+}
+
+/**
+ * \brief Request the process manager to spawn a program on a specific core
+ *
+ * \param core_id Core ID on which to spawn the program
+ * \param path Absolute path in the file system to an executable
+ * image suitable for the given core
+ * \param argv Command-line arguments, NULL-terminated (must not
+ * itself be NULL)
+ * \param envp Optional environment, NULL-terminated
+ * (pass NULL to inherit)
+ * \param inheritcn_cap Cap to a CNode containing capabilities to be inherited
+ * \param argcn_cap Cap to a CNode containing capabilities passed as
+ * arguments
+ * \param flags Flags to spawn
+ * \param ret_domain_cap If non-NULL, filled in with domain cap of new domain
+ *
+ * \return The reply error from the process manager, or panics on
+ * transport failure.
+ *
+ * \bug flags are currently ignored
+ */
+errval_t proc_mgmt_spawn_program_with_caps(coreid_t core_id, const char *path,
+ char *const argv[],
+ char *const envp[],
+ struct capref inheritcn_cap,
+ struct capref argcn_cap,
+ uint8_t flags,
+ struct capref *ret_domain_cap)
+{
+ errval_t err, msgerr;
+
+ // default to copying our environment
+ if (envp == NULL) {
+ envp = environ;
+ }
+
+ err = proc_mgmt_bind_client();
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "proc_mgmt_bind_client");
+ }
+
+ struct proc_mgmt_binding *b = get_proc_mgmt_binding();
+ assert(b != NULL);
+
+ // construct argument "string"
+ // \0-separated strings in contiguous character buffer
+ // this is needed, as flounder can't send variable-length arrays of strings
+ size_t argstrlen = 0;
+ for (int i = 0; argv[i] != NULL; i++) {
+ argstrlen += strlen(argv[i]) + 1;
+ }
+
+ char argstr[argstrlen];
+ size_t argstrpos = 0;
+ for (int i = 0; argv[i] != NULL; i++) {
+ strcpy(&argstr[argstrpos], argv[i]);
+ argstrpos += strlen(argv[i]);
+ argstr[argstrpos++] = '\0';
+ }
+ assert(argstrpos == argstrlen);
+
+ // repeat for environment
+ size_t envstrlen = 0;
+ for (int i = 0; envp[i] != NULL; i++) {
+ envstrlen += strlen(envp[i]) + 1;
+ }
+
+ char envstr[envstrlen];
+ size_t envstrpos = 0;
+ for (int i = 0; envp[i] != NULL; i++) {
+ strcpy(&envstr[envstrpos], envp[i]);
+ envstrpos += strlen(envp[i]);
+ envstr[envstrpos++] = '\0';
+ }
+ assert(envstrpos == envstrlen);
+
+ // make an unqualified path absolute using the $PATH variable
+ // TODO: implement search (currently assumes PATH is a single directory)
+ char *searchpath = getenv("PATH");
+ if (searchpath == NULL) {
+ searchpath = VFS_PATH_SEP_STR; // XXX: just put it in the root
+ }
+ size_t buflen = strlen(path) + strlen(searchpath) + 2;
+ char pathbuf[buflen];
+ if (path[0] != VFS_PATH_SEP) {
+ snprintf(pathbuf, buflen, "%s%c%s", searchpath, VFS_PATH_SEP, path);
+ pathbuf[buflen - 1] = '\0';
+ //vfs_path_normalise(pathbuf);
+ path = pathbuf;
+ }
+
+ struct capref domain_cap;
+
+ if (capref_is_null(inheritcn_cap) && capref_is_null(argcn_cap)) {
+ err = b->rpc_tx_vtbl.spawn(b, core_id, path, argstr, argstrlen, envstr,
+ envstrlen, flags, &msgerr, &domain_cap);
+ } else {
+ err = b->rpc_tx_vtbl.spawn_with_caps(b, core_id, path, argstr,
+ argstrlen, envstr, envstrlen,
+ inheritcn_cap, argcn_cap, flags,
+ &msgerr, &domain_cap);
+ }
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "error sending spawn request to process manager");
+ } else if (err_is_fail(msgerr)) {
+ // Reply carried an error; skip filling in the domain cap.
+ goto out;
+ }
+
+ if (ret_domain_cap != NULL) {
+ *ret_domain_cap = domain_cap;
+ }
+
+out:
+ return msgerr;
+
+}
+
+/**
+ * \brief Request the process manager to spawn a program on a specific core
+ *
+ * Convenience wrapper around proc_mgmt_spawn_program_with_caps() that
+ * passes no inherit or argument CNodes.
+ *
+ * \param core_id Core ID on which to spawn the program
+ * \param path Absolute path in the file system to an executable
+ * image suitable for the given core
+ * \param argv Command-line arguments, NULL-terminated
+ * \param envp Optional environment, NULL-terminated
+ * (pass NULL to inherit)
+ * \param flags Flags to spawn
+ * \param ret_domain_cap If non-NULL, filled in with domain cap of new domain
+ *
+ * \bug flags are currently ignored
+ */
+errval_t proc_mgmt_spawn_program(coreid_t core_id, const char *path,
+ char *const argv[], char *const envp[],
+ uint8_t flags, struct capref *ret_domain_cap)
+{
+ return proc_mgmt_spawn_program_with_caps(core_id, path, argv, envp,
+ NULL_CAP, NULL_CAP, flags,
+ ret_domain_cap);
+}
+
+/**
+ * \brief Request the process manager to span onto a new core.
+ *
+ * \param core_id ID of core to span onto; must differ from the current core.
+ *
+ * Blocks until the new dispatcher has established an interdispatcher connection
+ * to the current one.
+ */
+errval_t proc_mgmt_span(coreid_t core_id)
+{
+ coreid_t my_core_id = disp_get_core_id();
+ assert (core_id != my_core_id);
+
+ errval_t err, msgerr;
+ err = proc_mgmt_bind_client();
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "proc_mgmt_bind_client");
+ }
+
+ // Pre-create the dispatcher locally; the process manager only records
+ // and triggers it.
+ struct span_domain_state *st;
+ err = domain_new_dispatcher_setup_only(core_id, &st);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "failed to setup new dispatcher");
+ }
+
+ struct proc_mgmt_binding *b = get_proc_mgmt_binding();
+ assert(b != NULL);
+
+ err = b->rpc_tx_vtbl.span(b, cap_domainid, core_id, st->vroot, st->frame,
+ &msgerr);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "error sending span request to process manager");
+ }
+
+ if (err_is_fail(msgerr)) {
+ // NOTE(review): st is leaked on this path; freeing it here may be
+ // unsafe while its event handlers remain registered — TODO confirm.
+ return msgerr;
+ }
+
+ // Wait until the new dispatcher signals it is up.
+ while(!st->initialized) {
+ event_dispatch(get_default_waitset());
+ }
+ free(st);
+
+ return SYS_ERR_OK;
+}
+
+/**
+ * \brief Request the process manager to kill a domain
+ *
+ * \param domain_cap Domain ID cap for the victim
+ *
+ * \return The process manager's reply error; panics on transport failure.
+ */
+errval_t proc_mgmt_kill(struct capref domain_cap)
+{
+ errval_t err, msgerr;
+ err = proc_mgmt_bind_client();
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "proc_mgmt_bind_client");
+ }
+
+ struct proc_mgmt_binding *b = get_proc_mgmt_binding();
+ assert(b != NULL);
+
+ err = b->rpc_tx_vtbl.kill(b, domain_cap, &msgerr);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "error sending kill request to process manager");
+ }
+
+ return msgerr;
+}
+
+/**
+ * \brief Inform the process manager about exiting execution.
+ *
+ * \param status Exit status to report.
+ *
+ * One-way notification (NOP_CONT); no reply is awaited.
+ */
+errval_t proc_mgmt_exit(uint8_t status)
+{
+ errval_t err = proc_mgmt_bind_client();
+ if (err_is_fail(err)) {
+ return err;
+ }
+
+ struct proc_mgmt_binding *b = get_proc_mgmt_binding();
+ assert(b != NULL);
+
+ err = b->tx_vtbl.exit(b, NOP_CONT, cap_domainid, status);
+ if (err_is_fail(err)) {
+ return err;
+ }
+
+ return SYS_ERR_OK;
+}
+
+/**
+ * \brief Block until the given domain exits.
+ *
+ * \param domain_cap Domain cap of the domain to wait for.
+ * \param status     Filled in with the domain's exit status.
+ *
+ * \return The process manager's reply error; panics on transport failure.
+ */
+errval_t proc_mgmt_wait(struct capref domain_cap, uint8_t *status)
+{
+ errval_t err, msgerr;
+ err = proc_mgmt_bind_client();
+ if (err_is_fail(err)) {
+ return err;
+ }
+
+ struct proc_mgmt_binding *b = get_proc_mgmt_binding();
+ assert(b != NULL);
+
+ err = b->rpc_tx_vtbl.wait(b, domain_cap, &msgerr, status);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "error sending wait request to process manager");
+ }
+
+ return msgerr;
+}
return err;
}
+/**
+ * \brief Bind to a spawnd service identified by iref and initialize an RPC
+ *        client on the resulting binding.
+ *
+ * \param iref       Iref of the spawnd service.
+ * \param ret_client Filled in with the new binding on success.
+ *
+ * Blocks until the bind completes.
+ */
+errval_t spawn_bind_iref(iref_t iref, struct spawn_binding **ret_client)
+{
+ assert(ret_client != NULL);
+
+ struct spawn_bind_retst bindst = { .present = false };
+ errval_t err = spawn_bind(iref, spawn_bind_cont, &bindst,
+ get_default_waitset(), IDC_BIND_FLAGS_DEFAULT);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "spawn_bind failed");
+ return err;
+ }
+
+ // XXX: block for bind completion
+ while (!bindst.present) {
+ messages_wait_and_handle_next();
+ }
+
+ if (err_is_fail(bindst.err)) {
+ return bindst.err;
+ }
+
+ spawn_rpc_client_init(bindst.b);
+ *ret_client = bindst.b;
+ // set_spawn_binding(coreid, bindst.b);
+
+ // err is necessarily a success value here.
+ return err;
+}
/**
* \brief Request the spawn daemon on a specific core to spawn a program
errval_t err;
size_t blocksize = sizeof(struct thread) + tls_block_total_len;
- err = vspace_mmu_aware_map(&thread_slabs_vm, blocksize,
+ err = vspace_mmu_aware_map(&thread_slabs_vm, 64 * blocksize,
&buf, &size);
if (err_is_fail(err)) {
if (err_no(err) == LIB_ERR_VSPACE_MMU_AWARE_NO_SPACE) {
mdb_dump(mdb_root, 0);
}
-STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
+STATIC_ASSERT(50 == ObjType_Num, "Knowledge of all cap types");
static void print_cte(struct cte *cte, char *indent_buff)
{
struct mdbnode *node = N(cte);
"init",
"skb",
"spawnd",
+ "proc_mgmt",
"startd",
"mem_serv",
"monitor",
m.add_module("monitor")
m.add_module("ramfsd", ["boot"])
m.add_module("skb", ["boot"])
+ m.add_module("proc_mgmt", ["boot"])
m.add_module("spawnd", ["boot"])
m.add_module("startd", ["boot"])
m.add_module("/eclipseclp_ramfs.cpio.gz", ["nospawn"])
--- /dev/null
+##########################################################################
+# Copyright (c) 2017, ETH Zurich.
+# All rights reserved.
+#
+# This file is distributed under the terms in the attached LICENSE file.
+# If you do not find this file, copies can be found by writing to:
+# ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+##########################################################################
+
+import datetime
+import tests
+from common import TestCommon
+from results import PassFailResult
+
+@tests.add_test
+class ProcMgmtTest(TestCommon):
+    '''Process management service API. Requires at least 2 cores.'''
+    name = "proc_mgmt_test"
+
+    def setup(self, build, machine, testdir):
+        '''Extend the default setup with a generous 15-minute timeout.'''
+        super(ProcMgmtTest, self).setup(build, machine, testdir)
+        self.test_timeout_delta = datetime.timedelta(seconds=15*60)
+
+    def get_modules(self, build, machine):
+        '''Boot one proc_mgmt_test instance, pinned to core 3.'''
+        modules = super(ProcMgmtTest, self).get_modules(build, machine)
+        n = 1
+        for i in range(n):
+            modules.add_module("proc_mgmt_test", ["core=3", str(i), str(n)])
+            # modules.add_module("proc_mgmt_test", ["core=3", "1"])
+        return modules
+
+    def get_finish_string(self):
+        '''Marker the test binary prints when it is done.'''
+        return "TEST DONE"
+
+    def process_data(self, testdir, rawiter):
+        '''Fail if any output line starts with "FAIL:"; pass otherwise.'''
+        for line in rawiter:
+            if line.startswith("FAIL:"):
+                return PassFailResult(False)
+
+        return PassFailResult(True)
return err_push(err, INIT_ERR_COPY_IPI);
}
+ /* Give monitor the ProcessManager capability */
+ dest.cnode = si->taskcn;
+ dest.slot = TASKCN_SLOT_PROC_MNG;
+ src.cnode = cnode_task;
+ src.slot = TASKCN_SLOT_PROC_MNG;
+ err = cap_copy(dest, src);
+ if (err_is_fail(err)) {
+ return err_push(err, INIT_ERR_COPY_PROC_MNG_CAP);
+ }
+
/* Give monitor modulecn */
dest.cnode = si->rootcn;
dest.slot = ROOTCN_SLOT_MODULECN;
extern iref_t mem_serv_iref;
extern iref_t name_serv_iref;
extern iref_t ramfs_serv_iref;
+extern iref_t spawn_iref;
extern iref_t monitor_rpc_iref;
extern iref_t monitor_mem_iref;
extern coreid_t my_core_id;
#include <sys/param.h>
#include <monitor.h>
#include <barrelfish/dispatch.h>
+#include <barrelfish/proc_mgmt_client.h>
#include <trace/trace.h>
#include "send_cap.h"
#include "capops.h"
}
}
+// Intermonitor handler: a remote monitor announced its spawnd; forward the
+// (iref, core) pair to the process manager via our local binding.
+static void add_spawnd(struct intermon_binding *b, iref_t iref)
+{
+ struct intermon_state *st = (struct intermon_state*) b->st;
+ errval_t err = proc_mgmt_add_spawnd(iref, st->core_id);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "Sending proc_mgmt_add_spawnd request failed");
+ }
+}
+
static void trace_caps_request(struct intermon_binding *b)
{
errval_t err;
.span_domain_request = span_domain_request,
.span_domain_reply = span_domain_reply,
+ .add_spawnd = add_spawnd,
+
.rsrc_join = inter_rsrc_join,
.rsrc_join_complete = inter_rsrc_join_complete,
.rsrc_timer_sync = inter_rsrc_timer_sync,
#include <barrelfish/dispatch.h>
#include <barrelfish/deferred.h>
#include <barrelfish/domain.h>
+#include <barrelfish/proc_mgmt_client.h>
#include <trace/trace.h>
#ifdef __k1om__
extern char **environ;
#endif
-/* irefs for mem server name service and ramfs */
+/* irefs for mem server, name service, ramfs and spawnd */
iref_t mem_serv_iref = 0;
iref_t ramfs_serv_iref = 0;
iref_t name_serv_iref = 0;
+iref_t spawn_iref = 0;
iref_t monitor_rpc_iref = 0;
// Capref to trace cap
return err;
}
+ // Spawn process manager, to be used by further domains.
+ set_proc_mgmt_binding(NULL);
+ err = spawn_domain("proc_mgmt");
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "failed spawning proc_mgmt");
+ return err;
+ }
+ // XXX: Wait for proc_mgmt to initialize
+ while (get_proc_mgmt_binding() == NULL) {
+ messages_wait_and_handle_next();
+ }
+
/* Spawn boot domains in menu.lst */
err = spawn_all_domains();
if (err_is_fail(err)) {
return err;
}
+ // XXX: Wait for spawnd to initialize
+ while (spawn_iref == 0) {
+ messages_wait_and_handle_next();
+ }
+ // Now tell process manager about our new spawnd.
+ err = proc_mgmt_add_spawnd(spawn_iref, disp_get_core_id());
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "error sending spawnd iref to process manager");
+ }
+
return SYS_ERR_OK;
}
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "error spawning spawnd");
}
+ // XXX: Wait for spawnd to initialize
+ while (spawn_iref == 0) {
+ messages_wait_and_handle_next();
+ }
+ // Use monitor.0 to tell the process manager about our new spawnd.
+ struct intermon_binding *ib0;
+ err = intermon_binding_get(0, &ib0);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "error retrieving intermon_binding for monitor 0");
+ }
+ err = ib0->tx_vtbl.add_spawnd(ib0, NOP_CONT, spawn_iref);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "error sending add_spawnd request to monitor 0");
+ }
/* Signal the monitor that booted us that we have initialized */
err = intermon_binding->tx_vtbl.monitor_initialized(intermon_binding, NOP_CONT);
#include <trace_definitions/trace_defs.h>
#include <if/mem_defs.h>
#include <barrelfish/monitor_client.h>
+#include <barrelfish/proc_mgmt_client.h>
#include <barrelfish/syscalls.h>
#include <barrelfish_kpi/distcaps.h>
#include <if/monitor_loopback_defs.h>
cslot_t slot;
};
+// State shared with the proc_mgmt bind continuation in the monitor.
+struct proc_mgmt_bind_st {
+ errval_t err; // Outcome reported by the bind continuation.
+ bool present; // Set true once the continuation has run.
+};
+
static void ms_multiboot_cap_request(struct monitor_binding *b, cslot_t slot);
static void ms_multiboot_cap_request_handler(struct monitor_binding *b,
ramfs_serv_iref = iref;
}
+// Bind continuation: records the outcome so set_proc_mgmt_ep_request()'s
+// dispatch loop can stop waiting. The binding itself is ignored here.
+static void proc_mgmt_bind_cont(void *st,
+ errval_t err,
+ struct proc_mgmt_binding *b)
+{
+ struct proc_mgmt_bind_st* bind_st = (struct proc_mgmt_bind_st*) st;
+ assert(!bind_st->present);
+ bind_st->err = err;
+ bind_st->present = true;
+}
+
+/**
+ * \brief Handler for the process manager's endpoint announcement: set up
+ *        the monitor's side of the privileged LMP channel and complete
+ *        the handshake.
+ */
+static void set_proc_mgmt_ep_request(struct monitor_binding *b,
+ struct capref ep)
+{
+ // We got the endpoint which the process manager has allocated for us.
+ // Time to set up our part of the LMP connection and finish the handshake.
+ struct proc_mgmt_lmp_binding *lmpb =
+ malloc(sizeof(struct proc_mgmt_lmp_binding));
+ assert(lmpb != NULL);
+
+ set_proc_mgmt_binding(&lmpb->b);
+
+ struct proc_mgmt_bind_st bind_st = {
+ .present = false
+ };
+ errval_t err = proc_mgmt_client_lmp_bind(lmpb,
+ ep,
+ proc_mgmt_bind_cont,
+ &bind_st,
+ get_default_waitset(),
+ DEFAULT_LMP_BUF_WORDS);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "proc_mgmt_client_lmp_bind");
+ }
+
+ // Dispatch events on the waitset until proc_mgmt binding completes.
+ while (!bind_st.present) {
+ err = event_dispatch(get_default_waitset());
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "monitor event dispatch");
+ }
+ }
+
+ if(err_is_fail(bind_st.err)) {
+ // Report the error the continuation delivered — not the local
+ // dispatch status, which is necessarily ok at this point.
+ USER_PANIC_ERR(bind_st.err, "during proc_mgmt bind initialization");
+ }
+
+ proc_mgmt_rpc_client_init(&lmpb->b);
+}
+
+// Records the local spawnd's iref, announced once at spawnd startup.
+// Subsequent attempts to change it are ignored.
+static void set_spawn_iref_request(struct monitor_binding *b, iref_t iref)
+{
+ if (spawn_iref != 0) {
+ // Called multiple times, return error
+ // NOTE(review): DEBUG_ERR is passed a zero errval here — confirm
+ // this prints a sensible message rather than "success".
+ DEBUG_ERR(0, "Attempt to reset spawn IREF ignored");
+ return;
+ }
+
+ spawn_iref = iref;
+}
+
struct send_cap_st {
struct intermon_msg_queue_elem qe; // must be first
uintptr_t my_mon_id;
.set_mem_iref_request = set_mem_iref_request,
.set_name_iref_request = set_name_iref_request,
.set_ramfs_iref_request = set_ramfs_iref_request,
+ .set_proc_mgmt_ep_request = set_proc_mgmt_ep_request,
+ .set_spawn_iref_request = set_spawn_iref_request,
.get_monitor_rpc_iref_request = get_monitor_rpc_iref_request,
.cap_send_request = cap_send_request,
}
}
+ if (!strcmp(name, "proc_mgmt")) {
+ // Pass ProcessManager cap.
+ dest.cnode = si->taskcn;
+ dest.slot = TASKCN_SLOT_PROC_MNG;
+ src = cap_procmng;
+ err = cap_copy(dest, src);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "Can not give ProcessManager cap");
+ return err_push(err, SPAWN_ERR_COPY_PROC_MNG_CAP);
+ }
+ }
+
#ifdef __k1om__
if (!strcmp(name, "xeon_phi")) {
dest.cnode = si->taskcn;
|| !strcmp(short_name, "monitor")
|| !strcmp(short_name, "mem_serv")
|| !strcmp(short_name, "xeon_phi")
+ || !strcmp(short_name, "proc_mgmt")
)
{
continue;
--- /dev/null
+--------------------------------------------------------------------------
+-- Copyright (c) 2017, ETH Zurich.
+-- All rights reserved.
+--
+-- This file is distributed under the terms in the attached LICENSE file.
+-- If you do not find this file, copies can be found by writing to:
+-- ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+--
+-- Hakefile for /usr/proc_mgmt
+--
+--------------------------------------------------------------------------
+
+[ build application { target = "proc_mgmt",
+                      cFiles = [ "main.c", "service.c", "spawnd_state.c",
+                                 "domain.c" ],
+                      addLibraries = libDeps [ "skb", "dist", "lwip",
+                                               "collections" ],
+                      flounderDefs = [ "monitor", "monitor_blocking" ],
+                      flounderExtraDefs = [ ("monitor_blocking",["rpcclient"]),
+                                            ("spawn",["rpcclient"]) ],
+                      flounderBindings = [ "proc_mgmt" ],
+                      -- One stanza suffices: the previous three copies were
+                      -- identical except for this architecture list.
+                      architectures = [ "x86_64", "x86_32", "k1om",
+                                        "armv7", "armv8" ]
+                    }
+]
--- /dev/null
+/*
+ * \brief Domain internals for the process manager.
+ *
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+#include <collections/hash_table.h>
+#include <if/spawn_defs.h>
+
+#include "domain.h"
+#include "spawnd_state.h"
+
+#define HASH_INDEX_BUCKETS 6151
+static collections_hash_table* domain_table = NULL;
+
+#define DOMAIN_CAP_REFILL_COUNT L2_CNODE_SLOTS//1
+static struct domain_cap_node *domain_cap_list = NULL;
+static uint32_t free_domain_caps = 0;
+
+// True when the preallocated pool of domain caps is empty and
+// domain_prealloc_caps() must run before the next allocation.
+inline bool domain_should_refill_caps(void) {
+    return free_domain_caps == 0;
+}
+
+/**
+ * \brief Allocates a new L2 cnode and fills it with domain capabilities.
+ *
+ * Retypes DOMAIN_CAP_REFILL_COUNT Domain caps off the ProcessManager cap
+ * and pushes one list node per cap onto domain_cap_list.
+ */
+errval_t domain_prealloc_caps(void)
+{
+    struct capref new_cnode_cap;
+    struct cnoderef new_cnode;
+    errval_t err = cnode_create_l2(&new_cnode_cap, &new_cnode);
+    if (err_is_fail(err)) {
+        DEBUG_ERR(err, "cnode_create_l2");
+        return err_push(err, PROC_MGMT_ERR_CREATE_DOMAIN_CAP);
+    }
+
+    struct capref cap_iter = {
+        .cnode = new_cnode,
+        .slot = 0
+    };
+    err = cap_retype(cap_iter, cap_procmng, 0, ObjType_Domain, 0,
+                     DOMAIN_CAP_REFILL_COUNT);
+    if (err_is_fail(err)) {
+        DEBUG_ERR(err, "cap_retype");
+        return err_push(err, PROC_MGMT_ERR_CREATE_DOMAIN_CAP);
+    }
+
+    for (cap_iter.slot = 0; cap_iter.slot < DOMAIN_CAP_REFILL_COUNT;
+         ++cap_iter.slot) {
+        struct domain_cap_node *node = (struct domain_cap_node*) malloc(
+                sizeof(struct domain_cap_node));
+        // BUGFIX: the malloc result was previously used unchecked.
+        if (node == NULL) {
+            return LIB_ERR_MALLOC_FAIL;
+        }
+        node->domain_cap = cap_iter;
+
+        err = domain_cap_hash(node->domain_cap, &node->hash);
+        if (err_is_fail(err)) {
+            DEBUG_ERR(err, "domain_cap_hash");
+            free(node);  // BUGFIX: node leaked on this error path.
+            return err_push(err, PROC_MGMT_ERR_CREATE_DOMAIN_CAP);
+        }
+
+        node->next = domain_cap_list;
+        domain_cap_list = node;
+        ++free_domain_caps;
+    }
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief Pops and returns the head of the free domain-cap list.
+ */
+struct domain_cap_node *next_cap_node(void)
+{
+    assert(free_domain_caps > 0);
+    assert(domain_cap_list != NULL);
+
+    struct domain_cap_node *head = domain_cap_list;
+    domain_cap_list = head->next;
+    --free_domain_caps;
+
+    return head;
+}
+
+/**
+ * \brief Creates and returns a new domain entry.
+ *
+ * Lazily creates the domain hash table on first use.
+ *
+ * \param cap_node  preallocated domain cap node identifying the domain.
+ * \param ret_entry returned domain entry, must be passed in non-NULL;
+ *                  only written on success.
+ */
+errval_t domain_new(struct domain_cap_node *cap_node,
+                    struct domain_entry **ret_entry)
+{
+    assert(ret_entry != NULL);
+
+    struct domain_entry *entry = (struct domain_entry*) malloc(
+            sizeof(struct domain_entry));
+    if (entry == NULL) {
+        return LIB_ERR_MALLOC_FAIL;
+    }
+
+    entry->cap_node = cap_node;
+    entry->status = DOMAIN_STATUS_NIL;
+    memset(entry->spawnds, 0, sizeof(entry->spawnds));
+    entry->num_spawnds_running = 0;
+    entry->num_spawnds_resources = 0;
+    entry->waiters = NULL;
+
+    if (domain_table == NULL) {
+        collections_hash_create_with_buckets(&domain_table, HASH_INDEX_BUCKETS,
+                                             NULL);
+        if (domain_table == NULL) {
+            free(entry);  // BUGFIX: entry leaked on this error path.
+            return PROC_MGMT_ERR_CREATE_DOMAIN_TABLE;
+        }
+    }
+
+    collections_hash_insert(domain_table, cap_node->hash, entry);
+
+    *ret_entry = entry;
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief Returns the domain entry associated with the given domain cap.
+ *
+ * \param domain_cap identifying cap for which to look up the domain entry.
+ * \param ret_entry  returned domain entry, must be passed in non-NULL.
+ */
+errval_t domain_get_by_cap(struct capref domain_cap,
+                           struct domain_entry **ret_entry)
+{
+    assert(ret_entry != NULL);
+
+    // BUGFIX: guard against lookups before any domain was ever created;
+    // the hash table is only allocated lazily by domain_new().
+    if (domain_table == NULL) {
+        return PROC_MGMT_ERR_DOMAIN_TABLE_FIND;
+    }
+
+    uint64_t key;
+    errval_t err = domain_cap_hash(domain_cap, &key);
+    if (err_is_fail(err)) {
+        return err;
+    }
+
+    void *table_entry = collections_hash_find(domain_table, key);
+    if (table_entry == NULL) {
+        return PROC_MGMT_ERR_DOMAIN_TABLE_FIND;
+    }
+    *ret_entry = (struct domain_entry*) table_entry;
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief Adds a new core to the list of cores where the given domain runs.
+ *
+ * \param entry   domain entry to add a new core for.
+ * \param core_id new core running a dispatcher for the domain.
+ */
+void domain_run_on_core(struct domain_entry *entry, coreid_t core_id)
+{
+    assert(entry != NULL);
+    assert(core_id < MAX_COREID);
+    // Only legal for a fresh or already-running domain; stopped or cleaned
+    // domains must not gain new dispatchers.
+    assert(entry->status == DOMAIN_STATUS_NIL ||
+           entry->status == DOMAIN_STATUS_RUNNING);
+
+    entry->status = DOMAIN_STATUS_RUNNING;
+
+    // NOTE(review): assumes the caller checked spawnds[core_id] == NULL;
+    // a repeated core_id would over-count both counters -- confirm callers.
+    entry->spawnds[core_id] = spawnd_state_get(core_id);
+    ++entry->num_spawnds_running;
+    ++entry->num_spawnds_resources;
+}
+
+/**
+ * \brief Creates a new domain entry for the given cap node and core.
+ *
+ * \param cap_node preallocated capability node for the new domain.
+ * \param core_id  core that runs the new domain.
+ */
+errval_t domain_spawn(struct domain_cap_node *cap_node, coreid_t core_id)
+{
+    struct domain_entry *entry = NULL;
+    errval_t err = domain_new(cap_node, &entry);
+    if (err_is_fail(err)) {
+        // domain_new only sets entry on success, so there is nothing to
+        // free here (the previous free(entry) was always free(NULL)).
+        return err;
+    }
+
+    domain_run_on_core(entry, core_id);
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief Marks that the domain identified by the given cap spans a new core.
+ *
+ * \param domain_cap identifying capability for the spanning domain.
+ * \param core_id    new core which the domain spans.
+ */
+errval_t domain_span(struct capref domain_cap, coreid_t core_id)
+{
+    // Look up the domain, then record the additional core it now spans.
+    struct domain_entry *entry = NULL;
+    errval_t err = domain_get_by_cap(domain_cap, &entry);
+    if (err_is_ok(err)) {
+        assert(entry != NULL);
+        domain_run_on_core(entry, core_id);
+    }
+
+    return err;
+}
--- /dev/null
+/*
+ * \brief Domain internals for the process manager.
+ *
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef PROC_MGMT_DOMAIN_H
+#define PROC_MGMT_DOMAIN_H
+
+#include <barrelfish/barrelfish.h>
+#include <if/proc_mgmt_defs.h>
+
+#include "spawnd_state.h"
+
+// Exit status reported for domains terminated via kill.
+#define EXIT_STATUS_KILLED 9
+
+// Lifecycle of a domain as tracked by the process manager.
+enum domain_status {
+    DOMAIN_STATUS_NIL,        // Entry allocated, no dispatcher yet.
+    DOMAIN_STATUS_RUNNING,    // At least one dispatcher running.
+    DOMAIN_STATUS_STOP_PEND,  // Kill/exit issued, awaiting spawnd acks.
+    DOMAIN_STATUS_STOPPED,    // All dispatchers stopped.
+    DOMAIN_STATUS_CLEANED     // All spawnds reclaimed resources.
+};
+
+// A client blocked in wait() until some domain stops.
+struct domain_waiter {
+    struct proc_mgmt_binding *b;  // Binding to reply on when stopped.
+    struct domain_waiter *next;   // Singly-linked list of waiters.
+};
+
+// Node in the preallocated free list of Domain capabilities.
+struct domain_cap_node {
+    struct capref domain_cap;
+    uint64_t hash;  // Cached hash of domain_cap; key into the domain table.
+
+    struct domain_cap_node *next;
+};
+
+struct domain_entry {
+    struct domain_cap_node *cap_node;
+    enum domain_status status; // Current domain state.
+
+    struct spawnd_state *spawnds[MAX_COREID]; // Spawnds running this domain.
+    coreid_t num_spawnds_running;
+    coreid_t num_spawnds_resources;
+
+    struct domain_waiter *waiters; // Clients waiting after this domain.
+
+    uint8_t exit_status;
+};
+
+bool domain_should_refill_caps(void);
+errval_t domain_prealloc_caps(void);
+struct domain_cap_node *next_cap_node(void);
+
+errval_t domain_new(struct domain_cap_node *cap_node,
+                    struct domain_entry **ret_entry);
+errval_t domain_get_by_cap(struct capref domain_cap,
+                           struct domain_entry **ret_entry);
+void domain_run_on_core(struct domain_entry *entry, coreid_t core_id);
+
+errval_t domain_spawn(struct domain_cap_node *cap_node, coreid_t core_id);
+// NOTE(review): domain_can_span is declared here but no definition is
+// visible in domain.c -- confirm it exists or drop the declaration.
+errval_t domain_can_span(struct capref domain_cap, coreid_t core_id);
+errval_t domain_span(struct capref domain_cap, coreid_t core_id);
+// Marks a kill/exit in progress for the given domain entry.
+static inline void domain_stop_pending(struct domain_entry *entry)
+{
+    assert(entry != NULL);
+    entry->status = DOMAIN_STATUS_STOP_PEND;
+}
+
+#endif // PROC_MGMT_DOMAIN_H
\ No newline at end of file
--- /dev/null
+/**
+ * \file
+ * \brief Internal proc_mgmt functions.
+ */
+
+/*
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef PROC_MGMT_INTERNAL_H_
+#define PROC_MGMT_INTERNAL_H_
+
+// Name under which the service registers with the nameservice.
+#define SERVICE_BASENAME "proc_mgmt"
+
+// Core this instance runs on; defined in main.c.
+// NOTE(review): never assigned in the visible code -- confirm initialization.
+extern coreid_t my_core_id;
+
+// Prefills the domain-cap pool and exports the proc_mgmt interface.
+errval_t start_service(void);
+
+#endif //PROC_MGMT_INTERNAL_H_
--- /dev/null
+/**
+ * \file
+ * \brief Process management server.
+ */
+
+/*
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+
+#include "internal.h"
+
+coreid_t my_core_id;
+
+int main(int argc, const char *argv[])
+{
+    errval_t err;
+
+    // Export the proc_mgmt interface and register with the nameservice.
+    err = start_service();
+    if (err_is_fail(err)) {
+        USER_PANIC_ERR(err, "failed to start proc_mgmt service loop");
+    }
+
+    debug_printf("ready\n");
+
+    // Does not return: dispatches messages on the default waitset forever.
+    messages_handler_loop();
+}
--- /dev/null
+/*
+ * \brief Client handling internals for the process manager.
+ *
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef PENDING_CLIENTS_H
+#define PENDING_CLIENTS_H
+
+#include <barrelfish/barrelfish.h>
+#include <if/proc_mgmt_defs.h>
+#include <if/spawn_defs.h>
+
+#include "domain.h"
+
+#define HASH_INDEX_BUCKETS 6151
+
+// Which operation a pending client is waiting on; selects how the shared
+// spawnd reply handler interprets pending_client.st.
+enum ClientType {
+    ClientType_Spawn,
+    ClientType_SpawnWithCaps,
+    ClientType_Span,
+    ClientType_Kill,
+    ClientType_Exit,
+    ClientType_Cleanup
+};
+
+// State for an in-flight spawn / spawn_with_caps request.
+struct pending_spawn {
+    struct domain_cap_node *cap_node;  // Identity of the new domain.
+
+    struct spawn_binding *b;           // Target spawnd binding.
+    coreid_t core_id;
+
+    const char *path;
+
+    const char *argvbuf;
+    size_t argvbytes;
+    const char *envbuf;
+    size_t envbytes;
+
+    struct capref inheritcn_cap;       // NULL_CAP for a plain spawn.
+    struct capref argcn_cap;           // NULL_CAP for a plain spawn.
+
+    uint8_t flags;
+};
+
+// State for an in-flight span request.
+struct pending_span {
+    struct capref domain_cap;
+    struct domain_entry *entry;
+
+    struct spawn_binding *b;
+
+    coreid_t core_id;
+    struct capref vroot;
+    struct capref dispframe;
+};
+
+// State for an in-flight kill, exit or cleanup request.
+struct pending_kill_cleanup {
+    struct capref domain_cap;
+    struct domain_entry *entry;
+    struct spawn_binding *b;
+};
+
+// A client request awaiting a reply from some spawnd.
+struct pending_client {
+    struct proc_mgmt_binding *b;  // Binding to respond on.
+    enum ClientType type;
+    void *st;                     // One of the pending_* structs above.
+};
+
+#endif // PENDING_CLIENTS_H
\ No newline at end of file
--- /dev/null
+/**
+ * \file
+ * \brief Process management service.
+ */
+
+/*
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+#include <barrelfish/nameservice_client.h>
+#include <barrelfish/proc_mgmt_client.h>
+#include <barrelfish/spawn_client.h>
+#include <if/monitor_defs.h>
+#include <if/proc_mgmt_defs.h>
+#include <if/spawn_defs.h>
+
+#include "domain.h"
+#include "internal.h"
+#include "pending_clients.h"
+#include "spawnd_state.h"
+
+/**
+ * \brief Handler for message add_spawnd, for the local monitor binding.
+ *
+ * Binds with the newly announced spawnd and records its state.
+ */
+static void add_spawnd_handler(struct proc_mgmt_binding *b, coreid_t core_id,
+                               iref_t iref)
+{
+    if (spawnd_state_exists(core_id)) {
+        DEBUG_ERR(PROC_MGMT_ERR_SPAWND_EXISTS, "spawnd_state_exists");
+        return;
+    }
+
+    // Bind with the spawnd.
+    struct spawn_binding *spawnb;
+    errval_t err = spawn_bind_iref(iref, &spawnb);
+    if (err_is_fail(err)) {
+        DEBUG_ERR(err, "spawn_bind_iref");
+        return;
+    }
+
+    err = spawnd_state_alloc(core_id, spawnb);
+    if (err_is_fail(err)) {
+        DEBUG_ERR(err, "spawnd_state_alloc");
+        // BUGFIX: do not fall through and claim success below when state
+        // allocation failed.
+        return;
+    }
+
+    debug_printf("Process manager bound with spawnd.%u on iref %u\n", core_id,
+                 iref);
+}
+
+/**
+ * \brief Handler for message add_spawnd, for non-monitor bindings.
+ *
+ * Deliberately a no-op: only the monitor may announce new spawnds.
+ */
+static void add_spawnd_handler_non_monitor(struct proc_mgmt_binding *b,
+                                           coreid_t core_id, iref_t iref)
+{
+    // debug_printf("Ignoring add_spawnd call: %s\n",
+    //              err_getstring(PROC_MGMT_ERR_NOT_MONITOR));
+}
+
+static bool cleanup_request_sender(struct msg_queue_elem *m);
+
+/**
+ * General-purpose handler for replies from spawnd.
+ *
+ * Dequeues the pending client whose request this reply answers and
+ * advances the per-operation state machine.
+ */
+static void spawn_reply_handler(struct spawn_binding *b, errval_t spawn_err)
+{
+    struct pending_client *cl =
+            (struct pending_client*) spawnd_state_dequeue_recv(b->st);
+
+    struct pending_spawn *spawn = NULL;
+    struct pending_span *span = NULL;
+    struct pending_kill_cleanup *kc = NULL;
+
+    struct domain_entry *entry;
+
+    errval_t err, resp_err;
+
+    switch (cl->type) {
+        case ClientType_Spawn:
+        case ClientType_SpawnWithCaps:
+            spawn = (struct pending_spawn*) cl->st;
+            err = spawn_err;
+            if (err_is_ok(spawn_err)) {
+                err = domain_spawn(spawn->cap_node, spawn->core_id);
+            }
+            // BUGFIX: always answer the client. Previously no response was
+            // sent when spawnd reported failure, blocking the client forever.
+            if (cl->type == ClientType_Spawn) {
+                resp_err = cl->b->tx_vtbl.spawn_response(cl->b, NOP_CONT,
+                        err, spawn->cap_node->domain_cap);
+            } else {
+                resp_err = cl->b->tx_vtbl.spawn_with_caps_response(cl->b,
+                        NOP_CONT, err, spawn->cap_node->domain_cap);
+            }
+            if (err_is_fail(resp_err)) {
+                DEBUG_ERR(resp_err, "failed to send spawn response");
+            }
+
+            free(spawn);
+            break;
+
+        case ClientType_Span:
+            span = (struct pending_span*) cl->st;
+            entry = span->entry;
+            if (entry->status == DOMAIN_STATUS_RUNNING) {
+                resp_err = cl->b->tx_vtbl.span_response(cl->b, NOP_CONT,
+                                                        spawn_err);
+                if (err_is_fail(resp_err)) {
+                    DEBUG_ERR(resp_err, "failed to send span_response");
+                }
+            }
+
+            free(span);
+            break;
+
+        case ClientType_Cleanup:
+            kc = (struct pending_kill_cleanup*) cl->st;
+            entry = kc->entry;
+
+            assert(entry->num_spawnds_resources > 0);
+            assert(entry->status != DOMAIN_STATUS_CLEANED);
+
+            --entry->num_spawnds_resources;
+            if (entry->num_spawnds_resources == 0) {
+                entry->status = DOMAIN_STATUS_CLEANED;
+
+                // At this point, the domain exists in state CLEANED for
+                // history reasons. For instance, if some other domain issues
+                // a wait call for this one, the process manager can return
+                // the exit status directly. At some point, however, we might
+                // want to just clean up the domain entry and recycle the
+                // domain cap.
+            }
+
+            free(kc);
+            break;
+
+        case ClientType_Kill:
+        case ClientType_Exit:
+            // Kill and exit follow the same path: count down acknowledging
+            // spawnds; once all dispatchers stopped, notify waiters and fan
+            // out cleanup requests to every involved spawnd.
+            kc = (struct pending_kill_cleanup*) cl->st;
+            entry = kc->entry;
+
+            assert(entry->num_spawnds_running > 0);
+            assert(entry->status != DOMAIN_STATUS_STOPPED);
+
+            --entry->num_spawnds_running;
+
+            if (entry->num_spawnds_running == 0) {
+                entry->status = DOMAIN_STATUS_STOPPED;
+
+                if (cl->type == ClientType_Kill) {
+                    entry->exit_status = EXIT_STATUS_KILLED;
+                    resp_err = cl->b->tx_vtbl.kill_response(cl->b, NOP_CONT,
+                                                            spawn_err);
+                    if (err_is_fail(resp_err)) {
+                        DEBUG_ERR(resp_err, "failed to send kill_response");
+                    }
+                }
+
+                struct domain_waiter *waiter = entry->waiters;
+                while (waiter != NULL) {
+                    waiter->b->tx_vtbl.wait_response(waiter->b, NOP_CONT,
+                                                     SYS_ERR_OK,
+                                                     entry->exit_status);
+                    struct domain_waiter *tmp = waiter;
+                    waiter = waiter->next;
+                    free(tmp);
+                }
+
+                for (coreid_t i = 0; i < MAX_COREID; ++i) {
+                    if (entry->spawnds[i] == NULL) {
+                        continue;
+                    }
+
+                    struct spawn_binding *spb = entry->spawnds[i]->b;
+
+                    struct pending_kill_cleanup *cleanup =
+                            (struct pending_kill_cleanup*) malloc(
+                                    sizeof(struct pending_kill_cleanup));
+                    cleanup->b = spb;
+                    cleanup->domain_cap = kc->domain_cap;
+                    cleanup->entry = entry;
+
+                    struct pending_client *cleanup_cl =
+                            (struct pending_client*) malloc(
+                                    sizeof(struct pending_client));
+                    cleanup_cl->b = cl->b;
+                    cleanup_cl->type = ClientType_Cleanup;
+                    cleanup_cl->st = cleanup;
+
+                    struct msg_queue_elem *msg =
+                            (struct msg_queue_elem*) malloc(
+                                    sizeof(struct msg_queue_elem));
+                    msg->st = cleanup_cl;
+                    msg->cont = cleanup_request_sender;
+
+                    err = spawnd_state_enqueue_send(entry->spawnds[i], msg);
+                    if (err_is_fail(err)) {
+                        DEBUG_ERR(err, "enqueuing cleanup request");
+                        free(cleanup);
+                        free(cleanup_cl);
+                        free(msg);
+                    }
+                }
+            }
+
+            free(kc);
+            break;
+
+        default:
+            USER_PANIC("Unknown client type in spawn_reply_handler: %u\n",
+                       cl->type);
+    }
+
+    free(cl);
+}
+
+/**
+ * \brief Handler for sending spawn requests.
+ *
+ * Runs from the spawnd send queue; returns false on FLOUNDER_ERR_TX_BUSY
+ * so the queue retries once the channel drains.
+ */
+static bool spawn_request_sender(struct msg_queue_elem *m)
+{
+    struct pending_client *cl = (struct pending_client*) m->st;
+    struct pending_spawn *spawn = (struct pending_spawn*) cl->st;
+    // Route the eventual spawnd reply to the shared reply handler.
+    spawn->b->rx_vtbl.spawn_reply = spawn_reply_handler;
+
+    errval_t err;
+    // Use the cap-carrying variant iff either cap argument is set.
+    bool with_caps = !(capref_is_null(spawn->inheritcn_cap) &&
+                       capref_is_null(spawn->argcn_cap));
+    if (with_caps) {
+        err = spawn->b->tx_vtbl.spawn_with_caps_request(spawn->b, NOP_CONT,
+                                                        cap_procmng,
+                                                        spawn->cap_node->domain_cap,
+                                                        spawn->path,
+                                                        spawn->argvbuf,
+                                                        spawn->argvbytes,
+                                                        spawn->envbuf,
+                                                        spawn->envbytes,
+                                                        spawn->inheritcn_cap,
+                                                        spawn->argcn_cap,
+                                                        spawn->flags);
+    } else {
+        err = spawn->b->tx_vtbl.spawn_request(spawn->b, NOP_CONT, cap_procmng,
+                                              spawn->cap_node->domain_cap,
+                                              spawn->path, spawn->argvbuf,
+                                              spawn->argvbytes, spawn->envbuf,
+                                              spawn->envbytes, spawn->flags);
+    }
+
+    if (err_is_fail(err)) {
+        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
+            return false;
+        } else {
+            USER_PANIC_ERR(err, "sending spawn request");
+        }
+    }
+
+    // Queue element consumed; cl/spawn are freed by the reply handler.
+    free(m);
+
+    return true;
+}
+
+/**
+ * \brief Handler for sending span requests.
+ *
+ * Runs from the spawnd send queue; returns false on FLOUNDER_ERR_TX_BUSY
+ * so the queue retries once the channel drains.
+ */
+static bool span_request_sender(struct msg_queue_elem *m)
+{
+    struct pending_client *cl = (struct pending_client*) m->st;
+    struct pending_span *span = (struct pending_span*) cl->st;
+
+    errval_t err;
+    // Route the eventual spawnd reply to the shared reply handler.
+    span->b->rx_vtbl.spawn_reply = spawn_reply_handler;
+    err = span->b->tx_vtbl.span_request(span->b, NOP_CONT, cap_procmng,
+                                        span->domain_cap, span->vroot,
+                                        span->dispframe);
+
+    if (err_is_fail(err)) {
+        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
+            return false;
+        } else {
+            USER_PANIC_ERR(err, "sending span request");
+        }
+    }
+
+    // Queue element consumed; cl/span are freed by the reply handler.
+    free(m);
+
+    return true;
+}
+
+/**
+ * \brief Handler for sending kill requests.
+ *
+ * Runs from the spawnd send queue; returns false on FLOUNDER_ERR_TX_BUSY
+ * so the queue retries once the channel drains.
+ */
+static bool kill_request_sender(struct msg_queue_elem *m)
+{
+    struct pending_client *cl = (struct pending_client*) m->st;
+    struct pending_kill_cleanup *kill = (struct pending_kill_cleanup*) cl->st;
+
+    errval_t err;
+    // Route the eventual spawnd reply to the shared reply handler.
+    kill->b->rx_vtbl.spawn_reply = spawn_reply_handler;
+    err = kill->b->tx_vtbl.kill_request(kill->b, NOP_CONT, cap_procmng,
+                                        kill->domain_cap);
+
+    if (err_is_fail(err)) {
+        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
+            return false;
+        } else {
+            USER_PANIC_ERR(err, "sending kill request");
+        }
+    }
+
+    // Queue element consumed; cl/kill are freed by the reply handler.
+    free(m);
+
+    return true;
+}
+
+/**
+ * \brief Handler for sending cleanup requests.
+ *
+ * Runs from the spawnd send queue; returns false on FLOUNDER_ERR_TX_BUSY
+ * so the queue retries once the channel drains.
+ */
+static bool cleanup_request_sender(struct msg_queue_elem *m)
+{
+    struct pending_client *cl = (struct pending_client*) m->st;
+    struct pending_kill_cleanup *cleanup = (struct pending_kill_cleanup*) cl->st;
+
+    errval_t err;
+    // Route the eventual spawnd reply to the shared reply handler.
+    cleanup->b->rx_vtbl.spawn_reply = spawn_reply_handler;
+    err = cleanup->b->tx_vtbl.cleanup_request(cleanup->b, NOP_CONT,
+                                              cap_procmng,
+                                              cleanup->domain_cap);
+
+    if (err_is_fail(err)) {
+        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
+            return false;
+        } else {
+            USER_PANIC_ERR(err, "sending cleanup request");
+        }
+    }
+
+    // Queue element consumed; cl/cleanup are freed by the reply handler.
+    free(m);
+
+    return true;
+}
+
+/**
+ * \brief Common bits of the spawn and spawn_with_caps handlers.
+ *
+ * Allocates a domain cap for the new domain and enqueues a spawn request
+ * to the spawnd on the target core. The client is answered later, from
+ * spawn_reply_handler, when spawnd replies.
+ */
+static errval_t spawn_handler_common(struct proc_mgmt_binding *b,
+                                     enum ClientType type,
+                                     coreid_t core_id, const char *path,
+                                     const char *argvbuf, size_t argvbytes,
+                                     const char *envbuf, size_t envbytes,
+                                     struct capref inheritcn_cap,
+                                     struct capref argcn_cap, uint8_t flags)
+{
+    if (!spawnd_state_exists(core_id)) {
+        return PROC_MGMT_ERR_INVALID_SPAWND;
+    }
+
+    struct spawnd_state *spawnd = spawnd_state_get(core_id);
+    assert(spawnd != NULL);
+    struct spawn_binding *cl = spawnd->b;
+    assert(cl != NULL);
+
+    // Top up the pool of preallocated domain caps if it ran dry.
+    errval_t err;
+    if (domain_should_refill_caps()) {
+        err = domain_prealloc_caps();
+        if (err_is_fail(err)) {
+            return err_push(err, PROC_MGMT_ERR_CREATE_DOMAIN_CAP);
+        }
+    }
+
+    struct domain_cap_node *cap_node = next_cap_node();
+
+    // Package the request for asynchronous completion.
+    // NOTE(review): the three mallocs below are unchecked.
+    struct pending_spawn *spawn = (struct pending_spawn*) malloc(
+            sizeof(struct pending_spawn));
+    spawn->cap_node = cap_node;
+    // spawn->domain_cap = domain_cap;
+    spawn->b = cl;
+    spawn->core_id = core_id;
+    spawn->path = path;
+    spawn->argvbuf = argvbuf;
+    spawn->argvbytes = argvbytes;
+    spawn->envbuf = envbuf;
+    spawn->envbytes = envbytes;
+    spawn->inheritcn_cap = inheritcn_cap;
+    spawn->argcn_cap = argcn_cap;
+    spawn->flags = flags;
+
+    struct pending_client *spawn_cl = (struct pending_client*) malloc(
+            sizeof(struct pending_client));
+    spawn_cl->b = b;
+    spawn_cl->type = type;
+    spawn_cl->st = spawn;
+
+    struct msg_queue_elem *msg = (struct msg_queue_elem*) malloc(
+            sizeof(struct msg_queue_elem));
+    msg->st = spawn_cl;
+    msg->cont = spawn_request_sender;
+
+    err = spawnd_state_enqueue_send(spawnd, msg);
+    if (err_is_fail(err)) {
+        DEBUG_ERR(err, "enqueuing spawn request");
+        free(spawn);
+        free(spawn_cl);
+        free(msg);
+    }
+
+    // NOTE(review): returns OK even if enqueuing failed above, in which
+    // case the client never receives a response -- confirm intended.
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief Handler for rpc spawn.
+ */
+static void spawn_handler(struct proc_mgmt_binding *b, coreid_t core_id,
+                          const char *path, const char *argvbuf,
+                          size_t argvbytes, const char *envbuf, size_t envbytes,
+                          uint8_t flags)
+{
+    errval_t err = spawn_handler_common(b, ClientType_Spawn, core_id, path,
+                                        argvbuf, argvbytes, envbuf, envbytes,
+                                        NULL_CAP, NULL_CAP, flags);
+    if (err_is_ok(err)) {
+        // Response is sent once spawnd replies.
+        return;
+    }
+
+    errval_t resp_err = b->tx_vtbl.spawn_response(b, NOP_CONT, err, NULL_CAP);
+    if (err_is_fail(resp_err)) {
+        DEBUG_ERR(resp_err, "failed to send spawn_response");
+    }
+}
+
+/**
+ * \brief Handler for rpc spawn_with_caps.
+ */
+static void spawn_with_caps_handler(struct proc_mgmt_binding *b,
+                                    coreid_t core_id, const char *path,
+                                    const char *argvbuf, size_t argvbytes,
+                                    const char *envbuf, size_t envbytes,
+                                    struct capref inheritcn_cap,
+                                    struct capref argcn_cap, uint8_t flags)
+{
+    errval_t err = spawn_handler_common(b, ClientType_SpawnWithCaps, core_id,
+                                        path, argvbuf, argvbytes, envbuf,
+                                        envbytes, inheritcn_cap, argcn_cap,
+                                        flags);
+    if (err_is_fail(err)) {
+        // Immediate failure; on success the spawnd reply triggers the
+        // response instead.
+        errval_t resp_err = b->tx_vtbl.spawn_with_caps_response(b, NOP_CONT,
+                                                                err, NULL_CAP);
+        if (err_is_fail(resp_err)) {
+            DEBUG_ERR(resp_err, "failed to send spawn_with_caps_response");
+        }
+    }
+}
+
+/**
+ * \brief Handler for rpc span.
+ *
+ * Validates the request, then enqueues a span request to the spawnd on
+ * the target core; the response is sent from spawn_reply_handler.
+ */
+static void span_handler(struct proc_mgmt_binding *b, struct capref domain_cap,
+                         coreid_t core_id, struct capref vroot,
+                         struct capref dispframe)
+{
+    errval_t err, resp_err;
+    struct domain_entry *entry = NULL;
+    err = domain_get_by_cap(domain_cap, &entry);
+    if (err_is_fail(err)) {
+        goto respond_with_err;
+    }
+
+    assert(entry != NULL);
+    if (entry->status != DOMAIN_STATUS_RUNNING) {
+        err = PROC_MGMT_ERR_DOMAIN_NOT_RUNNING;
+        goto respond_with_err;
+    }
+
+    if (entry->spawnds[core_id] != NULL) {
+        // TODO(razvan): Maybe we want to allow the same domain to span multiple
+        // dispatchers onto the same core?
+        err = PROC_MGMT_ERR_ALREADY_SPANNED;
+        goto respond_with_err;
+    }
+
+    if (!spawnd_state_exists(core_id)) {
+        err = PROC_MGMT_ERR_INVALID_SPAWND;
+        goto respond_with_err;
+    }
+
+    struct spawnd_state *spawnd = spawnd_state_get(core_id);
+    assert(spawnd != NULL);
+    struct spawn_binding *cl = spawnd->b;
+    assert(cl != NULL);
+
+    struct pending_span *span = (struct pending_span*) malloc(
+            sizeof(struct pending_span));
+    span->domain_cap = domain_cap;
+    span->entry = entry;
+    span->b = cl;
+    span->core_id = core_id;
+    span->vroot = vroot;
+    span->dispframe = dispframe;
+
+    struct pending_client *span_cl = (struct pending_client*) malloc(
+            sizeof(struct pending_client));
+    span_cl->b = b;
+    span_cl->type = ClientType_Span;
+    span_cl->st = span;
+
+    struct msg_queue_elem *msg = (struct msg_queue_elem*) malloc(
+            sizeof(struct msg_queue_elem));
+    msg->st = span_cl;
+    msg->cont = span_request_sender;
+
+    err = spawnd_state_enqueue_send(spawnd, msg);
+    if (err_is_fail(err)) {
+        DEBUG_ERR(err, "enqueuing span request");
+        free(span);
+        free(span_cl);
+        free(msg);
+        goto respond_with_err;
+    }
+
+    // BUGFIX: the request was enqueued successfully, so the response is
+    // sent from spawn_reply_handler. Falling through here used to send a
+    // spurious, premature second span_response.
+    return;
+
+respond_with_err:
+    resp_err = b->tx_vtbl.span_response(b, NOP_CONT, err);
+    if (err_is_fail(resp_err)) {
+        DEBUG_ERR(resp_err, "failed to send span_response");
+    }
+}
+
+/**
+ * \brief Common bits of the kill and exit handlers.
+ *
+ * Marks the domain stop-pending and fans a kill request out to every
+ * spawnd running a dispatcher for it; responses are issued from
+ * spawn_reply_handler once all spawnds have acknowledged.
+ */
+static errval_t kill_handler_common(struct proc_mgmt_binding *b,
+                                    struct capref domain_cap,
+                                    enum ClientType type,
+                                    uint8_t exit_status)
+{
+    struct domain_entry *entry;
+    errval_t err = domain_get_by_cap(domain_cap, &entry);
+    if (err_is_fail(err)) {
+        return err;
+    }
+
+    entry->exit_status = exit_status;
+    domain_stop_pending(entry);
+
+    for (coreid_t i = 0; i < MAX_COREID; ++i) {
+        if (entry->spawnds[i] == NULL) {
+            continue;
+        }
+
+        struct spawn_binding *spb = entry->spawnds[i]->b;
+
+        // NOTE(review): the three mallocs below are unchecked.
+        struct pending_kill_cleanup *cmd = (struct pending_kill_cleanup*) malloc(
+                sizeof(struct pending_kill_cleanup));
+        cmd->domain_cap = domain_cap;
+        cmd->entry = entry;
+        cmd->b = spb;
+
+        struct pending_client *cl = (struct pending_client*) malloc(
+                sizeof(struct pending_client));
+        cl->b = b;
+        cl->type = type;
+        cl->st = cmd;
+
+        struct msg_queue_elem *msg = (struct msg_queue_elem*) malloc(
+                sizeof(struct msg_queue_elem));
+        msg->st = cl;
+        msg->cont = kill_request_sender;
+
+        err = spawnd_state_enqueue_send(entry->spawnds[i], msg);
+        if (err_is_fail(err)) {
+            // Best-effort: log and keep notifying the remaining spawnds.
+            DEBUG_ERR(err, "enqueuing kill request");
+            free(cmd);
+            free(cl);
+            free(msg);
+        }
+    }
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief Handler for rpc kill.
+ */
+static void kill_handler(struct proc_mgmt_binding *b,
+                         struct capref victim_domain_cap)
+{
+    errval_t err = kill_handler_common(b, victim_domain_cap, ClientType_Kill,
+                                       EXIT_STATUS_KILLED);
+    if (err_is_ok(err)) {
+        // Response follows once all spawnds have acknowledged the kill.
+        return;
+    }
+
+    errval_t resp_err = b->tx_vtbl.kill_response(b, NOP_CONT, err);
+    if (err_is_fail(resp_err)) {
+        DEBUG_ERR(resp_err, "failed to send kill_response");
+    }
+}
+
+/**
+ * \brief Handler for message exit.
+ *
+ * The exiting domain requested its own teardown, so no response is sent.
+ */
+static void exit_handler(struct proc_mgmt_binding *b, struct capref domain_cap,
+                         uint8_t exit_status)
+{
+    errval_t err = kill_handler_common(b, domain_cap, ClientType_Exit,
+                                       exit_status);
+    if (err_is_fail(err)) {
+        DEBUG_ERR(err, "processing exit_handler for requesting domain, exit "
+                  "code %u", exit_status);
+    }
+    // Error or not, there's no client to respond to anymore.
+}
+
+/**
+ * \brief Handler for rpc wait.
+ *
+ * Replies immediately if the lookup fails or the domain already stopped;
+ * otherwise queues the client until the domain stops.
+ */
+static void wait_handler(struct proc_mgmt_binding *b, struct capref domain_cap)
+{
+    errval_t err, resp_err;
+    uint8_t exit_status = 0;
+    struct domain_entry *entry = NULL;
+    err = domain_get_by_cap(domain_cap, &entry);
+    if (err_is_fail(err)) {
+        // BUGFIX: entry is not valid here; the old code read
+        // entry->exit_status through an uninitialized pointer.
+        goto respond;
+    }
+
+    exit_status = entry->exit_status;
+    if (entry->status == DOMAIN_STATUS_STOPPED) {
+        // Domain has already been stopped, so just reply with exit status.
+        goto respond;
+    }
+
+    struct domain_waiter *waiter = (struct domain_waiter*) malloc(
+            sizeof(struct domain_waiter));
+    waiter->b = b;
+    waiter->next = entry->waiters;
+    entry->waiters = waiter;
+    // Will respond when domain is stopped.
+    return;
+
+respond:
+    resp_err = b->tx_vtbl.wait_response(b, NOP_CONT, err, exit_status);
+    if (err_is_fail(resp_err)) {
+        DEBUG_ERR(resp_err, "failed to send wait_response");
+    }
+}
+
+// Handler table for the local monitor binding: the monitor may announce
+// new spawnds via add_spawnd.
+static struct proc_mgmt_rx_vtbl monitor_vtbl = {
+    .add_spawnd = add_spawnd_handler,
+    .spawn_call = spawn_handler,
+    .spawn_with_caps_call = spawn_with_caps_handler,
+    .span_call = span_handler,
+    .kill_call = kill_handler,
+    .exit = exit_handler,
+    .wait_call = wait_handler
+};
+
+// Handler table for ordinary client bindings: identical except that
+// add_spawnd is ignored (only the monitor may announce spawnds).
+static struct proc_mgmt_rx_vtbl non_monitor_vtbl = {
+    .add_spawnd = add_spawnd_handler_non_monitor,
+    .spawn_call = spawn_handler,
+    .spawn_with_caps_call = spawn_with_caps_handler,
+    .span_call = span_handler,
+    .kill_call = kill_handler,
+    .exit = exit_handler,
+    .wait_call = wait_handler
+};
+
+/**
+ * \brief Allocates a special LMP endpoint for authenticating with the monitor.
+ *
+ * On success, *ep holds the local endpoint cap to hand to the monitor;
+ * the binding itself stays alive (intentionally not freed) and uses the
+ * monitor handler table.
+ */
+static errval_t alloc_ep_for_monitor(struct capref *ep)
+{
+    struct proc_mgmt_lmp_binding *lmpb =
+            malloc(sizeof(struct proc_mgmt_lmp_binding));
+    assert(lmpb != NULL);
+
+    // setup our end of the binding
+    errval_t err = proc_mgmt_client_lmp_accept(lmpb, get_default_waitset(),
+                                               DEFAULT_LMP_BUF_WORDS);
+    if (err_is_fail(err)) {
+        free(lmpb);
+        return err_push(err, LIB_ERR_PROC_MGMT_CLIENT_ACCEPT);
+    }
+
+    *ep = lmpb->chan.local_cap;
+    lmpb->b.rx_vtbl = monitor_vtbl;
+
+    return SYS_ERR_OK;
+}
+
+// Export callback: the proc_mgmt iref is ready. Hand an LMP endpoint to
+// the local monitor, then publish the iref for client domains.
+static void export_cb(void *st, errval_t err, iref_t iref)
+{
+    if (err_is_fail(err)) {
+        USER_PANIC_ERR(err, "export failed");
+    }
+
+    // Allocate an endpoint for the local monitor, who will use it to inform
+    // us about new spawnd irefs on behalf of other monitors.
+    struct capref ep;
+    err = alloc_ep_for_monitor(&ep);
+    if (err_is_fail(err)) {
+        USER_PANIC_ERR(err, "failed to allocate LMP EP for local monitor");
+    }
+
+    // Send the endpoint to the monitor, so it can finish the handshake.
+    struct monitor_binding *mb = get_monitor_binding();
+    err = mb->tx_vtbl.set_proc_mgmt_ep_request(mb, NOP_CONT, ep);
+    if (err_is_fail(err)) {
+        USER_PANIC_ERR(err, "failed to send set_proc_mgmt_ep_request to "
+                       "monitor");
+    }
+
+    // Also register this iref with the name service, for arbitrary client
+    // domains to use for spawn-related ops.
+    err = nameservice_register(SERVICE_BASENAME, iref);
+    if (err_is_fail(err)) {
+        USER_PANIC_ERR(err, "nameservice_register failed");
+    }
+}
+
+// Connect callback: ordinary clients get the non-monitor handler table.
+static errval_t connect_cb(void *st, struct proc_mgmt_binding *b)
+{
+    b->rx_vtbl = non_monitor_vtbl;
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief Prefills the domain-cap pool and exports the proc_mgmt interface.
+ */
+errval_t start_service(void)
+{
+    errval_t err = domain_prealloc_caps();
+    if (err_is_fail(err)) {
+        USER_PANIC_ERR(err_push(err, PROC_MGMT_ERR_CREATE_DOMAIN_CAP),
+                       "domain_prealloc_caps in start_service");
+    }
+
+    return proc_mgmt_export(NULL, export_cb, connect_cb, get_default_waitset(),
+                            IDC_EXPORT_FLAGS_DEFAULT);
+}
--- /dev/null
+/*
+ * \brief Spawnd state internals for the process manager.
+ *
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+
+#include "spawnd_state.h"
+
+static struct spawnd_state *spawnds[MAX_COREID];
+
+/**
+ * \brief Allocates a state structure for a new spawnd binding.
+ *
+ * \param core_id core where the spawnd newly bound with runs.
+ * \param b       Flounder binding structure for the spawnd.
+ */
+errval_t spawnd_state_alloc(coreid_t core_id, struct spawn_binding *b)
+{
+    // BUGFIX: bound-check the caller-supplied core id before indexing
+    // the fixed-size spawnds array.
+    if (core_id >= MAX_COREID) {
+        return PROC_MGMT_ERR_INVALID_SPAWND;
+    }
+
+    spawnds[core_id] = (struct spawnd_state*) malloc(
+            sizeof(struct spawnd_state));
+    if (spawnds[core_id] == NULL) {
+        return LIB_ERR_MALLOC_FAIL;
+    }
+
+    spawnds[core_id]->b = b;
+    spawnds[core_id]->core_id = core_id;
+    spawnds[core_id]->sendq.head = NULL;
+    spawnds[core_id]->sendq.tail = NULL;
+    spawnds[core_id]->recvq.head = NULL;
+    spawnds[core_id]->recvq.tail = NULL;
+
+    // Back-pointer so reply handlers can find this state from the binding.
+    b->st = spawnds[core_id];
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief Checks whether a spawnd binding exists for the given core.
+ *
+ * \return true iff spawnd_state_alloc() has run for core_id.
+ */
+inline bool spawnd_state_exists(coreid_t core_id)
+{
+    return (spawnds[core_id] == NULL) ? false : true;
+}
+
+/**
+ * \brief Looks up the state element for the spawnd on the given core.
+ *
+ * \return the state pointer, or NULL if no spawnd is bound on that core.
+ */
+inline struct spawnd_state *spawnd_state_get(coreid_t core_id)
+{
+    struct spawnd_state *state = spawnds[core_id];
+    return state;
+}
+
+/**
+ * \brief Appends an element at the tail of a waitset queue.
+ *
+ * \param q Pointer to queue to enqueue on
+ * \param m Pointer to element to enqueue
+ *
+ * \return true if the queue was empty before the insertion, false otherwise.
+ */
+static bool enqueue(struct msg_queue *q, struct msg_queue_elem *m)
+{
+    assert(m->next == NULL);
+
+    bool was_empty = (q->head == NULL);
+    if (was_empty) {
+        assert(q->tail == NULL);
+        q->head = m;
+    } else {
+        q->tail->next = m;
+    }
+    q->tail = m;
+
+    return was_empty;
+}
+
+/**
+ * \brief Removes and returns the head of a waitset queue.
+ *
+ * \param q Pointer to queue to dequeue from; must be non-empty.
+ *
+ * \return the element removed from the head.
+ */
+static struct msg_queue_elem *dequeue(struct msg_queue *q)
+{
+    struct msg_queue_elem *head = q->head;
+
+    // Queue must contain at least one element.
+    assert(head != NULL && q->tail != NULL);
+
+    q->head = head->next;
+    if (q->head == NULL) {
+        // Removed the last element; the queue is now empty.
+        q->tail = NULL;
+    }
+
+    return head;
+}
+
+/**
+ * \brief Pushes an element at the FRONT of a waitset queue.
+ *
+ * Used to put back a message whose send attempt failed.
+ *
+ * \param q Pointer to queue to enqueue on
+ * \param m Pointer to element to enqueue
+ *
+ * \return true if the queue was empty before the insertion, false otherwise.
+ */
+static bool enqueue_at_front(struct msg_queue *q, struct msg_queue_elem *m)
+{
+    assert(m->next == NULL);
+
+    bool was_empty = (q->head == NULL);
+    if (was_empty) {
+        assert(q->tail == NULL);
+        q->tail = m;
+    } else {
+        m->next = q->head;
+    }
+    q->head = m;
+
+    return was_empty;
+}
+
+/**
+ * \brief Event-based handler for sending requests to spawnd.
+ *
+ * This function pops the next request from the send queue of the targeted
+ * spawnd (wrapped inside arg). It attempts to send the request, re-enqueuing
+ * it at front if sending fails. It then re-registers a new send if the queue
+ * still has pending requests.
+ *
+ * \param arg Wrapper over the spawnd_state structure for the target spawnd.
+ */
+static void spawnd_send_handler(void *arg)
+{
+    struct spawnd_state *spawnd = (struct spawnd_state*) arg;
+    struct msg_queue *q = &spawnd->sendq;
+
+    // Dequeue next element from the queue
+    struct msg_queue_elem *m = dequeue(q);
+
+    assert(m->cont != NULL);
+    if (m->cont(m)) {
+        // Send continuation succeeded, need to enqueue a receive.
+        // BUGFIX: check the allocation; the original wrote through the
+        // pointer unconditionally, dereferencing NULL on OOM.
+        struct msg_queue_elem *recvm = malloc(sizeof(*recvm));
+        if (recvm == NULL) {
+            USER_PANIC("out of memory allocating spawnd receive record");
+        }
+        recvm->st = m->st;
+        recvm->next = NULL;
+        enqueue(&spawnd->recvq, recvm);
+    } else {
+        // Send continuation failed, need to re-enqueue message.
+        enqueue_at_front(q, m);
+    }
+
+    if (q->head != NULL) {
+        // Queue is non-empty, therefore re-register.
+        errval_t err = spawnd->b->register_send(spawnd->b, spawnd->b->waitset,
+                                                MKCONT(spawnd_send_handler,
+                                                       arg));
+        if (err_is_fail(err)) {
+            DEBUG_ERR(err, "registering for spawnd send");
+            return;
+        }
+    }
+}
+
+/**
+ * \brief Enqueues a new send request event for the given spawnd.
+ *
+ * \param spawnd target spawnd to send the request to.
+ * \param msg    request to enqueue.
+ *
+ * \return result of register_send if this was the first pending message,
+ *         SYS_ERR_OK otherwise.
+ */
+errval_t spawnd_state_enqueue_send(struct spawnd_state *spawnd,
+                                   struct msg_queue_elem *msg)
+{
+    msg->next = NULL;
+
+    bool queue_was_empty = enqueue(&spawnd->sendq, msg);
+    if (!queue_was_empty) {
+        // A send event is already registered on the waitset; it will drain
+        // the queue, so there is nothing more to do here.
+        return SYS_ERR_OK;
+    }
+
+    // First pending message: register a send event with the waitset.
+    return spawnd->b->register_send(spawnd->b, spawnd->b->waitset,
+                                    MKCONT(spawnd_send_handler, spawnd));
+}
+
+/**
+ * \brief Pops the next message from a spawnd's receive queue.
+ *
+ * \param spawnd spawnd instance whose receive queue to pop.
+ *
+ * \return the caller-supplied state (st) of the dequeued message.
+ */
+void *spawnd_state_dequeue_recv(struct spawnd_state *spawnd)
+{
+    struct msg_queue_elem *elem = dequeue(&spawnd->recvq);
+    assert(elem != NULL);
+
+    return elem->st;
+}
\ No newline at end of file
--- /dev/null
+/*
+ * \brief Spawnd state internals for the process manager.
+ *
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef SPAWND_STATE_H
+#define SPAWND_STATE_H
+
+#include <stdbool.h>
+
+#include <if/spawn_defs.h>
+#include <barrelfish/barrelfish.h>
+
+struct spawnd_state;
+struct msg_queue_elem;
+// Send continuation; returns true iff the message was sent successfully.
+typedef bool (*msg_cont_handler_fn)(struct msg_queue_elem*);
+
+// One queued request, destined for (or awaiting a response from) a spawnd.
+struct msg_queue_elem {
+    void *st;                  // caller-supplied request state
+    msg_cont_handler_fn cont;  // invoked by the send handler to transmit
+
+    struct msg_queue_elem *next;
+};
+
+// Singly-linked FIFO of pending messages.
+struct msg_queue {
+    struct msg_queue_elem *head, *tail;
+};
+
+// Per-core state for one spawnd binding.
+struct spawnd_state {
+    coreid_t core_id;          // core the spawnd runs on
+    struct spawn_binding *b;   // Flounder binding to that spawnd
+
+    struct msg_queue sendq;    // requests waiting to be sent
+    struct msg_queue recvq;    // requests sent, awaiting a response
+};
+
+errval_t spawnd_state_alloc(coreid_t core_id, struct spawn_binding *b);
+bool spawnd_state_exists(coreid_t core_id);
+struct spawnd_state *spawnd_state_get(coreid_t core_id);
+
+errval_t spawnd_state_enqueue_send(struct spawnd_state *spawnd,
+                                   struct msg_queue_elem *msg);
+void *spawnd_state_dequeue_recv(struct spawnd_state *spawnd);
+
+#endif // SPAWND_STATE_H
#include <string.h>
#include <assert.h>
#include <barrelfish/barrelfish.h>
+#include <collections/hash_table.h>
#include <vfs/vfs.h>
#include "ps.h"
+#define HASH_INDEX_BUCKETS 6151
+static collections_hash_table* ps_table = NULL;
+
static struct ps_entry *entries[MAX_DOMAINS];
errval_t ps_allocate(struct ps_entry *entry, domainid_t *domainid)
if(entries[i] == NULL) {
entries[i] = entry;
*domainid = i;
+ entry->domain_id = i;
return SYS_ERR_OK;
}
}
return entries[domain_id];
}
+
+/**
+ * \brief Associates a domain capability with a process entry.
+ *
+ * Lazily creates the global domain hash table on first use, computes a
+ * hash key from the domain cap and inserts the entry under that key.
+ *
+ * \param entry      process entry to register; its domain_cap is set here.
+ * \param domain_cap capability identifying the domain.
+ *
+ * \return SYS_ERR_OK, SPAWN_ERR_CREATE_DOMAIN_TABLE if the table cannot be
+ *         created, or the error from domain_cap_hash.
+ */
+errval_t ps_hash_domain(struct ps_entry *entry, struct capref domain_cap)
+{
+    entry->domain_cap = domain_cap;
+
+    if (ps_table == NULL) {
+        // NULL free function: the table never frees entries; callers keep
+        // ownership (see ps_release_domain).
+        collections_hash_create_with_buckets(&ps_table, HASH_INDEX_BUCKETS,
+                                             NULL);
+        if (ps_table == NULL) {
+            return SPAWN_ERR_CREATE_DOMAIN_TABLE;
+        }
+    }
+
+    uint64_t key;
+    errval_t err = domain_cap_hash(entry->domain_cap, &key);
+    if (err_is_fail(err)) {
+        return err;
+    }
+
+    // NOTE(review): assumes domain cap hash keys do not collide; behavior
+    // of collections_hash_insert on a duplicate key — TODO confirm.
+    collections_hash_insert(ps_table, key, entry);
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief Looks up the process entry for a given domain capability.
+ *
+ * \param domain_cap   capability identifying the domain.
+ * \param ret_entry    filled in with the matching entry; must be non-NULL.
+ * \param ret_hash_key optionally filled in with the computed hash key.
+ *
+ * \return SYS_ERR_OK, SPAWN_ERR_DOMAIN_TABLE_FIND if the domain is not in
+ *         the table, or the error from domain_cap_hash.
+ */
+errval_t ps_get_domain(struct capref domain_cap, struct ps_entry **ret_entry,
+                       uint64_t *ret_hash_key)
+{
+    assert(ret_entry != NULL);
+
+    // BUGFIX: the table is created lazily by ps_hash_domain(); a lookup
+    // before any insert would hand a NULL table to collections_hash_find.
+    if (ps_table == NULL) {
+        return SPAWN_ERR_DOMAIN_TABLE_FIND;
+    }
+
+    uint64_t key;
+    errval_t err = domain_cap_hash(domain_cap, &key);
+    if (err_is_fail(err)) {
+        return err;
+    }
+
+    void *table_entry = collections_hash_find(ps_table, key);
+    if (table_entry == NULL) {
+        return SPAWN_ERR_DOMAIN_TABLE_FIND;
+    }
+    *ret_entry = (struct ps_entry*) table_entry;
+
+    if (ret_hash_key != NULL) {
+        *ret_hash_key = key;
+    }
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief Removes a domain from the hash table, returning its entry.
+ *
+ * The entry itself is NOT freed (the table was created with a NULL free
+ * function); ownership passes to the caller via *ret_entry.
+ *
+ * \param domain_cap capability identifying the domain.
+ * \param ret_entry  filled in with the removed entry; must be non-NULL.
+ *
+ * \return SYS_ERR_OK, or the lookup error from ps_get_domain.
+ */
+errval_t ps_release_domain(struct capref domain_cap,
+                           struct ps_entry **ret_entry)
+{
+    assert(ret_entry != NULL);
+
+    uint64_t key;
+    errval_t err = ps_get_domain(domain_cap, ret_entry, &key);
+    if (err_is_fail(err)) {
+        return err;
+    }
+
+    collections_hash_delete(ps_table, key);
+
+    return SYS_ERR_OK;
+}
char *argv[MAX_CMDLINE_ARGS];
char *argbuf;
size_t argbytes;
- struct capref rootcn_cap, dcb;
+
+ domainid_t domain_id;
+
+ struct capref domain_cap;
+ uint64_t domain_cap_hash;
+
+ struct capref rootcn_cap;
+ struct capref dispframe;
+ struct capref dcb;
struct cnoderef rootcn;
uint8_t exitcode;
enum ps_status status;
bool ps_exists(domainid_t domain_id);
struct ps_entry *ps_get(domainid_t domain_id);
+errval_t ps_hash_domain(struct ps_entry *entry, struct capref domain_cap);
+errval_t ps_get_domain(struct capref domain_cap, struct ps_entry **ret_entry,
+ uint64_t *ret_hash_key);
+errval_t ps_release_domain(struct capref domain_cap,
+ struct ps_entry **ret_entry);
+
#endif
#include <string.h>
#include <barrelfish/barrelfish.h>
#include <spawndomain/spawndomain.h>
+#include <barrelfish/monitor_client.h>
#include <barrelfish/nameservice_client.h>
#include <barrelfish/cpu_arch.h>
#include <vfs/vfs.h>
#include <vfs/vfs_path.h>
#include <dist/barrier.h>
#include <if/spawn_defs.h>
+#include <if/monitor_defs.h>
#include <if/monitor_blocking_defs.h>
#include <barrelfish/dispatcher_arch.h>
#include <barrelfish/invocations_arch.h>
#include "ps.h"
-static errval_t spawn(const char *path, char *const argv[], const char *argbuf,
- size_t argbytes, char *const envp[],
- struct capref inheritcn_cap, struct capref argcn_cap,
- uint8_t flags, domainid_t *domainid)
+static errval_t spawn(struct capref domain_cap, const char *path,
+ char *const argv[], const char *argbuf, size_t argbytes,
+ char *const envp[], struct capref inheritcn_cap,
+ struct capref argcn_cap, uint8_t flags,
+ domainid_t *domainid)
{
errval_t err, msgerr;
src.slot = TASKCN_SLOT_PERF_MON;
err = cap_copy(dest, src);
if (err_is_fail(err)) {
- return err_push(err, INIT_ERR_COPY_PERF_MON);
+ return err_push(err, SPAWN_ERR_COPY_PERF_MON);
+ }
+
+ if (!capref_is_null(domain_cap)) {
+ // Pass over the domain cap.
+ dest.cnode = si.taskcn;
+ dest.slot = TASKCN_SLOT_DOMAINID;
+ err = cap_copy(dest, domain_cap);
+ if (err_is_fail(err)) {
+ return err_push(err, SPAWN_ERR_COPY_DOMAIN_CAP);
+ }
}
/* run the domain */
err = cap_copy(pe->dcb, si.dcb);
assert(err_is_ok(err));
pe->status = PS_STATUS_RUNNING;
+
+ if (!capref_is_null(domain_cap)) {
+ err = ps_hash_domain(pe, domain_cap);
+ if (err_is_fail(err)) {
+ free(pe);
+ spawn_free(&si);
+ return err_push(err, SPAWN_ERR_DOMAIN_CAP_HASH);
+ }
+ }
+
err = ps_allocate(pe, domainid);
if(err_is_fail(err)) {
free(pe);
domainid_t domainid;
};
-static errval_t spawn_with_caps_common(const char *path, const char *argbuf,
+static errval_t spawn_with_caps_common(struct capref domain_cap,
+ const char *path, const char *argbuf,
size_t argbytes, const char *envbuf,
size_t envbytes,
struct capref inheritcn_cap,
strcpy(npath, path);
vfs_path_normalise(npath);
- err = spawn(npath, argv, argbuf, argbytes, envp, inheritcn_cap, argcn_cap,
- flags, domainid);
+ err = spawn(domain_cap, npath, argv, argbuf, argbytes, envp, inheritcn_cap,
+ argcn_cap, flags, domainid);
// XXX: do we really want to delete the inheritcn and the argcn here? iaw:
// do we copy these somewhere? -SG
if (!capref_is_null(inheritcn_cap)) {
struct capref inheritcn_cap, struct capref argcn_cap, uint8_t flags,
errval_t *err, spawn_domainid_t *domain_id)
{
- *err = spawn_with_caps_common(path, argvbuf, argvbytes, envbuf, envbytes,
- inheritcn_cap, argcn_cap, flags, domain_id);
+ *err = spawn_with_caps_common(NULL_CAP, path, argvbuf, argvbytes, envbuf,
+ envbytes, inheritcn_cap, argcn_cap, flags,
+ domain_id);
return SYS_ERR_OK;
}
const char *argvbuf, size_t argvbytes, const char *envbuf, size_t envbytes,
uint8_t flags, errval_t *err, spawn_domainid_t *domain_id)
{
- *err = spawn_with_caps_common(path, argvbuf, argvbytes, envbuf, envbytes,
- NULL_CAP, NULL_CAP, flags, domain_id);
+ *err = spawn_with_caps_common(NULL_CAP, path, argvbuf, argvbytes, envbuf,
+ envbytes, NULL_CAP, NULL_CAP, flags,
+ domain_id);
return SYS_ERR_OK;
}
+/**
+ * \brief Handler for spawn_with_caps requests from the process manager.
+ *
+ * Verifies that the request carries a genuine ProcessManager capability,
+ * then spawns the given binary with the supplied inherit/arg cnodes and
+ * replies with the resulting error code.
+ */
+static void spawn_with_caps_request_handler(struct spawn_binding *b,
+                                            struct capref procmng_cap,
+                                            struct capref domain_cap,
+                                            const char *path,
+                                            const char *argvbuf,
+                                            size_t argvbytes,
+                                            const char *envbuf,
+                                            size_t envbytes,
+                                            struct capref inheritcn_cap,
+                                            struct capref argcn_cap,
+                                            uint8_t flags)
+{
+    errval_t err, reply_err;
+    struct capability ret;
+    err = monitor_cap_identify_remote(procmng_cap, &ret);
+    if (err_is_fail(err)) {
+        err = err_push(err, SPAWN_ERR_IDENTIFY_PROC_MNGR_CAP);
+        goto reply;
+    }
+
+    // Only the process manager may issue this request.
+    if (ret.type != ObjType_ProcessManager) {
+        err = SPAWN_ERR_NOT_PROC_MNGR;
+        goto reply;
+    }
+
+    // The local domain id is unused here; the domain cap is the canonical id.
+    spawn_domainid_t dummy_domain_id;
+    err = spawn_with_caps_common(domain_cap, path, argvbuf, argvbytes, envbuf,
+                                 envbytes, inheritcn_cap, argcn_cap, flags,
+                                 &dummy_domain_id);
+
+reply:
+    // BUGFIX: log reply_err (the actual send failure), not err, which may
+    // well be SYS_ERR_OK at this point.
+    reply_err = b->tx_vtbl.spawn_reply(b, NOP_CONT, err);
+    if (err_is_fail(reply_err)) {
+        DEBUG_ERR(reply_err, "failed to send spawn_with_caps_reply");
+    }
+}
+
+/**
+ * \brief Handler for spawn requests from the process manager.
+ *
+ * Verifies that the request carries a genuine ProcessManager capability,
+ * then spawns the given binary (without extra cnodes) and replies with the
+ * resulting error code.
+ */
+static void spawn_request_handler(struct spawn_binding *b,
+                                  struct capref procmng_cap,
+                                  struct capref domain_cap, const char *path,
+                                  const char *argvbuf, size_t argvbytes,
+                                  const char *envbuf, size_t envbytes,
+                                  uint8_t flags)
+{
+    errval_t err, reply_err;
+    struct capability ret;
+    err = monitor_cap_identify_remote(procmng_cap, &ret);
+    if (err_is_fail(err)) {
+        err = err_push(err, SPAWN_ERR_IDENTIFY_PROC_MNGR_CAP);
+        goto reply;
+    }
+
+    // Only the process manager may issue this request.
+    if (ret.type != ObjType_ProcessManager) {
+        err = SPAWN_ERR_NOT_PROC_MNGR;
+        goto reply;
+    }
+
+    // The local domain id is unused here; the domain cap is the canonical id.
+    spawn_domainid_t dummy_domain_id;
+    err = spawn_with_caps_common(domain_cap, path, argvbuf, argvbytes, envbuf,
+                                 envbytes, NULL_CAP, NULL_CAP, flags,
+                                 &dummy_domain_id);
+
+reply:
+    // BUGFIX: log reply_err (the actual send failure), not err, which may
+    // well be SYS_ERR_OK at this point.
+    reply_err = b->tx_vtbl.spawn_reply(b, NOP_CONT, err);
+    if (err_is_fail(reply_err)) {
+        DEBUG_ERR(reply_err, "failed to send spawn_reply");
+    }
+}
+
+/**
+ * \brief Handler for span requests from the process manager.
+ *
+ * Verifies the ProcessManager capability, then spans the domain identified
+ * by domain_cap onto this core using the provided vroot and dispatcher
+ * frame, wires up its monitor connection, makes it runnable and tracks it
+ * in the local process table. Always replies with the resulting error code.
+ */
+static void span_request_handler(struct spawn_binding *b,
+                                 struct capref procmng_cap,
+                                 struct capref domain_cap, struct capref vroot,
+                                 struct capref dispframe)
+{
+    errval_t err, mon_err, reply_err;
+    struct capability ret;
+    err = monitor_cap_identify_remote(procmng_cap, &ret);
+    if (err_is_fail(err)) {
+        err = err_push(err, SPAWN_ERR_IDENTIFY_PROC_MNGR_CAP);
+        goto reply;
+    }
+
+    // Only the process manager may issue this request.
+    if (ret.type != ObjType_ProcessManager) {
+        err = SPAWN_ERR_NOT_PROC_MNGR;
+        goto reply;
+    }
+
+    struct spawninfo si;
+    memset(&si, 0, sizeof(si));
+
+    debug_printf("Spanning domain to core %d\n", disp_get_core_id());
+
+    // Span domain
+    err = spawn_span_domain(&si, vroot, dispframe);
+    if (err_is_fail(err)) {
+        err = err_push(err, SPAWN_ERR_SPAN);
+        goto reply;
+    }
+
+    // Set connection to monitor.
+    struct monitor_blocking_binding *mrpc = get_monitor_blocking_binding();
+    struct capref monep;
+    err = slot_alloc(&monep);
+    if (err_is_fail(err)) {
+        err = err_push(err, SPAWN_ERR_MONEP_SLOT_ALLOC);
+        goto reply;
+    }
+    err = mrpc->rpc_tx_vtbl.alloc_monitor_ep(mrpc, &mon_err, &monep);
+    if (err_is_ok(err)) {
+        err = mon_err;
+    }
+    if (err_is_fail(err)) {
+        err = err_push(err, SPAWN_ERR_MONITOR_CLIENT);
+        goto reply;
+    }
+
+    /* copy connection into the new domain */
+    struct capref destep = {
+        .cnode = si.taskcn,
+        .slot = TASKCN_SLOT_MONITOREP,
+    };
+    err = cap_copy(destep, monep);
+    if (err_is_fail(err)) {
+        spawn_free(&si);
+        cap_destroy(monep);
+        err = err_push(err, SPAWN_ERR_MONITOR_CLIENT);
+        goto reply;
+    }
+
+    err = cap_destroy(monep);
+    if (err_is_fail(err)) {
+        err = err_push(err, SPAWN_ERR_MONITOR_CLIENT);
+        goto reply;
+    }
+
+    /* give the perfmon capability */
+    struct capref dest, src;
+    dest.cnode = si.taskcn;
+    dest.slot = TASKCN_SLOT_PERF_MON;
+    src.cnode = cnode_task;
+    src.slot = TASKCN_SLOT_PERF_MON;
+    err = cap_copy(dest, src);
+    if (err_is_fail(err)) {
+        err = err_push(err, SPAWN_ERR_COPY_PERF_MON);
+        goto reply;
+    }
+
+    // Pass over the domain cap.
+    dest.cnode = si.taskcn;
+    dest.slot = TASKCN_SLOT_DOMAINID;
+    err = cap_copy(dest, domain_cap);
+    if (err_is_fail(err)) {
+        err = err_push(err, SPAWN_ERR_COPY_DOMAIN_CAP);
+        goto reply;
+    }
+
+    // Make runnable
+    err = spawn_run(&si);
+    if (err_is_fail(err)) {
+        err = err_push(err, SPAWN_ERR_RUN);
+        goto reply;
+    }
+
+    // Allocate an id for this dispatcher.
+    struct ps_entry *pe = malloc(sizeof(struct ps_entry));
+    assert(pe != NULL);
+    memset(pe, 0, sizeof(struct ps_entry));
+    /*
+     * NB: It's important to keep a copy of the DCB *and* the root
+     * CNode around. We need to revoke both (in the right order, see
+     * kill_domain() below), so that we ensure no one else is
+     * referring to the domain's CSpace anymore. Especially the loop
+     * created by placing rootcn into its own address space becomes a
+     * problem here.
+     */
+    // TODO(razvan): The following code is here to comply with spawn().
+    err = slot_alloc(&pe->rootcn_cap);
+    assert(err_is_ok(err));
+    err = cap_copy(pe->rootcn_cap, si.rootcn_cap);
+    pe->rootcn = si.rootcn;
+    assert(err_is_ok(err));
+    err = slot_alloc(&pe->dcb);
+    assert(err_is_ok(err));
+    err = cap_copy(pe->dcb, si.dcb);
+    assert(err_is_ok(err));
+    pe->status = PS_STATUS_RUNNING;
+
+    err = ps_hash_domain(pe, domain_cap);
+    if (err_is_fail(err)) {
+        free(pe);
+        spawn_free(&si);
+        err = err_push(err, SPAWN_ERR_DOMAIN_CAP_HASH);
+        goto reply;
+    }
+
+    domainid_t domainid;
+    err = ps_allocate(pe, &domainid);
+    if (err_is_fail(err)) {
+        // BUGFIX: pe was already inserted into the domain hash table above;
+        // remove it before freeing so the table keeps no dangling pointer.
+        // Also report this failure instead of letting spawn_free() below
+        // silently overwrite err.
+        struct ps_entry *released;
+        ps_release_domain(domain_cap, &released);
+        free(pe);
+        spawn_free(&si);
+        goto reply;
+    }
+
+    // Cleanup
+    err = spawn_free(&si);
+    if (err_is_fail(err)) {
+        err = err_push(err, SPAWN_ERR_FREE);
+    }
+
+reply:
+    // BUGFIX: log reply_err (the actual send failure), not err.
+    reply_err = b->tx_vtbl.spawn_reply(b, NOP_CONT, err);
+    if (err_is_fail(reply_err)) {
+        DEBUG_ERR(reply_err, "failed to send span_reply");
+    }
+}
+
+/**
+ * \brief Revokes all copies/descendants of a capability, then deletes it.
+ *
+ * Failures are logged but not propagated; cleanup is best-effort.
+ */
+static void cleanup_cap(struct capref cap)
+{
+    errval_t err = cap_revoke(cap);
+    if (err_is_fail(err)) {
+        DEBUG_ERR(err, "cap_revoke");
+    }
+
+    err = cap_destroy(cap);
+    if (err_is_fail(err)) {
+        DEBUG_ERR(err, "cap_destroy");
+    }
+}
+
+/**
+ * \brief Handler for kill requests from the process manager.
+ *
+ * Verifies the ProcessManager capability, looks up the victim domain by its
+ * domain cap and revokes+destroys its DCB, stopping the dispatcher. The
+ * process-table entry is released later via cleanup_request_handler.
+ */
+static void kill_request_handler(struct spawn_binding *b,
+                                 struct capref procmng_cap,
+                                 struct capref victim_domain_cap)
+{
+    errval_t err, reply_err;
+    struct capability ret;
+    err = monitor_cap_identify_remote(procmng_cap, &ret);
+    if (err_is_fail(err)) {
+        err = err_push(err, SPAWN_ERR_IDENTIFY_PROC_MNGR_CAP);
+        goto reply;
+    }
+
+    // Only the process manager may issue this request.
+    if (ret.type != ObjType_ProcessManager) {
+        err = SPAWN_ERR_NOT_PROC_MNGR;
+        goto reply;
+    }
+
+    struct ps_entry *pe;
+    err = ps_get_domain(victim_domain_cap, &pe, NULL);
+    if (err_is_fail(err)) {
+        err = err_push(err, SPAWN_ERR_DOMAIN_NOTFOUND);
+        goto reply;
+    }
+
+    cleanup_cap(pe->dcb);
+
+reply:
+    // BUGFIX: log reply_err (the actual send failure), not err, which may
+    // well be SYS_ERR_OK at this point.
+    reply_err = b->tx_vtbl.spawn_reply(b, NOP_CONT, err);
+    if (err_is_fail(reply_err)) {
+        DEBUG_ERR(reply_err, "failed to send kill_reply");
+    }
+}
+
+/**
+ * \brief Handler for cleanup requests from the process manager.
+ *
+ * Verifies the ProcessManager capability, removes the domain from the local
+ * process table, releases its root CNode and frees the entry. Waiters are
+ * handled by the process manager, not here.
+ */
+static void cleanup_request_handler(struct spawn_binding *b,
+                                    struct capref procmng_cap,
+                                    struct capref domain_cap)
+{
+    errval_t err, reply_err;
+    struct capability ret;
+    err = monitor_cap_identify_remote(procmng_cap, &ret);
+    if (err_is_fail(err)) {
+        err = err_push(err, SPAWN_ERR_IDENTIFY_PROC_MNGR_CAP);
+        goto reply;
+    }
+
+    // Only the process manager may issue this request.
+    if (ret.type != ObjType_ProcessManager) {
+        err = SPAWN_ERR_NOT_PROC_MNGR;
+        goto reply;
+    }
+
+    struct ps_entry *pe;
+    err = ps_release_domain(domain_cap, &pe);
+    if (err_is_fail(err)) {
+        err = err_push(err, SPAWN_ERR_DOMAIN_NOTFOUND);
+        goto reply;
+    }
+
+    cleanup_cap(pe->rootcn_cap);
+
+    // Cleanup struct ps_entry. Note that waiters will be handled by the
+    // process manager, as opposed to the old protocol of handling them here.
+    free(pe->argbuf);
+    ps_remove(pe->domain_id);
+    free(pe);
+
+reply:
+    // BUGFIX: log reply_err (the actual send failure), not err, which may
+    // well be SYS_ERR_OK at this point.
+    reply_err = b->tx_vtbl.spawn_reply(b, NOP_CONT, err);
+    if (err_is_fail(reply_err)) {
+        DEBUG_ERR(reply_err, "failed to send cleanup_reply");
+    }
+}
+
/**
* \brief Removes a zombie domain.
*/
ps_remove(domainid);
}
-static void cleanup_cap(struct capref cap)
-{
- errval_t err;
-
- err = cap_revoke(cap);
- if (err_is_fail(err)) {
- DEBUG_ERR(err, "cap_revoke");
- }
- err = cap_destroy(cap);
- if (err_is_fail(err)) {
- DEBUG_ERR(err, "cap_destroy");
- }
-}
-
static errval_t kill_domain(domainid_t domainid, uint8_t exitcode)
{
struct ps_entry *ps = ps_get(domainid);
static struct spawn_rx_vtbl rx_vtbl = {
// .spawn_domain_call = spawn_handler,
// .spawn_domain_with_caps_call = spawn_with_caps_handler,
+
+ // Async messages for the process manager.
+ .spawn_request = spawn_request_handler,
+ .spawn_with_caps_request = spawn_with_caps_request_handler,
+ .span_request = span_request_handler,
+ .kill_request = kill_request_handler,
+ .cleanup_request = cleanup_request_handler,
+
.use_local_memserv_call = use_local_memserv_handler,
.kill_call = kill_handler,
.exit_call = exit_handler,
USER_PANIC_ERR(err, "export failed");
}
+ // Send iref back to monitor, which will forward it to the process manager.
+ struct monitor_binding *mb = get_monitor_binding();
+ err = mb->tx_vtbl.set_spawn_iref_request(mb, NOP_CONT, iref);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "failed to send set_spawn_iref_request to "
+ "monitor");
+ }
+
// construct name
char namebuf[32];
size_t len = snprintf(namebuf, sizeof(namebuf), "%s.%d", SERVICE_BASENAME,
--- /dev/null
+--------------------------------------------------------------------------
+-- Copyright (c) 2017, ETH Zurich.
+-- All rights reserved.
+--
+-- This file is distributed under the terms in the attached LICENSE file.
+-- If you do not find this file, copies can be found by writing to:
+-- ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+--
+-- Hakefile for proc_mgmt_test
+--
+--------------------------------------------------------------------------
+
+-- Builds the proc_mgmt_test benchmark client against the proc_mgmt
+-- Flounder interface (with the RPC client backend).
+[
+build application { target = "proc_mgmt_test",
+                    cFiles = [ "main.c" ],
+                    addLibraries = libDeps [ "bench", "vfs" ],
+                    flounderDefs = [ "proc_mgmt" ],
+                    flounderExtraDefs = [ ("proc_mgmt",["rpcclient"]) ]
+                  }
+]
--- /dev/null
+/** \file
+ * \brief Process Management test.
+ */
+
+/*
+ * Copyright (c) 2017, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <stdlib.h>
+
+#include <barrelfish/barrelfish.h>
+#include <barrelfish/deferred.h>
+#include <barrelfish/proc_mgmt_client.h>
+#include <bench/bench.h>
+
+#define PROC_MGMT_BENCH 1
+#define PROC_MGMT_BENCH_MIN_RUNS 150
+
+int my_id;
+// int total_ids;
+
+/**
+ * \brief Spawns a new instance of proc_mgmt_test on the given core.
+ *
+ * \param core_id        core to spawn the new instance on.
+ * \param argv           argument vector for the new instance.
+ * \param ret_domain_cap filled in with the new domain's capability.
+ */
+static void test_spawn(coreid_t core_id, char *argv[],
+                       struct capref *ret_domain_cap)
+{
+    assert(ret_domain_cap != NULL);
+
+    errval_t err = proc_mgmt_spawn_program(core_id,
+                                           "/x86_64/sbin/proc_mgmt_test",
+                                           argv, NULL, 0, ret_domain_cap);
+    // Result reporting is deliberately silenced to keep the benchmark loop
+    // free of I/O; errors are otherwise dropped here.
+    if (err_is_ok(err)) {
+        // printf("OK: my_id=%i, spawn(%u, proc_mgmt_test)\n", my_id, core_id);
+    } else {
+        // printf("FAIL: spawn(%u, proc_mgmt_test), err=%s\n", core_id,
+        //        err_getstring(err));
+    }
+}
+
+// static void test_span(coreid_t core_id)
+// {
+// errval_t err = proc_mgmt_span(core_id);
+// if (err_is_ok(err)) {
+// printf("OK: span(%u)\n", core_id);
+// } else {
+// printf("FAIL: span(%u), err=%s\n", core_id, err_getstring(err));
+// }
+// }
+
+// static void test_kill(struct capref domain_cap)
+// {
+// errval_t err = proc_mgmt_kill(domain_cap);
+// if (err_is_ok(err)) {
+// printf("OK: kill\n");
+// } else {
+// printf("FAIL: kill, err=%s\n", err_getstring(err));
+// }
+// }
+
+// static void test_wait(struct capref domain_cap)
+// {
+// uint8_t code;
+// errval_t err = proc_mgmt_wait(domain_cap, &code);
+// if (err_is_ok(err)) {
+// printf("OK: wait, code=%u\n", code);
+// } else {
+// printf("FAIL: wait, err=%s\n", err_getstring(err));
+// }
+// }
+
+/**
+ * \brief Computes the elapsed cycles between two TSC readings, minus the
+ * measured TSC read overhead.
+ *
+ * NOTE(review): the wrap-around branch uses LONG_MAX, which is only correct
+ * if cycles_t is a signed long; if cycles_t is unsigned 64-bit, plain
+ * (tsc_end - tsc_start) already handles wrap — confirm cycles_t's width.
+ * Also, if the interval is shorter than bench_tscoverhead(), the result
+ * underflows — TODO confirm callers only time longer intervals.
+ */
+static inline cycles_t calculate_time(cycles_t tsc_start, cycles_t tsc_end)
+{
+    cycles_t result;
+    if (tsc_end < tsc_start) {
+        // Counter wrapped between the two readings.
+        result = (LONG_MAX - tsc_start) + tsc_end - bench_tscoverhead();
+    } else {
+        result = (tsc_end - tsc_start - bench_tscoverhead());
+    }
+    return result;
+}
+
+/**
+ * \brief Benchmark driver: with 2 arguments (<my_id> <total_ids>) runs a
+ * fixed number of spawn-latency measurements against the process manager;
+ * otherwise exits immediately (spawned instances take this path).
+ */
+int main(int argc, char **argv)
+{
+    if (argc == 3) {
+        bench_init();
+
+        cycles_t tsc_start, tsc_end;
+        cycles_t result;
+        uint64_t tscperus;
+
+        // NOTE(review): both calloc results are unchecked; a failed
+        // allocation would crash inside the bench library — TODO add checks.
+        bench_ctl_t *ctl = calloc(1, sizeof(*ctl));
+        ctl->mode = BENCH_MODE_FIXEDRUNS;
+        ctl->result_dimensions = 1;
+        ctl->min_runs = PROC_MGMT_BENCH_MIN_RUNS;
+        ctl->data = calloc(ctl->min_runs * ctl->result_dimensions,
+                           sizeof(*ctl->data));
+
+        errval_t err = sys_debug_get_tsc_per_ms(&tscperus);
+        assert(err_is_ok(err));
+        tscperus /= 1000;
+
+        // NOTE(review): target core is hard-coded to 7 for this experiment.
+        coreid_t target_core = 7;//strcmp(argv[1], "1") == 0 ? 1 : 2;
+        char *spawn_argv[] = { "proc_mgmt_test", "noop", NULL };
+        my_id = atoi(argv[1]);
+        struct capref domain_cap;
+        // total_ids = atoi(argv[2]);
+
+        // NOTE(review): (0 - 1 * my_id) is non-positive for my_id >= 0, so
+        // this sleep is a no-op or relies on barrelfish_usleep's handling of
+        // negative values — TODO confirm intent.
+        barrelfish_usleep((0 - 1 * my_id) * 100 * 1000);
+
+        // ctl = bench_ctl_init(BENCH_MODE_FIXEDRUNS, 1, 100);
+        do {
+            // debug_printf("BEFORE test_spawn\n");
+            tsc_start = bench_tsc();
+
+            test_spawn(target_core, spawn_argv, &domain_cap);
+            // test_wait(domain_cap);
+
+            tsc_end = bench_tsc();
+            result = calculate_time(tsc_start, tsc_end);
+
+            barrelfish_usleep(0 * 1000 * 1000);
+        }while (!bench_ctl_add_run(ctl, &result));
+        // } while (true);
+
+        bench_ctl_dump_analysis(ctl, 0, "client", tscperus);
+
+        bench_ctl_destroy(ctl);
+
+        // Give spawned children time to settle before declaring success.
+        barrelfish_usleep(5 * 1000 * 1000);
+
+        printf("TEST DONE\n");
+    } else {
+        // Spawned (victim) instance: exits immediately. The dispatch loop
+        // below is kept for experiments that need the child to stay alive.
+        // for(;;) {
+        //     errval_t err = event_dispatch(get_default_waitset());
+        //     if(err_is_fail(err)) {
+        //         USER_PANIC_ERR(err, "event_dispatch");
+        //     }
+        // }
+    }
+
+    // printf("HELLO from proc_mgmt_test, argc=%u\n", argc);
+    // // errval_t err;
+    // if (argc == 1) {
+    //     struct capref domain_cap_0;
+    //     char *argv_0[] = { "proc_mgmt_test", "1", NULL };
+    //     test_spawn(0, argv_0, &domain_cap_0);
+    //     barrelfish_usleep(5*1000*1000);
+    //     // while (true);
+    //     test_kill(domain_cap_0);
+    //     test_wait(domain_cap_0);
+    //     // barrelfish_usleep(0 * 1*1*1);
+
+    //     struct capref domain_cap_1;
+    //     char *argv_1[] = { "proc_mgmt_test", "1", "2", NULL };
+    //     test_spawn(1, argv_1, &domain_cap_1);
+    //     test_wait(domain_cap_1);
+
+    //     printf("TEST DONE\n");
+    //     // barrelfish_usleep(5*1000*1000);
+
+    //     // err = proc_mgmt_kill(domain_cap);
+    //     // if (err_is_fail(err)) {
+    //     //     USER_PANIC_ERR(err, "failed to kill proc_mgmt_test");
+    //     // }
+
+    //     // uint8_t status;
+    //     // err = proc_mgmt_wait(domain_cap, &status);
+    //     // printf("2nd proc_mgmt_test finished with status %u\n", status);
+    // } else if (argc == 2) {
+    //     // test_span(disp_get_core_id() == 0 ? 1 : 0);
+
+    //     // // struct capability ret;
+    //     // // err = debug_cap_identify(cap_domainid, &ret);
+    //     // // if (err_is_fail(err)) {
+    //     // //     USER_PANIC_ERR(err, "failed to identify cap_domainid");
+    //     // // }
+    //     // // assert(ret.type == ObjType_Domain);
+    //     // // printf("proc_mgmt_test: cap_domainid = { .coreid=%u, .core_local_id=%u "
+    //     // //        "}\n", ret.u.domain.coreid, ret.u.domain.core_local_id);
+
+    //     // // coreid_t other_core = disp_get_core_id() == 0 ? 1 : 0;
+    //     // // err = proc_mgmt_span(other_core);//domain_new_dispatcher(other_core, span_cb, NULL);
+    //     // // if (err_is_fail(err)) {
+    //     // //     USER_PANIC_ERR(err, "failed to span proc_mgmt_test on core %u\n",
+    //     // //                    other_core);
+    //     // // }
+
+    //     // // barrelfish_usleep(5*1000*1000);
+    //     // // printf("Main dispatcher exiting...\n");
+
+    //     for(;;) {
+    //         errval_t err = event_dispatch(get_default_waitset());
+    //         if(err_is_fail(err)) {
+    //             USER_PANIC_ERR(err, "event_dispatch");
+    //         }
+    //     }
+    // } else {
+    //     // barrelfish_usleep(5 * 1000 * 1000);
+    //     // We'll just exit normally here, spawner should be waiting for us.
+    // }
+
+    return 0;
+}