failure I2C_WAIT_FOR_BUS "Wait for bus free timed out",
failure I2C_FAILURE "I2C subsystem failure",
+ // KCB and related errors
+ failure KCB_NOT_FOUND "Did not find the given KCB",
};
-// errors generated by libcaps
+// errors generated by libmdb
errors libcaps CAPS_ERR_ {
// general errors
failure INVALID_ARGS "Invalid arguments",
failure INCOMPLETE_ROUTE "(Portion of) routing table not present",
// Resource controller
- failure RSRC_ALLOC "Out of resource domains",
- failure RSRC_MEMBER_LIMIT "Reached member limit of resource domain",
- failure RSRC_ILL_MANIFEST "Illegal manifest",
- failure RSRC_NOT_FOUND "Resource domain not found on this core",
+ failure RSRC_ALLOC "Out of resource domains",
+ failure RSRC_MEMBER_LIMIT "Reached member limit of resource domain",
+ failure RSRC_ILL_MANIFEST "Illegal manifest",
+ failure RSRC_NOT_FOUND "Resource domain not found on this core",
+
+ // capops
+ failure CAPOPS_BUSY "Other end not ready for operation",
+
+ // Invocations
+ failure INVOKE_IRQ_ALLOCATE "Unable to allocate IRQ vector",
+ failure INVOKE_IRQ_SET "Unable to install IRQ vector",
};
// errors related to the routing library
message monitor_mem_iref_request();
message monitor_mem_iref_reply(iref iref);
- message bind_monitor_request(coreid_t core_id, caprep cap);
- message bind_monitor_reply(errval err);
+ /**
+ * New monitor communication set-up.
- * 1. monitor_initialized (new monitor -> bsp)
- * 2. new_monitor_nofity (monitor.0 -> <all available monitors>)
- * 3. bind_monitor_proxy (<all available monitors> -> monitor.0)
- * 4. bind_monitor_request (monitor.0 -> new monitor)
- * 5. bind_monitor_reply (new monitor -> monitor.0)
++ * 1. capops_ready (new monitor -> bsp)
++ * 2. monitor_initialized (new monitor -> bsp)
++ * 3. new_monitor_notify (monitor.0 -> <all available monitors>)
++ * 4. bind_monitor_proxy (<all available monitors> -> monitor.0)
++ * 5. bind_monitor_request (monitor.0 -> new monitor)
++ * 6. bind_monitor_reply (new monitor -> monitor.0)
+ */
++ message capops_ready();
+ message monitor_initialized();
+ message new_monitor_notify(coreid_t core_id);
message bind_monitor_proxy(coreid_t dst_core_id, caprep cap);
+ call bind_monitor_request(coreid_t core_id, caprep cap);
+ response bind_monitor_reply(errval err);
message bind_monitor_request_scc(coreid_t core_id,
caprep cap,
message multihop_routing_table_grow(coreid forwarder, coreid destinations[len]);
// set up a new multihop virtual circuit
- message bind_multihop_intermon_request(iref iref, vci_t sender_vci, coreid core_id);
- message bind_multihop_intermon_reply(vci_t receiver_vci, vci_t sender_vci, errval err);
+ message bind_multihop_intermon_request(iref iref, vci_t sender_vci,
+ coreid core_id);
+ message bind_multihop_intermon_reply(vci_t receiver_vci, vci_t sender_vci,
+ errval err);
- message multihop_message(vci_t vci, uint8 direction, uint8 flags, uint32 ack, uint8 payload[size]);
- message multihop_cap_send(vci_t vci, uint8 direction, capid_t capid, errval err, caprep cap, bool null_cap);
+ message multihop_message(vci_t vci, uint8 direction, uint8 flags, uint32 ack,
+ uint8 payload[size]);
+ message multihop_cap_send(vci_t vci, uint8 direction, capid_t capid, errval err,
+ caprep cap, bool null_cap);
+ // cap operation messages
+ message capops_request_copy(coreid dest, caprep cap, capop_st st);
+ message capops_recv_copy(caprep cap, uint8 owner_relations, capop_st st);
+ message capops_recv_copy_result(errval status, capaddr_t cap, capbits_t bits, capslot_t slot, capop_st st);
+
+ message capops_move_request(caprep cap, uint8 relations, capop_st st);
+ message capops_move_result(errval status, capop_st st);
+ message capops_retrieve_request(caprep cap, capop_st st);
+ message capops_retrieve_result(errval status, uint8 relations, capop_st st);
+
+ message capops_delete_remote(caprep cap, capop_st st);
+ message capops_delete_remote_result(errval status, capop_st st);
+
+ message capops_revoke_mark(caprep cap, capop_st st);
+ message capops_revoke_ready(capop_st st);
+ message capops_revoke_commit(capop_st st);
+ message capops_revoke_done(capop_st st);
+
+ // XXX: uint32 for bits? -MN
+ message capops_request_retype(caprep src, uint32 desttype, uint32 destbits, capop_st st);
+ message capops_retype_response(errval status, capop_st st);
+
+ // ownership / relation messages
+ message capops_update_owner(caprep cap, capop_st st);
+ message capops_owner_updated(capop_st st);
+
+ message capops_find_cap(caprep cap, capop_st st);
+ message capops_find_cap_result(errval status, capop_st st);
+
+ message capops_find_descendants(caprep cap, capop_st st);
+ message capops_find_descendants_result(errval status, capop_st st);
+
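+ // Informal sketch (editorial, not part of the interface): taken together,
+ // the revoke messages above suggest a two-phase protocol driven by the
+ // initiating monitor:
+ //   1. initiator -> remote monitors: capops_revoke_mark(cap, st)
+ //   2. remote monitors -> initiator: capops_revoke_ready(st)
+ //   3. initiator -> remote monitors: capops_revoke_commit(st)
+ //   4. remote monitors -> initiator: capops_revoke_done(st)
+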
/* Tracing Framework */
// Notify a core that it should prepare the tracing state. The origin core
* implementation of the default (non-THC) Flounder RPC client stubs.
*/
interface monitor_blocking "The monitor to client RPC interface" {
- typedef struct {
- uint64 w0;
- uint64 w1;
- uint64 w2;
- uint64 w3;
- } caprep;
-
- // For special boot domains to request the bootinfo frame
- rpc get_bootinfo(out errval err, out cap frame, out size frame_size);
-
- /* Remote cap operation messages */
- rpc remote_cap_retype(in cap croot, in uint32 src,
- in uint64 objtype, in uint8 size_bits,
- in uint32 to, in uint32 slot,
- in int to_vbits, out errval err);
- rpc remote_cap_delete(in cap croot, in uint32 src, in uint8 vbits,
- out errval err);
- rpc remote_cap_revoke(in cap croot, in uint32 src, in uint8 vbits,
- out errval err);
+ typedef struct {
+ uint64 w0;
+ uint64 w1;
+ uint64 w2;
+ uint64 w3;
+ } caprep;
+
+ // For special boot domains to request the bootinfo frame
+ rpc get_bootinfo(out errval err, out cap frame, out size frame_size);
+
- /* Remote cap operation messages */
++ /* Remote cap operation messages */
+ rpc remote_cap_retype(in cap croot, in uint32 src,
+ in uint64 objtype, in uint8 size_bits,
+ in uint32 to, in uint32 slot,
+ in int dcn_vbits, out errval err);
+ rpc remote_cap_delete(in cap croot, in uint32 src, in uint8 vbits,
+ out errval err);
+ rpc remote_cap_revoke(in cap croot, in uint32 src, in uint8 vbits,
+ out errval err);
rpc get_phyaddr_cap(out cap pyaddr, out errval err);
rpc get_io_cap(out cap io, out errval err);
#include <barrelfish_kpi/paging_arch.h>
#include <barrelfish/debug.h>
-
- static inline struct sysret cap_invoke(struct capref to, uintptr_t cmd,
- uintptr_t arg1, uintptr_t arg2,
- uintptr_t arg3, uintptr_t arg4,
- uintptr_t arg5)
- {
- uint8_t invoke_bits = get_cap_valid_bits(to);
- capaddr_t invoke_cptr = get_cap_addr(to) >> (CPTR_BITS - invoke_bits);
+/**
+ * capability invocation syscall wrapper, copied from x86_64 version
+ */
+static inline struct sysret cap_invoke(struct capref to, uintptr_t arg1,
+ uintptr_t arg2, uintptr_t arg3,
+ uintptr_t arg4, uintptr_t arg5,
+ uintptr_t arg6)
+{
+ uint8_t invoke_bits = get_cap_valid_bits(to);
+ capaddr_t invoke_cptr = get_cap_addr(to) >> (CPTR_BITS - invoke_bits);
- return syscall((invoke_bits << 16) | (cmd << 8) | SYSCALL_INVOKE,
- invoke_cptr, arg1, arg2, arg3, arg4, arg5);
- }
+ // invocation word layout: invoke_bits << 16 | cmd (passed as arg1) << 8 | SYSCALL_INVOKE
+ uint32_t invocation = ((invoke_bits << 16) | (arg1 << 8) | SYSCALL_INVOKE);
+
+ return syscall(invocation, invoke_cptr, arg2, arg3, arg4, arg5, arg6);
+}
-#define cap_invoke6(to, _a, _b, _c, _d, _e, _f) \
+#define cap_invoke6(to, _a, _b, _c, _d, _e, _f) \
cap_invoke(to, _a, _b, _c, _d, _e, _f)
-#define cap_invoke5(to, _a, _b, _c, _d, _e) \
+#define cap_invoke5(to, _a, _b, _c, _d, _e) \
cap_invoke6(to, _a, _b, _c, _d, _e, 0)
-#define cap_invoke4(to, _a, _b, _c, _d) \
+#define cap_invoke4(to, _a, _b, _c, _d) \
cap_invoke5(to, _a, _b, _c, _d, 0)
-#define cap_invoke3(to, _a, _b, _c) \
+#define cap_invoke3(to, _a, _b, _c) \
cap_invoke4(to, _a, _b, _c, 0)
-#define cap_invoke2(to, _a, _b) \
+#define cap_invoke2(to, _a, _b) \
cap_invoke3(to, _a, _b, 0)
-#define cap_invoke1(to, _a) \
+#define cap_invoke1(to, _a) \
cap_invoke2(to, _a, 0)
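+
+/* Illustrative sketch (editorial, not part of this patch): packing the
+ * invocation word the same way cap_invoke() does above. SYSCALL_INVOKE's
+ * real value comes from the kernel headers; the helper name is hypothetical. */
+#if 0 /* example only */
+static inline uint32_t pack_invocation(uint8_t invoke_bits, uint8_t cmd)
+{
+    /* matches: invoke_bits << 16 | cmd << 8 | SYSCALL_INVOKE */
+    return ((uint32_t)invoke_bits << 16) | ((uint32_t)cmd << 8) | SYSCALL_INVOKE;
+}
+#endif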
-
/**
* \brief Retype a capability.
*
return sysret.error;
}
+
+ static inline errval_t invoke_send_init_ipi(struct capref ipi_cap, coreid_t core_id)
+ {
+ uint8_t invoke_bits = get_cap_valid_bits(ipi_cap);
+ capaddr_t invoke_cptr = get_cap_addr(ipi_cap) >> (CPTR_BITS - invoke_bits);
+
+ return
+ syscall3((invoke_bits << 16) | (IPICmd_Send_Init << 8) | SYSCALL_INVOKE,
+ invoke_cptr, (uintptr_t) core_id).error;
+ }
+
+ static inline errval_t invoke_send_start_ipi(struct capref ipi_cap, coreid_t core_id, forvaddr_t entry)
+ {
+ uint8_t invoke_bits = get_cap_valid_bits(ipi_cap);
+ capaddr_t invoke_cptr = get_cap_addr(ipi_cap) >> (CPTR_BITS - invoke_bits);
+
+ return
+ syscall4((invoke_bits << 16) | (IPICmd_Send_Start << 8) | SYSCALL_INVOKE,
+ invoke_cptr, (uintptr_t) core_id, (uintptr_t) entry).error;
+ }
+
+ static inline errval_t invoke_get_global_paddr(struct capref kernel_cap, genpaddr_t* global)
+ {
+ struct sysret sr = cap_invoke1(kernel_cap, KernelCmd_GetGlobalPhys);
+ if (err_is_ok(sr.error)) {
+ *global = sr.value;
+ }
+
+ return sr.error;
+ }
++
+#endif // INVOCATIONS_ARCH_H
// Size of dispatcher
#define OBJBITS_DISPATCHER 10
+ // Size of kernel control block
+ #define OBJBITS_KCB 16
+
#ifndef __ASSEMBLER__
+#include <assert.h>
+#include <stdbool.h>
+#include <barrelfish_kpi/types.h>
+
#define CAPRIGHTS_READ (1 << 0)
#define CAPRIGHTS_WRITE (1 << 1)
#define CAPRIGHTS_EXECUTE (1 << 2)
--- /dev/null
+/*
+ * Copyright (c) 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef BARRELFISH_DISTCAPS_H
+#define BARRELFISH_DISTCAPS_H
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <barrelfish_kpi/capabilities.h>
+
+/*
+ * capstate: locking and ownership
+ */
+
+#define DISTCAP_STATE_FOREIGN (1 << 0)
+#define DISTCAP_STATE_BUSY (1 << 1)
+
+typedef uint8_t distcap_state_t;
+
+static inline bool
+distcap_state_is_busy(distcap_state_t state)
+{
+ return state & DISTCAP_STATE_BUSY;
+}
+
+static inline bool
+distcap_state_is_foreign(distcap_state_t state)
+{
+ return state & DISTCAP_STATE_FOREIGN;
+}
+
+/*
+ * Predicates related to sharing capabilities
+ */
+
- STATIC_ASSERT(ObjType_Num == 25, "Knowledge of all cap types");
++STATIC_ASSERT(ObjType_Num == 27, "Knowledge of all cap types");
+static inline bool
+distcap_needs_locality(enum objtype type)
+{
+ switch (type) {
+ case ObjType_PhysAddr:
+ case ObjType_RAM:
+ case ObjType_CNode:
+ case ObjType_FCNode:
+ case ObjType_Dispatcher:
+ case ObjType_EndPoint:
+ case ObjType_Frame:
+ case ObjType_DevFrame:
+ case ObjType_VNode_x86_64_pml4:
+ case ObjType_VNode_x86_64_pdpt:
+ case ObjType_VNode_x86_64_pdir:
+ case ObjType_VNode_x86_64_ptable:
+ case ObjType_VNode_x86_32_pdpt:
+ case ObjType_VNode_x86_32_pdir:
+ case ObjType_VNode_x86_32_ptable:
+ case ObjType_VNode_ARM_l1:
+ case ObjType_VNode_ARM_l2:
++ // XXX: should the KCB need locality?
++ //case ObjType_KernelControlBlock:
+ return true;
+ default:
+ return false;
+ }
+}
+
- STATIC_ASSERT(ObjType_Num == 25, "Knowledge of all cap types");
++STATIC_ASSERT(ObjType_Num == 27, "Knowledge of all cap types");
+static inline bool
+distcap_is_moveable(enum objtype type)
+{
+ switch (type) {
+ case ObjType_PhysAddr:
+ case ObjType_RAM:
+ case ObjType_Frame:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * Caching remote relations
+ */
+
+#define RRELS_COPY_BIT (1<<0)
+#define RRELS_ANCS_BIT (1<<1)
+#define RRELS_DESC_BIT (1<<2)
+
+#endif
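+
+/* Minimal usage sketch (editorial, not part of this patch): gating an
+ * operation on the distributed cap state. The state value would come from
+ * a lookup such as sys_get_state(); only the predicates are from above. */
+#if 0 /* example only */
+static bool can_operate_locally(distcap_state_t state)
+{
+    /* busy caps are mid-operation; foreign caps are owned by another core */
+    return !distcap_state_is_busy(state) && !distcap_state_is_foreign(state);
+}
+#endif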
scheduler = case Config.scheduler of
Config.RR -> "schedule_rr.c"
Config.RBED -> "schedule_rbed.c"
- common_c = [ "gdb_stub.c",
- "capabilities.c",
+ common_c = [ "gdb_stub.c",
+ "capabilities.c",
+ "cap_delete.c",
"dispatch.c",
- scheduler,
+ scheduler,
+ "kcb.c",
- "memset.c",
- "memmove.c",
+ "memset.c",
+ "memmove.c",
+ "monitor.c",
"paging_generic.c",
"printf.c",
- "startup.c",
- "stdlib.c",
- "string.c",
+ "startup.c",
+ "stdlib.c",
+ "string.c",
"syscall.c",
- "wakeup.c",
- "useraccess.c" ]
+ "wakeup.c",
+ "useraccess.c",
+ "coreboot.c" ]
++ (if Config.microbenchmarks then ["microbenchmarks.c"] else [])
++ (if Config.oneshot_timer then ["timer.c"] else [])
common_libs = [ "getopt", "mdb_kernel" ]
--
cpuDriver {
architectures = [ "armv5" ],
- assemblyFiles = [ "arch/armv5/boot.S",
+ assemblyFiles = [ "arch/armv5/boot.S",
"arch/armv5/exceptions.S" ],
- cFiles = [ "arch/arm/exn.c",
- "arch/arm/exec.c",
- "arch/arm/misc.c",
- "arch/arm/phys_mmap.c",
- "arch/armv5/init.c",
- "arch/armv5/integrator.c",
- "arch/armv5/kludges.c",
- "arch/armv5/kputchar.c",
- "arch/armv5/pl011_uart.c",
+ cFiles = [ "arch/arm/exn.c",
+ "arch/arm/exec.c",
+ "arch/arm/misc.c",
+ "arch/arm/phys_mmap.c",
+ "arch/arm/syscall.c",
+ "arch/armv5/init.c",
+ "arch/armv5/integrator.c",
+ "arch/armv5/kludges.c",
+ "arch/armv5/kputchar.c",
+ "arch/armv5/pl011_uart.c",
"arch/armv5/cp15.c",
- "arch/armv5/paging.c",
- "arch/armv5/startup_arch.c",
- "arch/armv5/syscall.c" ],
- mackerelDevices = [ "arm",
- "arm_icp_pit",
- "arm_icp_pic0",
+ "arch/armv5/paging.c",
+ "arch/armv5/startup_arch.c" ],
+ mackerelDevices = [ "arm",
+ "arm_icp_pit",
+ "arm_icp_pic0",
"pl011_uart" ],
addLibraries = [ "elf", "cpio" ]
},
--
cpuDriver {
architectures = [ "arm11mp" ],
- assemblyFiles = [ "arch/armv5/boot.S",
- "arch/armv5/exceptions.S",
+ assemblyFiles = [ "arch/armv5/boot.S",
+ "arch/armv5/exceptions.S",
"arch/paging_cp.S" ],
- cFiles = [ "arch/arm/exn.c",
- "arch/arm/exec.c",
- "arch/arm/misc.c",
- "arch/arm/phys_mmap.c",
- "arch/armv5/init.c",
- "arch/armv5/integrator.c",
- "arch/armv5/kludges.c",
- "arch/armv5/kputchar.c",
- "arch/armv5/pl011_uart.c",
- "arch/armv5/paging.c",
- "arch/armv5/startup_arch.c",
- "arch/armv5/syscall.c" ],
- mackerelDevices = [ "arm",
- "arm_icp_pit",
- "arm_icp_pic0",
+ cFiles = [ "arch/arm/exn.c",
+ "arch/arm/exec.c",
+ "arch/arm/misc.c",
+ "arch/arm/phys_mmap.c",
+ "arch/arm/syscall.c",
+ "arch/armv5/init.c",
+ "arch/armv5/integrator.c",
+ "arch/armv5/kludges.c",
+ "arch/armv5/kputchar.c",
+ "arch/armv5/pl011_uart.c",
+ "arch/armv5/paging.c",
+ "arch/armv5/startup_arch.c" ],
+ mackerelDevices = [ "arm",
+ "arm_icp_pit",
+ "arm_icp_pic0",
"pl011_uart" ],
addLibraries = [ "elf", "cpio" ]
},
cpuDriver {
target = "ixp2800",
architectures = [ "xscale" ],
- assemblyFiles = [ "arch/xscale/boot.S",
+ assemblyFiles = [ "arch/xscale/boot.S",
"arch/armv5/exceptions.S" ],
- cFiles = [ "arch/arm/exn.c",
- "arch/arm/exec.c",
- "arch/arm/misc.c",
- "arch/arm/phys_mmap.c",
+ cFiles = [ "arch/arm/exn.c",
+ "arch/arm/exec.c",
+ "arch/arm/misc.c",
+ "arch/arm/phys_mmap.c",
+ "arch/arm/syscall.c",
-- according to the Xscale documentation, the MMU is a
-- standard ARMv5 design, so we should be able to use
-- the armv5 cp15 code. -SG, 8/1/13
"arch/armv5/cp15.c",
- "arch/armv5/init.c",
- "arch/armv5/kludges.c",
- "arch/armv5/startup_arch.c",
- "arch/armv5/syscall.c",
- "arch/xscale/ixp2800_integrator.c",
- "arch/xscale/ixp2800_uart.c",
- "arch/xscale/ixp2800_paging.c",
+ "arch/armv5/init.c",
+ "arch/armv5/kludges.c",
+ "arch/armv5/startup_arch.c",
+ "arch/xscale/ixp2800_integrator.c",
+ "arch/xscale/ixp2800_uart.c",
+ "arch/xscale/ixp2800_paging.c",
"arch/xscale/kputchar.c" ],
- mackerelDevices = [ "arm",
- "ixp2800_icp_pit",
- "ixp2800_icp_pic0",
+ mackerelDevices = [ "arm",
+ "ixp2800_icp_pit",
+ "ixp2800_icp_pic0",
"ixp2800_uart" ],
addLibraries = [ "elf", "cpio" ]
},
assemblyFiles = [ "arch/omap44xx/boot.S",
"arch/armv7/cp15.S",
"arch/armv7/exceptions.S" ],
- cFiles = [ "arch/arm/exec.c",
- "arch/arm/misc.c",
- "arch/arm/exn.c",
+ cFiles = [ "arch/arm/exec.c",
+ "arch/arm/misc.c",
+ "arch/arm/exn.c",
+ "arch/arm/syscall.c",
"arch/arm/phys_mmap.c",
"arch/armv7/gic.c",
- "arch/armv7/kludges.c",
- "arch/armv7/multiboot.c",
+ "arch/armv7/kludges.c",
+ "arch/armv7/multiboot.c",
"arch/armv7/paging.c",
- "arch/armv7/syscall.c",
"arch/armv7/irq.c",
- "arch/omap44xx/init.c",
- "arch/omap44xx/omap.c",
- "arch/omap44xx/paging.c",
- "arch/omap44xx/startup_arch.c",
- "arch/omap44xx/omap_uart.c",
+ "arch/omap44xx/init.c",
+ "arch/omap44xx/omap.c",
+ "arch/omap44xx/paging.c",
+ "arch/omap44xx/startup_arch.c",
+ "arch/omap44xx/omap_uart.c",
"arch/omap44xx/start_aps.c",
- "arch/omap44xx/spinlock.c",
+ "arch/omap44xx/spinlock.c",
"arch/omap44xx/cortexm3_heteropanda.c", --will be empty if heteropanda = False
"arch/armv7/kputchar.c"],
- mackerelDevices = [ "arm",
- "arm_icp_pit",
- "pl130_gic",
- "sp804_pit",
- "cortex_a9_pit",
+ mackerelDevices = [ "arm",
+ "arm_icp_pit",
+ "pl130_gic",
+ "sp804_pit",
+ "cortex_a9_pit",
"cortex_a9_gt",
- "a9scu",
- "omap/omap_uart",
- "omap/omap44xx_id",
+ "a9scu",
+ "omap/omap_uart",
+ "omap/omap44xx_id",
"omap/omap44xx_emif",
"omap/omap44xx_gpio",
"omap/omap44xx_sysctrl_padconf_core",
architectures = [ "armv7-m" ],
assemblyFiles = [ "arch/armv7-m/boot.S",
"arch/armv7-m/exceptions.S" ],
- cFiles = [
- "arch/armv7-m/exec.c",
- "arch/arm/misc.c",
- "arch/armv7-m/exn.c",
+ cFiles = [
+ "arch/arm/misc.c",
"arch/arm/phys_mmap.c",
- "arch/armv7/kludges.c",
- "arch/armv7/multiboot.c",
- "arch/armv7/syscall.c",
- "arch/armv7-m/init.c",
- "arch/armv7-m/omap.c",
- "arch/armv7-m/paging.c",
- "arch/omap44xx/startup_arch.c",
- "arch/omap44xx/omap_uart.c",
--- "arch/omap44xx/start_aps.c",
- "arch/armv7/kputchar.c",
+ "arch/arm/syscall.c",
+ "arch/armv7/kludges.c",
+ "arch/armv7/multiboot.c",
+ "arch/armv7-m/exec.c",
+ "arch/armv7-m/exn.c",
+ "arch/armv7-m/init.c",
+ "arch/armv7-m/omap.c",
+ "arch/armv7-m/paging.c",
+ "arch/omap44xx/startup_arch.c",
+ "arch/omap44xx/omap_uart.c",
+-- "arch/omap44xx/start_aps.c",
+ "arch/armv7/kputchar.c",
"arch/omap44xx/spinlock.c"
],
- mackerelDevices = [ "arm",
- "omap/omap44xx_cortex_m3_nvic",
- "omap/omap_uart",
- "omap/omap44xx_id",
+ mackerelDevices = [ "arm",
+ "omap/omap44xx_cortex_m3_nvic",
+ "omap/omap_uart",
+ "omap/omap44xx_id",
"omap/omap44xx_emif",
"omap/omap44xx_gpio",
"omap/omap44xx_mmu",
[IRQTableCmd_Delete] = handle_irq_table_delete,
},
[ObjType_Kernel] = {
- [KernelCmd_Get_core_id] = monitor_get_core_id,
- [KernelCmd_Get_arch_id] = monitor_get_arch_id,
- [KernelCmd_Register] = monitor_handle_register,
- [KernelCmd_Create_cap] = monitor_create_cap,
- [KernelCmd_Remote_cap] = monitor_remote_cap,
- [KernelCmd_Spawn_core] = monitor_spawn_core,
- [KernelCmd_Identify_cap] = monitor_identify_cap,
+ [KernelCmd_Cap_has_relations] = monitor_cap_has_relations,
+ [KernelCmd_Clear_step] = monitor_handle_clear_step,
+ [KernelCmd_Copy_existing] = monitor_copy_existing,
+ [KernelCmd_Create_cap] = monitor_create_cap,
+ [KernelCmd_Delete_foreigns] = monitor_handle_delete_foreigns,
+ [KernelCmd_Delete_last] = monitor_handle_delete_last,
+ [KernelCmd_Delete_step] = monitor_handle_delete_step,
+ [KernelCmd_Domain_Id] = monitor_handle_domain_id,
+ [KernelCmd_Get_arch_id] = monitor_get_arch_id,
+ [KernelCmd_Get_cap_owner] = monitor_get_cap_owner,
+ [KernelCmd_Get_core_id] = monitor_get_core_id,
+ [KernelCmd_Has_descendants] = monitor_handle_has_descendants,
+ [KernelCmd_Identify_cap] = monitor_identify_cap,
+ [KernelCmd_Identify_domains_cap] = monitor_identify_domains_cap,
+ [KernelCmd_Lock_cap] = monitor_lock_cap,
+ [KernelCmd_Nullify_cap] = monitor_nullify_cap,
+ [KernelCmd_Register] = monitor_handle_register,
+ [KernelCmd_Remote_relations] = monitor_remote_relations,
+ [KernelCmd_Retype] = monitor_handle_retype,
+ [KernelCmd_Revoke_mark_relations] = monitor_handle_revoke_mark_rels,
+ [KernelCmd_Revoke_mark_target] = monitor_handle_revoke_mark_tgt,
+ [KernelCmd_Set_cap_owner] = monitor_set_cap_owner,
+ //[KernelCmd_Setup_trace] = handle_trace_setup,
+ [KernelCmd_Spawn_core] = monitor_spawn_core,
+ [KernelCmd_Unlock_cap] = monitor_unlock_cap,
},
+ [ObjType_IPI] = {
+ [IPICmd_Send_Start] = monitor_spawn_core,
+ },
[ObjType_ID] = {
[IDCmd_Identify] = handle_idcap_identify
}
#include <exec.h>
#include <stdio.h>
#include <syscall.h>
+#include <arch/arm/syscall_arm.h>
+ #include <kcb.h>
#include <arch/armv7/start_aps.h>
- /*
- * Interrupt controller (Cortex-A9 MPU INTC) with up to 128 interrupt requests
- */
- #define NUM_INTR (128+32)
-
- /// Size of hardware IRQ dispatch table == #NIDT - #NEXCEPTIONS exceptions
- #define NDISPATCH (NUM_INTR)
#define GIC_IRQ_PRIO_LOWEST (0xF)
#define GIC_IRQ_CPU_TRG_ALL (0x3) // For two cores on the PandaBoard
{
capaddr_t cptr = args[0];
int bits = args[1];
-
- TRACE(KERNEL, SC_DELETE, 0);
- struct sysret sr = sys_delete(root, cptr, bits, from_monitor);
- TRACE(KERNEL, SC_DELETE, 1);
-
- return sr;
+ return sys_delete(root, cptr, bits);
}
-static struct sysret handle_delete(struct capability *root,
+static struct sysret handle_revoke(struct capability *root,
- int cmd, uintptr_t *args)
+ int cmd, uintptr_t *args)
{
- return handle_delete_common(root, args, false);
-}
-
-
-static struct sysret handle_revoke_common(struct capability *root,
- uintptr_t *args,
- bool from_monitor)
-{
capaddr_t cptr = args[0];
int bits = args[1];
-
- TRACE(KERNEL, SC_REVOKE, 0);
- struct sysret sr = sys_revoke(root, cptr, bits, from_monitor);
- TRACE(KERNEL, SC_REVOKE, 1);
-
- return sr;
+ return sys_revoke(root, cptr, bits);
}
-static struct sysret handle_revoke(struct capability *root,
- int cmd, uintptr_t *args)
+static struct sysret handle_get_state(struct capability *root,
+ int cmd, uintptr_t *args)
{
- return handle_revoke_common(root, args, false);
+ capaddr_t cptr = args[0];
+ int bits = args[1];
+ return sys_get_state(root, cptr, bits);
}
-
static struct sysret handle_unmap(struct capability *pgtable,
int cmd, uintptr_t *args)
{
#include <offsets.h>
#include <capabilities.h>
#include <cap_predicates.h>
+#include <distcaps.h>
#include <dispatch.h>
+ #include <kcb.h>
#include <paging_kernel_arch.h>
#include <mdb/mdb.h>
#include <mdb/mdb_tree.h>
}
/**
+ * \brief Initialize the objects for which local caps are about to be created.
+ *
+ * For the meaning of the parameters, see the 'caps_create' function.
+ */
- STATIC_ASSERT(ObjType_Num == 25, "Knowledge of all cap types");
++STATIC_ASSERT(ObjType_Num == 27, "Knowledge of all cap types");
+
+static errval_t caps_init_objects(enum objtype type, lpaddr_t lpaddr, uint8_t
+ bits, uint8_t objbits, size_t numobjs)
+{
+ // Virtual address of the memory the kernel object resides in
+ // XXX: A better way of doing this is needed;
+ // this is creating caps that the kernel cannot address.
+ // It assumes that the cap is not of a type which will have to be zeroed out.
+ lvaddr_t lvaddr;
+ if(lpaddr < PADDR_SPACE_LIMIT) {
+ lvaddr = local_phys_to_mem(lpaddr);
+ } else {
+ lvaddr = 0;
+ }
+
+ switch (type) {
+
+ case ObjType_Frame:
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ // XXX: SCC hack, while we don't have a devframe allocator
+ if(lpaddr + ((lpaddr_t)1 << bits) < PADDR_SPACE_LIMIT) {
+ memset((void*)lvaddr, 0, (lvaddr_t)1 << bits);
+ } else {
+ printk(LOG_WARN, "Allocating RAM at 0x%" PRIxLPADDR
+ " uninitialized\n", lpaddr);
+ }
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+ break;
+
+ case ObjType_CNode:
+ case ObjType_VNode_ARM_l1:
+ case ObjType_VNode_ARM_l2:
+ case ObjType_VNode_x86_32_ptable:
+ case ObjType_VNode_x86_32_pdir:
+ case ObjType_VNode_x86_32_pdpt:
+ case ObjType_VNode_x86_64_ptable:
+ case ObjType_VNode_x86_64_pdir:
+ case ObjType_VNode_x86_64_pdpt:
+ case ObjType_VNode_x86_64_pml4:
+ case ObjType_Dispatcher:
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
++ case ObjType_KernelControlBlock:
++ TRACE(KERNEL, BZERO, 1);
+ memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
++ TRACE(KERNEL, BZERO, 0);
+ break;
+
+ default:
+ break;
+
+ }
+
+ return SYS_ERR_OK;
+}
+
+/**
* \brief Create capabilities to kernel objects.
*
* This function creates kernel objects of 'type' into the memory
*/
// If you create more capability types you need to deal with them
// in the table below.
- STATIC_ASSERT(ObjType_Num == 25, "Knowledge of all cap types");
+ STATIC_ASSERT(ObjType_Num == 27, "Knowledge of all cap types");
static errval_t caps_create(enum objtype type, lpaddr_t lpaddr, uint8_t bits,
- uint8_t objbits, size_t numobjs,
+ uint8_t objbits, size_t numobjs, coreid_t owner,
struct cte *dest_caps)
{
errval_t err;
printk(LOG_WARN, "Allocating RAM at 0x%" PRIxLPADDR
" uninitialized\n", lpaddr);
}
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+ TRACE(KERNEL, BZERO, 0);
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
- src_cap.u.frame.base = genpaddr + i * ((genpaddr_t)1 << objbits);
+ src_cap.u.frame.base = genpaddr + dest_i * ((genpaddr_t)1 << objbits);
src_cap.u.frame.bits = objbits;
// Insert the capabilities
- err = set_cap(&dest_caps[i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
if (err_is_fail(err)) {
- return err;
+ break;
}
}
- return SYS_ERR_OK;
+ break;
case ObjType_PhysAddr:
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
- src_cap.u.physaddr.base = genpaddr + i * ((genpaddr_t)1 << objbits);
+ src_cap.u.physaddr.base = genpaddr + dest_i * ((genpaddr_t)1 << objbits);
src_cap.u.physaddr.bits = objbits;
// Insert the capabilities
- err = set_cap(&dest_caps[i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
if (err_is_fail(err)) {
- return err;
+ break;
}
}
- return SYS_ERR_OK;
+ break;
+
case ObjType_RAM:
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
- src_cap.u.ram.base = genpaddr + i * ((genpaddr_t)1 << objbits);
+ src_cap.u.ram.base = genpaddr + dest_i * ((genpaddr_t)1 << objbits);
src_cap.u.ram.bits = objbits;
// Insert the capabilities
- err = set_cap(&dest_caps[i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
if (err_is_fail(err)) {
- return err;
+ break;
}
}
- return SYS_ERR_OK;
+ break;
+
case ObjType_DevFrame:
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
- src_cap.u.devframe.base = genpaddr + i * ((genpaddr_t)1 << objbits);
+ src_cap.u.devframe.base = genpaddr + dest_i * ((genpaddr_t)1 << objbits);
src_cap.u.devframe.bits = objbits;
// Insert the capabilities
- err = set_cap(&dest_caps[i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
if (err_is_fail(err)) {
- return err;
+ break;
}
}
- return SYS_ERR_OK;
+ break;
case ObjType_CNode:
assert((1UL << OBJBITS_CTE) >= sizeof(struct cte));
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ TRACE(KERNEL, BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+ TRACE(KERNEL, BZERO, 0);
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.cnode.cnode =
- lpaddr + i * ((lpaddr_t)1 << (objbits + OBJBITS_CTE));
+ lpaddr + dest_i * ((lpaddr_t)1 << (objbits + OBJBITS_CTE));
src_cap.u.cnode.bits = objbits;
src_cap.u.cnode.guard = 0;
src_cap.u.cnode.guard_size = 0;
{
size_t objbits_vnode = vnode_objbits(type);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ TRACE(KERNEL, BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+ TRACE(KERNEL, BZERO, 0);
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.vnode_arm_l1.base =
- genpaddr + i * ((genpaddr_t)1 << objbits_vnode);
+ genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
#ifdef __arm__
// Insert kernel/mem mappings into new table.
{
size_t objbits_vnode = vnode_objbits(type);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ TRACE(KERNEL, BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+ TRACE(KERNEL, BZERO, 0);
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.vnode_arm_l2.base =
- genpaddr + i * ((genpaddr_t)1 << objbits_vnode);
+ genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
// Insert the capability
- err = set_cap(&dest_caps[i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
if (err_is_fail(err)) {
- return err;
+ break;
}
}
- return SYS_ERR_OK;
+ break;
}
case ObjType_VNode_x86_32_ptable:
{
size_t objbits_vnode = vnode_objbits(type);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ TRACE(KERNEL, BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+ TRACE(KERNEL, BZERO, 0);
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.vnode_x86_32_ptable.base =
- genpaddr + i * ((genpaddr_t)1 << objbits_vnode);
+ genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
// Insert the capability
- err = set_cap(&dest_caps[i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
if (err_is_fail(err)) {
- return err;
+ break;
}
}
- return SYS_ERR_OK;
+ break;
}
case ObjType_VNode_x86_32_pdir:
{
size_t objbits_vnode = vnode_objbits(type);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ TRACE(KERNEL, BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+ TRACE(KERNEL, BZERO, 0);
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.vnode_x86_32_pdir.base =
- genpaddr + i * ((genpaddr_t)1 << objbits_vnode);
+ genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
#if defined(__i386__) && !defined(CONFIG_PAE)
// Make it a good PDE by inserting kernel/mem VSpaces
{
size_t objbits_vnode = vnode_objbits(type);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ TRACE(KERNEL, BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+ TRACE(KERNEL, BZERO, 0);
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.vnode_x86_32_pdir.base =
- genpaddr + i * ((genpaddr_t)1 << objbits_vnode);
+ genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
#if defined(__i386__) && defined(CONFIG_PAE)
// Make it a good PDPTE by inserting kernel/mem VSpaces
{
size_t objbits_vnode = vnode_objbits(type);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ TRACE(KERNEL, BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+ TRACE(KERNEL, BZERO, 0);
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.vnode_x86_64_ptable.base =
- genpaddr + i * ((genpaddr_t)1 << objbits_vnode);
+ genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
// Insert the capability
- err = set_cap(&dest_caps[i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
if (err_is_fail(err)) {
- return err;
+ break;
}
}
- return SYS_ERR_OK;
+ break;
}
case ObjType_VNode_x86_64_pdir:
{
size_t objbits_vnode = vnode_objbits(type);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ TRACE(KERNEL, BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+ TRACE(KERNEL, BZERO, 0);
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.vnode_x86_64_pdir.base =
- genpaddr + i * ((genpaddr_t)1 << objbits_vnode);
+ genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
// Insert the capability
- err = set_cap(&dest_caps[i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
if (err_is_fail(err)) {
- return err;
+ break;
}
}
- return SYS_ERR_OK;
+ break;
}
case ObjType_VNode_x86_64_pdpt:
{
size_t objbits_vnode = vnode_objbits(type);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ TRACE(KERNEL, BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+ TRACE(KERNEL, BZERO, 0);
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.vnode_x86_64_pdpt.base =
- genpaddr + i * ((genpaddr_t)1 << objbits_vnode);
+ genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
// Insert the capability
- err = set_cap(&dest_caps[i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
if (err_is_fail(err)) {
- return err;
+ break;
}
}
- return SYS_ERR_OK;
+ break;
}
case ObjType_VNode_x86_64_pml4:
{
size_t objbits_vnode = vnode_objbits(type);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ TRACE(KERNEL, BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+ TRACE(KERNEL, BZERO, 0);
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.vnode_x86_64_pml4.base =
- genpaddr + i * ((genpaddr_t)1 << objbits_vnode);
+ genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
#ifdef __k1om__
lpaddr_t var = gen_phys_to_local_phys(src_cap.u.vnode_x86_64_pml4.base);
case ObjType_Dispatcher:
assert((1UL << OBJBITS_DISPATCHER) >= sizeof(struct dcb));
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ TRACE(KERNEL, BZERO, 1);
memset((void*)lvaddr, 0, 1UL << bits);
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+ TRACE(KERNEL, BZERO, 0);
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.dispatcher.dcb = (struct dcb *)
- (lvaddr + i * (1UL << OBJBITS_DISPATCHER));
+ (lvaddr + dest_i * (1UL << OBJBITS_DISPATCHER));
// Insert the capability
- err = set_cap(&dest_caps[i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
if (err_is_fail(err)) {
- return err;
+ break;
}
}
- return SYS_ERR_OK;
+ break;
case ObjType_ID:
// ID type does not refer to a kernel object
assert(numobjs == 1);
// Insert the capability
- return set_cap(&dest_caps->cap, &src_cap);
+ err = set_cap(&dest_caps->cap, &src_cap);
+ if (err_is_ok(err)) {
+ dest_i = 1;
+ }
+ break;
+ case ObjType_KernelControlBlock:
+ assert((1UL << OBJBITS_KCB) >= sizeof(struct kcb));
- TRACE(KERNEL, BZERO, 1);
- memset((void*)lvaddr, 0, 1UL << bits);
- TRACE(KERNEL, BZERO, 0);
+
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
+ // Initialize type specific fields
+ src_cap.u.kernelcontrolblock.kcb = (struct kcb *)
+ (lvaddr + dest_i * (1UL << OBJBITS_KCB));
+ // Insert the capability
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
+ if (err_is_fail(err)) {
+ break;
+ }
+ }
+ break;
+
default:
panic("Unhandled capability type or capability of this type cannot"
" be created");
// If lookup exceeded expected depth then table is malformed
if (bits_resolved > vbits) {
debug(SUBSYS_CAPS, "caps_lookup_slot: Lookup exceeded valid bits\n"
- "Cnode bits = %u, guard size = %u, valid bits = %u\n",
+ "Cnode bits = %u, guard size = %u, valid bits = %u, bits_resolved = %u\n",
cnode_cap->u.cnode.bits, cnode_cap->u.cnode.guard_size,
- vbits);
+ vbits, bits_resolved);
+ TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
return SYS_ERR_DEPTH_EXCEEDED;
}
*
* Used when sending capabilities across cores. The metadata is sent across
* cores and the receiving monitor can create the new capability on its core.
+ *
+ * \bug Does not check that supplied owner matches existing copies of cap.
*/
errval_t caps_create_from_existing(struct capability *root, capaddr_t cnode_cptr,
- int cnode_vbits, cslot_t dest_slot,
+ int cnode_vbits, cslot_t dest_slot, coreid_t owner,
struct capability *src)
{
+ TRACE(KERNEL, CAP_CREATE_FROM_EXISTING, 0);
errval_t err;
struct capability *cnode;
err = caps_lookup_cap(root, cnode_cptr, cnode_vbits, &cnode,
return err;
}
- set_init_mapping(dest, 1);
+ dest->mdbnode.owner = owner;
+
+ err = mdb_insert(dest);
+ assert(err_is_ok(err));
+
+ struct cte *neighbour = NULL;
+ if (!neighbour
+ && (neighbour = mdb_predecessor(dest))
+ && !is_copy(&dest->cap, &neighbour->cap))
+ {
+ neighbour = NULL;
+ }
+ if (!neighbour
+ && (neighbour = mdb_successor(dest))
+ && !is_copy(&dest->cap, &neighbour->cap))
+ {
+ neighbour = NULL;
+ }
+
+ if (neighbour) {
+ assert(!neighbour->mdbnode.in_delete);
+ assert(neighbour->mdbnode.owner == owner);
+#define CP_ATTR(a) dest->mdbnode.a = neighbour->mdbnode.a
+ CP_ATTR(locked);
+ CP_ATTR(remote_copies);
+ CP_ATTR(remote_ancs);
+ CP_ATTR(remote_descs);
+#undef CP_ATTR
+ }
+ else {
+ dest->mdbnode.locked = false;
+ if (owner != my_core_id) {
+ // For foreign caps it does not really matter if ancestors or
+ // descendants exist
+ dest->mdbnode.remote_copies = true;
+ dest->mdbnode.remote_ancs = false;
+ dest->mdbnode.remote_descs = false;
+ }
+ else {
+ // We just created a new copy of an owned capability from nothing.
+ // This is either caused by a retype, or by sharing a capability
+ // that does not care about locality.
+ // XXX: This should probably be done more explicitly -MN
+ if (distcap_needs_locality(dest->cap.type)) {
+ // Retype, so have ancestors and no descendants
+ dest->mdbnode.remote_copies = false;
+ dest->mdbnode.remote_ancs = true;
+ dest->mdbnode.remote_descs = false;
+ }
+ else {
+ dest->mdbnode.remote_copies = false;
+ dest->mdbnode.remote_ancs = false;
+ dest->mdbnode.remote_descs = false;
+ }
+ }
+ }
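+
+ // Editorial summary of the no-neighbour defaults chosen above:
+ // owner != my_core_id -> remote_copies=1, remote_ancs=0, remote_descs=0
+ // owner == my_core_id, needs locality (retype) -> remote_copies=0, remote_ancs=1, remote_descs=0
+ // owner == my_core_id, no locality needed -> all remote relation bits clear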
+
+ TRACE_CAP_MSG("created", dest);
+
+ TRACE(KERNEL, CAP_CREATE_FROM_EXISTING, 1);
return SYS_ERR_OK;
}
/// Create caps to new kernel objects.
errval_t caps_create_new(enum objtype type, lpaddr_t addr, size_t bits,
- size_t objbits, struct cte *caps)
+ size_t objbits, coreid_t owner, struct cte *caps)
{
+ TRACE(KERNEL, CAP_CREATE_NEW, 0);
/* Parameter checking */
assert(type != ObjType_EndPoint); // Cap of this type cannot be created
// Handle the mapping database
set_init_mapping(caps, numobjs);
+
+ TRACE_CAP_MSG("created", &caps[0]);
+
+ TRACE(KERNEL, CAP_CREATE_NEW, 1);
return SYS_ERR_OK;
}
mdb_insert(&dest_cte[i]);
}
+#ifdef TRACE_PMEM_CAPS
+ for (size_t i = 0; i < numobjs; i++) {
+ TRACE_CAP_MSG("created", &dest_cte[i]);
+ }
+#endif
+
+ TRACE(KERNEL, CAP_RETYPE, 1);
return SYS_ERR_OK;
}
#include <capabilities.h>
- struct sysret sys_monitor_spawn_core(coreid_t core_id, enum cpu_type cpu_type,
- genvaddr_t entry);
-
-
- //needed because to resume an interrupted IT block, there literally is only one way:
- //exiting handler mode, restoring the context
- //if the dispatcher has to restore a context with IT-bits set, it can only do so with help
- //from the kernel.
- //XXX: registers is an area in the userspace of the currently executing process,
- //it is NOT the set of arguments given to the syscall
- errval_t sys_resume_context(arch_registers_state_t* registers);
+ // needed because there is literally only one way to resume an interrupted
+ // IT block: exiting handler mode and restoring the context. If the
+ // dispatcher has to restore a context with IT bits set, it can only do so
+ // with help from the kernel.
+ // XXX: registers is an area in the userspace of the currently executing
+ // process; it is NOT the set of arguments given to the syscall
+ errval_t sys_resume_context(arch_registers_state_t *registers);
-#endif // ARMV7_SYSCALL_H
+#endif // KERNEL_ARM_SYSCALL_H
+
errval_t caps_lookup_slot(struct capability *cnode_cap, capaddr_t cptr,
uint8_t vbits, struct cte **ret, CapRights rights);
+/*
+ * Delete and revoke
+ */
+
+errval_t caps_delete_last(struct cte *cte, struct cte *ret_ram_cap);
+errval_t caps_delete_foreigns(struct cte *cte);
+errval_t caps_mark_revoke(struct capability *base, struct cte *revoked);
+errval_t caps_delete_step(struct cte *ret_next);
+errval_t caps_clear_step(struct cte *ret_ram_cap);
+errval_t caps_delete(struct cte *cte);
+errval_t caps_revoke(struct cte *cte);
+
+/*
+ * Cap tracing
+ */
+
+#ifdef TRACE_PMEM_CAPS
- STATIC_ASSERT(ObjType_Num == 25, "knowledge of all cap types");
++STATIC_ASSERT(ObjType_Num == 27, "knowledge of all cap types");
+#define ALL_PMEM_TYPES \
+ ((1ul<<ObjType_RAM) | \
+ (1ul<<ObjType_Frame) | \
+ (1ul<<ObjType_DevFrame) | \
+ (1ul<<ObjType_CNode) | \
+ (1ul<<ObjType_FCNode) | \
+ (1ul<<ObjType_VNode_x86_64_pml4) | \
+ (1ul<<ObjType_VNode_x86_64_pdpt) | \
+ (1ul<<ObjType_VNode_x86_64_pdir) | \
+ (1ul<<ObjType_VNode_x86_64_ptable) | \
+ (1ul<<ObjType_VNode_x86_32_pdpt) | \
+ (1ul<<ObjType_VNode_x86_32_pdir) | \
+ (1ul<<ObjType_VNode_x86_32_ptable) | \
+ (1ul<<ObjType_VNode_ARM_l1) | \
+ (1ul<<ObjType_VNode_ARM_l2) | \
- (1ul<<ObjType_PhysAddr))
++ (1ul<<ObjType_PhysAddr) | \
++ (1ul<<ObjType_KernelControlBlock))
+
+//#define TRACE_TYPES_ENABLED_INITIAL 0x0
+#define TRACE_TYPES_ENABLED_INITIAL ALL_PMEM_TYPES
+#define TRACE_PMEM_BEGIN_INITIAL 0x0
+#define TRACE_PMEM_SIZE_INITIAL (~(uint32_t)0)
+
+extern uint64_t trace_types_enabled;
+extern genpaddr_t TRACE_PMEM_BEGIN;
+extern gensize_t TRACE_PMEM_SIZE;
+void caps_trace_ctrl(uint64_t types, genpaddr_t start, gensize_t size);
+static inline bool caps_should_trace(struct capability *cap)
+{
+ if (!(trace_types_enabled & (1ul<<cap->type))) {
+ return false;
+ }
+ if (!(ALL_PMEM_TYPES & (1ul<<cap->type))) {
+ return true;
+ }
+ genpaddr_t begin = get_address(cap);
+ gensize_t size = get_size(cap);
+ genpaddr_t end = begin+size;
+ return (begin < TRACE_PMEM_BEGIN && end > TRACE_PMEM_BEGIN)
+ || (begin >= TRACE_PMEM_BEGIN && begin < (TRACE_PMEM_BEGIN+TRACE_PMEM_SIZE));
+}
+#define TRACE_CAP_MSG(msg, trace_cte) do { \
+ struct cte *__tmp_cte = (trace_cte); \
+ if (__tmp_cte && caps_should_trace(&__tmp_cte->cap)) { \
+ caps_trace(__func__, __LINE__, __tmp_cte, (msg)); \
+ } \
+} while (0)
+#define TRACE_CAP(trace_cte) TRACE_CAP_MSG(NULL, trace_cte)
+#else
+#define TRACE_CAP_MSG(msg, trace_cte) ((void)0)
+#define TRACE_CAP(trace_cte) ((void)0)
+#endif
+
#endif
sys_dispatcher_setup_guest (struct capability *to,
capaddr_t epp, capaddr_t vnodep,
capaddr_t vmcbp, capaddr_t ctrlp);
-struct sysret sys_monitor_domain_id(capaddr_t cptr, domainid_t domain_id);
struct sysret sys_trace_setup(struct capability *cap, capaddr_t cptr);
struct sysret sys_idcap_identify(struct capability *cap, idcap_id_t *id);
+ struct sysret sys_monitor_spawn_core(coreid_t core_id, enum cpu_type cpu_type,
+ genvaddr_t entry);
+
+ struct sysret sys_kernel_add_kcb(struct kcb* new_kcb);
+ struct sysret sys_kernel_remove_kcb(struct kcb* kcb_addr);
+ struct sysret sys_kernel_suspend_kcb_sched(bool toggle);
+ struct sysret sys_handle_kcb_identify(struct capability* to);
+/*
+ * Monitor syscalls
+ */
+
+struct sysret sys_monitor_register(capaddr_t ep_caddr);
+struct sysret sys_monitor_domain_id(capaddr_t cptr, domainid_t domain_id);
+struct sysret sys_monitor_remote_relations(capaddr_t root_addr, uint8_t root_bits,
+ capaddr_t cptr, uint8_t bits,
+ uint8_t relations, uint8_t mask);
+struct sysret sys_monitor_identify_cap(struct capability *root,
+ capaddr_t cptr, uint8_t bits,
+ struct capability *retbuf);
+struct sysret sys_monitor_nullify_cap(capaddr_t cptr, uint8_t bits);
+struct sysret sys_get_cap_owner(capaddr_t root_addr, uint8_t root_bits,
+ capaddr_t cptr, uint8_t bits);
+struct sysret sys_set_cap_owner(capaddr_t root_addr, uint8_t root_bits,
+ capaddr_t cptr, uint8_t bits, coreid_t owner);
+struct sysret sys_cap_has_relations(capaddr_t caddr, uint8_t vbits, uint8_t mask);
+struct sysret sys_lock_cap(capaddr_t root_addr, uint8_t root_bits,
+ capaddr_t cptr, uint8_t bits);
+struct sysret sys_unlock_cap(capaddr_t root_addr, uint8_t root_bits,
+ capaddr_t cptr, uint8_t bits);
+struct sysret sys_monitor_copy_existing(struct capability *src,
+ capaddr_t cnode_cptr,
+ uint8_t cnode_vbits,
+ cslot_t slot);
+
+/*
+ * Monitor syscalls for delete & revoke
+ */
+
+struct sysret sys_monitor_delete_last(capaddr_t root_addr, uint8_t root_bits,
+ capaddr_t target_addr, uint8_t target_bits,
+ capaddr_t ret_cn_addr, uint8_t ret_cn_bits,
+ cslot_t ret_slot);
+struct sysret sys_monitor_delete_foreigns(capaddr_t cptr, uint8_t bits);
+struct sysret sys_monitor_revoke_mark_tgt(capaddr_t root_addr,
+ uint8_t root_bits,
+ capaddr_t target_addr,
+ uint8_t target_bits);
+struct sysret sys_monitor_revoke_mark_rels(struct capability *base);
+struct sysret sys_monitor_delete_step(capaddr_t ret_cn_addr,
+ uint8_t ret_cn_bits,
+ cslot_t ret_slot);
+struct sysret sys_monitor_clear_step(capaddr_t ret_cn_addr,
+ uint8_t ret_cn_bits,
+ cslot_t ret_slot);
+
#endif
/* Set up root cnode and the caps it contains */
// must be static, because this CTE will be entered into the MDB!
- static struct cte rootcn;
+ // don't want this to be static, as the memory backing the data section of
+ // the kernel can and will disappear when we reboot a core with a
+ // different kernel but want to restore the state
+ struct cte *rootcn = &kcb_current->init_rootcn;
+ mdb_init(kcb_current);
+ kcb_current->is_valid = true;
+ #if defined(CONFIG_SCHEDULER_RR)
+ kcb_current->sched = SCHED_RR;
+ #elif defined(CONFIG_SCHEDULER_RBED)
+ kcb_current->sched = SCHED_RBED;
+ #else
+ #error invalid scheduler
+ #endif
+
err = caps_create_new(ObjType_CNode, alloc_phys(BASE_PAGE_SIZE),
- BASE_PAGE_BITS, DEFAULT_CNODE_BITS, rootcn);
+ BASE_PAGE_BITS, DEFAULT_CNODE_BITS, my_core_id,
- &rootcn);
++ rootcn);
assert(err_is_ok(err));
// Task cnode in root cnode
- st->taskcn = caps_locate_slot(CNODE(&rootcn), ROOTCN_SLOT_TASKCN);
+ st->taskcn = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_TASKCN);
err = caps_create_new(ObjType_CNode, alloc_phys(BASE_PAGE_SIZE),
- BASE_PAGE_BITS, DEFAULT_CNODE_BITS, st->taskcn);
+ BASE_PAGE_BITS, DEFAULT_CNODE_BITS, my_core_id,
+ st->taskcn);
assert(err_is_ok(err));
st->taskcn->cap.u.cnode.guard_size = GUARD_REMAINDER(2 * DEFAULT_CNODE_BITS);
// Page cnode in root cnode
- st->pagecn = caps_locate_slot(CNODE(&rootcn), ROOTCN_SLOT_PAGECN);
+ st->pagecn = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_PAGECN);
err = caps_create_new(ObjType_CNode,
alloc_phys(1UL << (OBJBITS_CTE + PAGE_CNODE_BITS)),
- PAGE_CNODE_BITS + OBJBITS_CTE,
- PAGE_CNODE_BITS, st->pagecn);
+ PAGE_CNODE_BITS + OBJBITS_CTE, PAGE_CNODE_BITS,
+ my_core_id, st->pagecn);
assert(err_is_ok(err));
// Base page cnode in root cnode
- st->basepagecn = caps_locate_slot(CNODE(&rootcn), ROOTCN_SLOT_BASE_PAGE_CN);
+ st->basepagecn = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_BASE_PAGE_CN);
err = caps_create_new(ObjType_CNode, alloc_phys(BASE_PAGE_SIZE),
- BASE_PAGE_BITS, DEFAULT_CNODE_BITS, st->basepagecn);
+ BASE_PAGE_BITS, DEFAULT_CNODE_BITS, my_core_id,
+ st->basepagecn);
assert(err_is_ok(err));
// Super cnode in root cnode
assert(err_is_ok(err));
// Seg cnode in root cnode
- st->segcn = caps_locate_slot(CNODE(&rootcn), ROOTCN_SLOT_SEGCN);
+ st->segcn = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_SEGCN);
err = caps_create_new(ObjType_CNode, alloc_phys(BASE_PAGE_SIZE),
- BASE_PAGE_BITS, DEFAULT_CNODE_BITS, st->segcn);
+ BASE_PAGE_BITS, DEFAULT_CNODE_BITS, my_core_id,
+ st->segcn);
assert(err_is_ok(err));
// Physaddr cnode in root cnode
- st->physaddrcn = caps_locate_slot(CNODE(&rootcn), ROOTCN_SLOT_PACN);
+ st->physaddrcn = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_PACN);
err = caps_create_new(ObjType_CNode,
- alloc_phys(1UL << (OBJBITS_CTE + PHYSADDRCN_BITS)),
- OBJBITS_CTE + PHYSADDRCN_BITS,
- PHYSADDRCN_BITS, st->physaddrcn);
+ alloc_phys(1UL << (OBJBITS_CTE + PHYSADDRCN_BITS)),
+ OBJBITS_CTE + PHYSADDRCN_BITS, PHYSADDRCN_BITS,
+ my_core_id, st->physaddrcn);
assert(err_is_ok(err));
if (arch_core_is_bsp()) {
// Cnode for Boot loaded modules
- st->modulecn = caps_locate_slot(CNODE(&rootcn), ROOTCN_SLOT_MODULECN);
+ st->modulecn = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_MODULECN);
err = caps_create_new(ObjType_CNode,
alloc_phys(1UL << (OBJBITS_CTE + MODULECN_SIZE_BITS)),
- MODULECN_SIZE_BITS + OBJBITS_CTE, MODULECN_SIZE_BITS,
- st->modulecn);
+ MODULECN_SIZE_BITS + OBJBITS_CTE,
+ MODULECN_SIZE_BITS, my_core_id, st->modulecn);
assert(err_is_ok(err));
}
caps_locate_slot(CNODE(st->taskcn), TASKCN_SLOT_IRQ));
assert(err_is_ok(err));
+ // Create capability for IPI sending
+ struct cte *ipicap_cte = caps_locate_slot(CNODE(st->taskcn),
+ TASKCN_SLOT_IPI);
- err = caps_create_new(ObjType_IPI, 0, 0, 0, ipicap_cte);
++ err = caps_create_new(ObjType_IPI, 0, 0, 0, my_core_id, ipicap_cte);
+ assert(err_is_ok(err));
+
/* Initialize dispatcher */
dispatcher_handle_t init_handle
= local_phys_to_mem(init_dispframe_cte->cap.u.frame.base);
#include <barrelfish_kpi/syscalls.h>
#include <capabilities.h>
#include <cap_predicates.h>
+ #include <coreboot.h>
#include <mdb/mdb.h>
+#include <mdb/mdb_tree.h>
#include <dispatch.h>
+#include <distcaps.h>
#include <wakeup.h>
#include <paging_kernel_helper.h>
#include <paging_kernel_arch.h>
return SYSRET(SYS_ERR_OK);
}
+
+ /**
+ * Calls correct handler function to spawn an app core.
+ *
+ * At the moment spawn_core_handlers is set-up per
+ * architecture inside text_init() usually found in init.c.
+ *
+ * \note Generally the x86 terms of BSP and APP core are used
+ * throughout Barrelfish to distinguish between bootstrap core (BSP)
+ * and application cores (APP).
+ *
+ * \param core_id Identifier of the core which we want to boot
+ * \param cpu_type Architecture of the core.
+ * \param entry Entry point for code to start execution.
+ *
+ * \retval SYS_ERR_OK Core successfully booted.
+ * \retval SYS_ERR_ARCHITECTURE_NOT_SUPPORTED No handler registered for
+ * the specified cpu_type.
+ * \retval SYS_ERR_CORE_NOT_FOUND Core failed to boot.
+ */
+ struct sysret sys_monitor_spawn_core(coreid_t core_id, enum cpu_type cpu_type,
+ genvaddr_t entry)
+ {
+ assert(cpu_type < CPU_TYPE_NUM);
+ // TODO(gz): assert core_id valid
+ // TODO(gz): assert entry range?
+
+ if (cpu_type < CPU_TYPE_NUM &&
+ coreboot_get_spawn_handler(cpu_type) == NULL) {
+ assert(!"Architecture not supported -- " \
+ "or you failed to register spawn handler?");
+ return SYSRET(SYS_ERR_ARCHITECTURE_NOT_SUPPORTED);
+ }
+
+ int r = (coreboot_get_spawn_handler(cpu_type))(core_id, entry);
+ if (r != 0) {
+ return SYSRET(SYS_ERR_CORE_NOT_FOUND);
+ }
+
+ return SYSRET(SYS_ERR_OK);
+ }
+
+ struct sysret sys_kernel_add_kcb(struct kcb *new_kcb)
+ {
+ kcb_add(new_kcb);
+
+ // update kernel_now offset
+ new_kcb->kernel_off -= kernel_now;
+ // reset scheduler statistics
+ scheduler_reset_time();
+ // update current core id of all domains
+ kcb_update_core_id(new_kcb);
+ // upcall domains with registered interrupts to tell them to re-register
+ irq_table_notify_domains(new_kcb);
+
+ return SYSRET(SYS_ERR_OK);
+ }
+
+ struct sysret sys_kernel_remove_kcb(struct kcb * to_remove)
+ {
+ return SYSRET(kcb_remove(to_remove));
+ }
+
+ struct sysret sys_kernel_suspend_kcb_sched(bool suspend)
+ {
+ printk(LOG_NOTE, "in kernel_suspend_kcb_sched invocation!\n");
+ kcb_sched_suspended = suspend;
+ return SYSRET(SYS_ERR_OK);
+ }
+
+ struct sysret sys_handle_kcb_identify(struct capability* to)
+ {
+ // Return with physical base address of frame
+ // XXX: pack size into bottom bits of base address
+ assert(to->type == ObjType_KernelControlBlock);
+ lvaddr_t vkcb = (lvaddr_t) to->u.kernelcontrolblock.kcb;
+ assert((vkcb & BASE_PAGE_MASK) == 0);
+
+ return (struct sysret) {
+ .error = SYS_ERR_OK,
+ .value = mem_to_local_phys(vkcb) | OBJBITS_KCB,
+ };
-}
++}
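+
+ /* Editorial sketch (not from this patch): unpacking the value returned by
+  * the KCB identify invocation. Works because the KCB is page-aligned and
+  * OBJBITS_KCB fits in the low bits; 'sr' is a hypothetical sysret. */
+ #if 0 /* example only */
+ genpaddr_t kcb_base = sr.value & ~(genpaddr_t)BASE_PAGE_MASK;
+ uint8_t kcb_bits = sr.value & BASE_PAGE_MASK; /* == OBJBITS_KCB */
+ #endif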
// Use spawnd if spawned through spawnd
if(disp_get_domain_id() == 0) {
+#if 0 // XXX: revocation goes through the mon, but monitor ep is revoked in the process
- errval_t err = cap_revoke(cap_dispatcher);
+ err = cap_revoke(cap_dispatcher);
if (err_is_fail(err)) {
- sys_print("revoking dispatcher failed in _Exit, spinning!", 100);
+ DEBUG_ERR(err, "revoking dispatcher failed in _Exit, spinning!");
+ //sys_print("revoking dispatcher failed in _Exit, spinning!", 100);
while (1) {}
}
err = cap_delete(cap_dispatcher);
// PP switch to change behaviour if invariants fail
#ifdef MDB_FAIL_INVARIANTS
- #define X(i) #i,
- const char *mdb_invariants[] = {
- MDB_INVARIANTS
- };
- #undef X
// on failure, dump mdb and terminate
+__attribute__((noreturn))
static void
- mdb_dump_and_fail(struct cte *cte, enum mdb_inv failure)
-mdb_dump_and_fail(struct cte *cte, int failure)
++mdb_dump_and_fail(struct cte *cte, enum mdb_invariant failure)
{
mdb_dump(cte, 0);
- printf("failed on cte %p with failure %d\n", cte, failure);
+ printf("failed on cte %p with failure %s (%d)\n", cte,
- mdb_invariants[failure], failure);
++ mdb_invariant_to_str(failure), failure);
// XXX: what is "proper" way to always terminate?
+ //mdb_dump_all_the_things();
assert(false);
}
#define MDB_RET_INVARIANT(cte, failure) mdb_dump_and_fail(cte, failure)
} while (0)
#endif
- struct cte *mdb_root = NULL;
+ static struct cte *mdb_root = NULL;
+ #if IN_KERNEL
+ struct kcb *my_kcb = NULL;
+ #endif
+ static void set_root(struct cte *new_root)
+ {
+ mdb_root = new_root;
+ #if IN_KERNEL
+ my_kcb->mdb_root = (lvaddr_t) new_root;
+ #endif
+ }
+
+ /*
+ * (re)initialization
+ */
+ errval_t
+ mdb_init(struct kcb *k)
+ {
+ assert (k != NULL);
+ #if IN_KERNEL
+ #if 0
+ //XXX: write two versions of this; so we can have full sanity checks for
+ //all scenarios -SG
+ if (my_kcb) {
+ printf("MDB has non-null kcb.\n");
+ return CAPS_ERR_MDB_ALREADY_INITIALIZED;
+ }
+ #endif
+ my_kcb = k;
+ if (!my_kcb->is_valid) {
+ // empty kcb, do nothing
+ return SYS_ERR_OK;
+ }
+ #endif
+ // set root
+ mdb_root = (struct cte *)k->mdb_root;
+
+ #if 0
+ // always check invariants here
+ int i = mdb_check_invariants();
+ if (i) {
+ printf("mdb invariant %s violated\n", mdb_invariant_to_str(i));
+ mdb_dump_all_the_things();
+ mdb_root = NULL;
+ return CAPS_ERR_MDB_INVARIANT_VIOLATION;
+ }
+ #endif
+ return SYS_ERR_OK;
+ }
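+
+ // Usage note (editorial): the kernel calls mdb_init(kcb_current) while
+ // setting up the root cnode (see the rootcn hunk above); on a core reboot
+ // a previously valid KCB restores the existing mapping database root.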
+
/*
* Debug printing.
*/
USER_PANIC_ERR(err, "Watching PCI devices.");
}
- // XXX: This is a bit silly, I add this record
- // because it was previously in spawnd so
- // there may be code out there who relies on this
- // It might be better to get rid of this completely
- err = oct_set("all_spawnds_up { iref: 0 }");
- assert(err_is_ok(err));
+ KALUGA_DEBUG("Kaluga: wait_for_all_spawnds\n");
+
+ err = wait_for_all_spawnds();
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "Waiting for all spawnds failed.");
+ }
+
#elif __pandaboard__
- printf("Kaluga running on Pandaboard.\n");
+ debug_printf("Kaluga running on Pandaboard.\n");
err = init_cap_manager();
assert(err_is_ok(err));
*
* \bug Verify that cpu_type matches the elf image
*/
- void boot_core_request(struct monitor_binding *b, coreid_t id, int32_t hwid,
- int32_t int_cpu_type, char *cmdline)
+ void boot_core_request(struct monitor_binding *b, coreid_t id,
+ struct capref frame)
{
errval_t err;
- enum cpu_type cpu_type = (enum cpu_type)int_cpu_type;
- struct intermon_binding *new_binding = NULL;
-
- if (id == my_core_id) {
- err = MON_ERR_SAME_CORE;
- goto out;
- }
++ struct intermon_state *imon_st = NULL;
- if (cpu_type >= CPU_TYPE_NUM) {
- err = SPAWN_ERR_UNKNOWN_TARGET_ARCH;
- goto out;
+ struct intermon_binding *ibind;
+ err = intermon_binding_get(id, &ibind);
+ if (err_is_ok(err)) {
- ((struct intermon_state*)ibind->st)->originating_client = b;
++ imon_st = ibind->st;
++ imon_st->originating_client = b;
++ imon_st->capops_ready = false;
+ return;
}
- printf("Monitor %d: booting %s core %d as '%s'\n", my_core_id,
- cpu_type_to_archstr(cpu_type), id, cmdline);
- /* Assure memory server and chips have initialized */
- assert(mem_serv_iref != 0);
- assert(ramfs_serv_iref != 0);
- assert(name_serv_iref != 0);
- assert(monitor_mem_iref != 0);
+ // Setup new inter-monitor connection to ourselves
+ #ifdef CONFIG_FLOUNDER_BACKEND_UMP_IPI
+ struct intermon_ump_ipi_binding *ump_binding = malloc(sizeof(
+ struct intermon_ump_ipi_binding));
+ #else
+ struct intermon_ump_binding *ump_binding = malloc(sizeof(
+ struct intermon_ump_binding));
+ #endif
+ assert(ump_binding != NULL);
- err = spawn_xcore_monitor(id, hwid, cpu_type, cmdline, &new_binding);
- if(err_is_fail(err)) {
- err = err_push(err, MON_ERR_SPAWN_XCORE_MONITOR);
- goto out;
+ // map it in
+ void *buf;
+ err = vspace_map_one_frame(&buf, MON_URPC_SIZE, frame, NULL, NULL);
+ if (err_is_fail(err)) {
+ err = err_push(err, LIB_ERR_VSPACE_MAP);
+ goto cleanup;
}
- // setup new binding
- assert(new_binding != NULL);
- intermon_init(new_binding, id);
-
- // store client that requested the boot, so we can tell them when it completes
- struct intermon_state *st = new_binding->st;
- st->originating_client = b;
- st->capops_ready = false;
-
- out:
- free(cmdline);
-
- if (err_is_ok(err)) {
- num_monitors++;
- } else {
- DEBUG_CAPOPS("sending boot_core_reply: %s (%"PRIuERRV")\n",
- err_getstring(err), err);
- errval_t err2 = b->tx_vtbl.boot_core_reply(b, NOP_CONT, err);
- if (err_is_fail(err2)) {
- USER_PANIC_ERR(err2, "sending boot_core_reply failed");
- }
+ #ifdef CONFIG_FLOUNDER_BACKEND_UMP_IPI
+ // Get my arch ID
+ uintptr_t my_arch_id = 0;
+ err = invoke_monitor_get_arch_id(&my_arch_id);
+ assert(err == SYS_ERR_OK);
+
+ // Bootee's notify channel ID is always 1
+ struct capref notify_cap;
+ err = notification_create_cap(1, hwid, &notify_cap);
+ assert(err == SYS_ERR_OK);
+
+ // Allocate my own notification caps
+ struct capref ep, my_notify_cap;
+ struct lmp_endpoint *iep;
+ int chanid;
+ err = endpoint_create(LMP_RECV_LENGTH, &ep, &iep);
+ assert(err_is_ok(err));
+ err = notification_allocate(ep, &chanid);
+ assert(err == SYS_ERR_OK);
+ err = notification_create_cap(chanid, my_arch_id, &my_notify_cap);
+ assert(err == SYS_ERR_OK);
+
+ // init our end of the binding and channel
+ err = intermon_ump_ipi_init(ump_binding, get_default_waitset(),
+ buf, MON_URPC_CHANNEL_LEN,
+ (char *)buf + MON_URPC_CHANNEL_LEN,
+ MON_URPC_CHANNEL_LEN, notify_cap,
+ my_notify_cap, ep, iep);
+ #else
+ err = intermon_ump_init(ump_binding, get_default_waitset(),
+ buf, MON_URPC_CHANNEL_LEN,
+ (char *)buf + MON_URPC_CHANNEL_LEN,
+ MON_URPC_CHANNEL_LEN);
+ #endif
+ if (err_is_fail(err)) {
+ err = err_push(err, LIB_ERR_UMP_CHAN_BIND);
+ goto cleanup;
}
- }
-
- /**
- * \brief XXX: This is a hack. Currently, we must know when all cores
- * are booted so that the monitors can initialize with each other,
- * setup routing tables and synchronize clocks.
- */
- void boot_initialize_request(struct monitor_binding *st)
- {
- errval_t err;
- /* Wait for all monitors to initialize. */
- int num_connections = get_num_connections(num_monitors);
- while(num_connections > seen_connections) {
- // This waiting is fine, boot_manager will not send another msg
- // till it gets a reply from this.
- messages_wait_and_handle_next();
+ err = trace_ump_frame_identify(frame, ump_binding,
+ MON_URPC_CHANNEL_LEN);
+ if (err_is_fail(err)) {
+ goto cleanup;
}
- printf("all %d monitors up\n", num_monitors);
+ struct intermon_binding* ib = (struct intermon_binding*)ump_binding;
+ err = intermon_init(ib, id);
++ if (err_is_fail(err)) {
++ goto cleanup;
++ }
- ((struct intermon_state*)ib->st)->originating_client = b;
++ imon_st = ib->st;
++ imon_st->originating_client = b;
++ imon_st->capops_ready = false;
- #ifndef __scc__
- if(num_monitors > 1) {
- printf("monitor: synchronizing clocks\n");
- err = timing_sync_timer();
- assert(err_is_ok(err) || err_no(err) == SYS_ERR_SYNC_MISS);
- if(err_no(err) == SYS_ERR_SYNC_MISS) {
- printf("monitor: failed to sync clocks. Bad reference clock?\n");
- }
- }
- #endif
+ return;
- err = st->tx_vtbl.boot_initialize_reply(st, NOP_CONT);
+ cleanup:
if (err_is_fail(err)) {
- USER_PANIC_ERR(err, "boot_initialize_reply failed");
+ // Cleanup
+ DEBUG_ERR(err, "Failed to register intermon binding.");
+ cap_destroy(frame);
+ free(ump_binding);
}
+
+ errval_t err2 = b->tx_vtbl.boot_core_reply(b, NOP_CONT, err);
+ if (err_is_fail(err2)) {
+ USER_PANIC_ERR(err2, "sending boot_core_reply failed");
+ }
+ }
+ #else
+ void boot_core_request(struct monitor_binding *b, coreid_t id,
+ struct capref frame) {
+ printf("%s:%s:%d: unable to handle: boot_core_request\n",
+ __FILE__, __FUNCTION__, __LINE__);
}
+ #endif
--- /dev/null
+/*
+ * Copyright (c) 2012 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+#include "capsend.h"
+#include "monitor.h"
+#include "magic.h"
+#include "capops.h"
+#include "internal.h"
+
+/*
+ * Single-cast {{{1
+ */
+
+errval_t
+capsend_target(coreid_t dest, struct msg_queue_elem *queue_elem)
+{
+ errval_t err;
+
+ // get destination intermon_binding and _state
+ struct intermon_binding *dest_b;
+ err = intermon_binding_get(dest, &dest_b);
+ if (err_is_fail(err)) {
+ return err;
+ }
+ DEBUG_CAPOPS("capsend_target: ->%d (%p)\n", dest, queue_elem);
+ struct intermon_state *inter_st = (struct intermon_state*)dest_b->st;
+ if (!inter_st->capops_ready) {
+ // XXX: custom error value
+ return MON_ERR_CAPOPS_BUSY;
+ }
+
+ // enqueue message
+ return intermon_enqueue_send(dest_b, &inter_st->queue, dest_b->waitset, queue_elem);
+}
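+
+/* Example usage (sketch): callers allocate a message state struct whose
+ * first member is an intermon_msg_queue_elem, set its continuation, and
+ * enqueue it, as the *_result helpers below do:
+ *
+ *   msg_st->queue_elem.cont = my_send_cont;   // hypothetical continuation
+ *   err = capsend_target(dest, (struct msg_queue_elem*)msg_st);
+ *   if (err_is_fail(err)) { free(msg_st); }
+ */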
+
+errval_t
+capsend_owner(struct domcapref capref, struct msg_queue_elem *queue_elem)
+{
+ errval_t err;
+
+ // read cap owner
+ coreid_t owner;
+ err = monitor_get_domcap_owner(capref, &owner);
+ if (err_is_fail(err)) {
+ return err;
+ }
+
+ // enqueue to owner
+ return capsend_target(owner, queue_elem);
+}
+
+/*
+ * Multicast helpers {{{1
+ */
+
+struct capsend_mc_msg_st;
+struct capsend_mc_st;
+
+typedef errval_t (*capsend_mc_send_cont_t)(struct intermon_binding*, struct capsend_mc_st*);
+
+struct capsend_mc_msg_st {
+ struct intermon_msg_queue_elem queue_elem;
+ struct capsend_mc_st *mc_st;
+ coreid_t dest;
+};
+
+static void
+capsend_mc_send_cont(struct intermon_binding *b, struct intermon_msg_queue_elem *e)
+{
+ struct capsend_mc_msg_st *msg_st = (struct capsend_mc_msg_st*)e;
+ struct capsend_mc_st *mc_st = msg_st->mc_st;
+ errval_t err = SYS_ERR_OK;
+
+ // if do_send is false, an error occurred in the multicast setup, so do
+ // not send anything
+ if (mc_st->do_send) {
+ err = mc_st->send_fn(b, &mc_st->caprep, mc_st);
+ }
+
+ if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
+ err = capsend_target(msg_st->dest, (struct msg_queue_elem*)msg_st);
+ }
+
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "sending dequeued capops message");
+ }
+
+ // decrement counter of number of queued messages
+ if (!--mc_st->num_queued) {
+ // if counter is zero, cleanup outgoing memory
+ free(mc_st->msg_st_arr);
+ mc_st->msg_st_arr = NULL;
+ if (!mc_st->do_send || !mc_st->num_pending) {
+ // if the send has been aborted, also cleanup cross-call state
+ free(mc_st);
+ }
+ }
+}
+
+static errval_t
+capsend_mc_enqueue(struct capsend_mc_st *mc_st, coreid_t dest)
+{
+ errval_t err;
+
+ // get next msg_st
+ struct capsend_mc_msg_st *msg_st = &mc_st->msg_st_arr[mc_st->num_queued];
+ msg_st->queue_elem.cont = capsend_mc_send_cont;
+ msg_st->mc_st = mc_st;
+ msg_st->dest = dest;
+
+ err = capsend_target(dest, (struct msg_queue_elem*)msg_st);
+ if (err_is_ok(err)) {
+ // count successful enqueue
+ mc_st->num_queued++;
+ if (mc_st->num_pending >= 0) {
+ // also track number of pending exchanges if requested
+ mc_st->num_pending++;
+ }
+ }
+ return err;
+}
+
+static errval_t
+capsend_mc_init(struct capsend_mc_st *mc_st, struct capability *cap,
+ capsend_send_fn send_fn,
+ size_t num_dests, bool track_pending)
+{
+ mc_st->num_queued = 0;
+ mc_st->num_pending = track_pending ? 0 : -1;
+ mc_st->do_send = true;
+ mc_st->send_fn = send_fn;
+ if (cap) {
+ capability_to_caprep(cap, &mc_st->caprep);
+ }
+ mc_st->msg_st_arr = calloc(num_dests, sizeof(*mc_st->msg_st_arr));
+ if (!mc_st->msg_st_arr) {
+ return LIB_ERR_MALLOC_FAIL;
+ }
+ return SYS_ERR_OK;
+}
+
+bool capsend_handle_mc_reply(struct capsend_mc_st *st)
+{
+ // return true iff st->num_pending == 0 after acking one more reply
+ return --st->num_pending == 0;
+}
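+
+/* Example usage (sketch): a reply rx handler calls this to detect broadcast
+ * completion, as find_cap_result__rx_handler below does:
+ *
+ *   if (capsend_handle_mc_reply(&st->bc)) {   // st embeds a capsend_mc_st
+ *       // all replies are in: report the final result and free st
+ *   }
+ */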
+
+/*
+ * Broadcast helpers {{{2
+ */
+
+static errval_t
+capsend_broadcast(struct capsend_mc_st *bc_st, struct capability *cap, capsend_send_fn send_cont)
+{
++ if (num_monitors == -1) {
++ USER_PANIC_ERR(ERR_NOTIMP,
++ "do not have number of online monitors, cannot do broadcast");
++ }
+ errval_t err;
+ // do not count self when calculating #dest cores
+ int dest_count = num_monitors - 1;
+ DEBUG_CAPOPS("%s: dest_count = %d\n", __FUNCTION__, dest_count);
+ DEBUG_CAPOPS("%s: num_queued = %d\n", __FUNCTION__, bc_st->num_queued);
+ DEBUG_CAPOPS("%s: num_pending = %d\n", __FUNCTION__, bc_st->num_pending);
+ err = capsend_mc_init(bc_st, cap, send_cont, dest_count, true);
+ if (err_is_fail(err)) {
+ free(bc_st);
+ return err;
+ }
+
+ for (coreid_t dest = 0; dest < MAX_COREID && bc_st->num_queued < dest_count; dest++)
+ {
+ if (dest == my_core_id) {
+ // do not send to self
+ continue;
+ }
+ err = capsend_mc_enqueue(bc_st, dest);
+ if (err_no(err) == MON_ERR_NO_MONITOR_FOR_CORE) {
+ // no connection for this core, skip
+ continue;
+ }
+ else if (err_is_fail(err)) {
+ // failure, disable broadcast
+ bc_st->do_send = false;
+ if (!bc_st->num_queued) {
+ // only clean up if no messages have been enqueued
+ free(bc_st->msg_st_arr);
+ free(bc_st);
+ }
+ return err;
+ }
+ }
+
+ if (!bc_st->num_pending && dest_count > 1) {
+ // XXX: needs sane error -SG
+ return MON_ERR_NO_MONITOR_FOR_CORE;
+ }
+
+ return SYS_ERR_OK;
+}
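+
+// Design note: callers embed struct capsend_mc_st as the *first* member of
+// their per-operation state (see find_cap_broadcast_st and
+// update_owner_broadcast_st below) so the state can be cast back and forth.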
+
+/*
+ * Find relations {{{1
+ */
+
+/*
+ * Find copies broadcast {{{2
+ */
+
+struct find_cap_broadcast_msg_st;
+
+struct find_cap_broadcast_st {
+ struct capsend_mc_st bc;
+ capsend_find_cap_result_fn result_handler;
+ bool found;
+ void *st;
+};
+
+static errval_t
+find_cap_broadcast_send_cont(struct intermon_binding *b, intermon_caprep_t *caprep, struct capsend_mc_st *st)
+{
+ return intermon_capops_find_cap__tx(b, NOP_CONT, *caprep, (uintptr_t)st);
+}
+
+errval_t
+capsend_find_cap(struct capability *cap, capsend_find_cap_result_fn result_handler, void *st)
+{
+ struct find_cap_broadcast_st *bc_st = calloc(1, sizeof(struct find_cap_broadcast_st));
+ if (!bc_st) {
+ return LIB_ERR_MALLOC_FAIL;
+ }
+ bc_st->result_handler = result_handler;
+ bc_st->found = false;
+ bc_st->st = st;
+
+ return capsend_broadcast((struct capsend_mc_st*)bc_st, cap, find_cap_broadcast_send_cont);
+}
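+
+/* Example usage (sketch; my_find_handler is hypothetical):
+ *
+ *   static void my_find_handler(errval_t status, coreid_t core, void *st)
+ *   {
+ *       // SYS_ERR_OK with the core holding a copy, or
+ *       // SYS_ERR_CAP_NOT_FOUND if no remote copy exists
+ *   }
+ *
+ *   err = capsend_find_cap(&cap, my_find_handler, NULL);
+ */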
+
+/*
+ * Find copies result {{{2
+ */
+
+struct find_cap_result_msg_st {
+ struct intermon_msg_queue_elem queue_elem;
+ errval_t result;
+ genvaddr_t st;
+};
+
+static void
+find_cap_result_send_cont(struct intermon_binding *b, struct intermon_msg_queue_elem *e)
+{
+ errval_t err;
+ struct find_cap_result_msg_st *msg_st = (struct find_cap_result_msg_st*)e;
+
+ err = intermon_capops_find_cap_result__tx(b, NOP_CONT, msg_st->result, msg_st->st);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "failed to send find_cap_result message");
+ }
+ free(msg_st);
+}
+
+static errval_t
+find_cap_result(coreid_t dest, errval_t result, genvaddr_t st)
+{
+ errval_t err;
+ struct find_cap_result_msg_st *msg_st = calloc(1, sizeof(struct find_cap_result_msg_st));
+ if (!msg_st) {
+ return LIB_ERR_MALLOC_FAIL;
+ }
+ msg_st->queue_elem.cont = find_cap_result_send_cont;
+ msg_st->result = result;
+ msg_st->st = st;
+
+ err = capsend_target(dest, (struct msg_queue_elem*)msg_st);
+ if (err_is_fail(err)) {
+ free(msg_st);
+ }
+
+ return err;
+}
+
+/*
+ * Find copies receive handlers {{{2
+ */
+
+void
+find_cap__rx_handler(struct intermon_binding *b, intermon_caprep_t caprep, genvaddr_t st)
+{
+ errval_t err, cleanup_err;
+ struct intermon_state *inter_st = (struct intermon_state*)b->st;
+ coreid_t from = inter_st->core_id;
+ struct capability cap;
+ caprep_to_capability(&caprep, &cap);
+ struct capref capref;
+
+ err = slot_alloc(&capref);
+ if (err_is_fail(err)) {
+ goto send_err;
+ }
+
+ err = monitor_copy_if_exists(&cap, capref);
+ if (err_is_fail(err)) {
+ goto free_slot;
+ }
+
+ cleanup_err = cap_delete(capref);
+ if (err_is_fail(cleanup_err)) {
+ USER_PANIC_ERR(err, "failed to delete temporary cap");
+ }
+
+free_slot:
+ cleanup_err = slot_free(capref);
+ if (err_is_fail(cleanup_err)) {
+ USER_PANIC_ERR(err, "failed to free slot for temporary cap");
+ }
+
+send_err:
+ cleanup_err = find_cap_result(from, err, st);
+ if (err_is_fail(cleanup_err)) {
+ USER_PANIC_ERR(err, "failed to send find_cap result");
+ }
+}
+
+void
+find_cap_result__rx_handler(struct intermon_binding *b, errval_t result, genvaddr_t st)
+{
+ // if we receive a positive result, immediately forward to caller
+ lvaddr_t lst = (lvaddr_t)st;
+ struct find_cap_broadcast_st *fc_bc_st = (struct find_cap_broadcast_st*)lst;
+ if (err_is_ok(result)) {
+ if (!fc_bc_st->found) {
+ fc_bc_st->found = true;
+ struct intermon_state *inter_st = (struct intermon_state*)b->st;
+ coreid_t from = inter_st->core_id;
+ fc_bc_st->result_handler(SYS_ERR_OK, from, fc_bc_st->st);
+ }
+ }
+ else if (err_no(result) != SYS_ERR_CAP_NOT_FOUND) {
+ DEBUG_ERR(result, "ignoring bad find_cap_result");
+ }
+
+ // check to see if broadcast is complete
+ if (capsend_handle_mc_reply(&fc_bc_st->bc)) {
+ if (!fc_bc_st->found) {
+ // broadcast did not find a core with a copy, report not found to caller
+ fc_bc_st->result_handler(SYS_ERR_CAP_NOT_FOUND, 0, fc_bc_st->st);
+ }
+ free(fc_bc_st);
+ }
+}
+
+/*
+ * Find descendants
+ */
+
+struct find_descendants_mc_st {
+ struct capsend_mc_st mc_st;
+ capsend_result_fn result_fn;
+ void *st;
+ bool have_result;
+};
+
+static errval_t
+find_descendants_send_cont(struct intermon_binding *b, intermon_caprep_t *caprep, struct capsend_mc_st *mc_st)
+{
+ lvaddr_t lst = (lvaddr_t)mc_st;
+ return intermon_capops_find_descendants__tx(b, NOP_CONT, *caprep, (genvaddr_t)lst);
+}
+
+errval_t
+capsend_find_descendants(struct domcapref src, capsend_result_fn result_fn, void *st)
+{
+ errval_t err;
+
+ struct capability cap;
+ err = monitor_domains_cap_identify(src.croot, src.cptr, src.bits, &cap);
+ if (err_is_fail(err)) {
+ return err;
+ }
+
+ struct find_descendants_mc_st *mc_st;
+ mc_st = malloc(sizeof(*mc_st));
+ if (!mc_st) {
+ return LIB_ERR_MALLOC_FAIL;
+ }
+
+ mc_st->result_fn = result_fn;
+ mc_st->st = st;
+ mc_st->have_result = false;
+ return capsend_relations(&cap, find_descendants_send_cont, (struct capsend_mc_st*)mc_st);
+}
+
+
+struct find_descendants_result_msg_st {
+ struct intermon_msg_queue_elem queue_elem;
+ errval_t status;
+ genvaddr_t st;
+};
+
+static void
+find_descendants_result_send_cont(struct intermon_binding *b, struct intermon_msg_queue_elem *e)
+{
+ errval_t err;
+ struct find_descendants_result_msg_st *msg_st;
+ msg_st = (struct find_descendants_result_msg_st*)e;
+ err = intermon_capops_find_descendants_result__tx(b, NOP_CONT, msg_st->status, msg_st->st);
+ free(msg_st);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "could not send find_descendants_result");
+ }
+}
+
+void
+find_descendants__rx_handler(struct intermon_binding *b, intermon_caprep_t caprep, genvaddr_t st)
+{
+ errval_t err;
+
+ struct intermon_state *inter_st = (struct intermon_state*)b->st;
+ coreid_t from = inter_st->core_id;
+
+ struct capability cap;
+ caprep_to_capability(&caprep, &cap);
+
+ bool has_descendants;
+ err = monitor_has_descendants(&cap, &has_descendants);
+ assert(err_is_ok(err));
+
+ struct find_descendants_result_msg_st *msg_st;
+ msg_st = malloc(sizeof(*msg_st));
+ if (!msg_st) {
+ err = LIB_ERR_MALLOC_FAIL;
+ USER_PANIC_ERR(err, "could not alloc find_descendants_result_msg_st");
+ }
+ msg_st->queue_elem.cont = find_descendants_result_send_cont;
+ msg_st->st = st;
+
+ if (err_is_ok(err)) {
+ err = has_descendants ? SYS_ERR_OK : SYS_ERR_CAP_NOT_FOUND;
+ }
+ msg_st->status = err;
+
+ err = capsend_target(from, (struct msg_queue_elem*)msg_st);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "could not enqueue find_descendants_result msg");
+ }
+}
+
+void
+find_descendants_result__rx_handler(struct intermon_binding *b, errval_t status, genvaddr_t st)
+{
+ lvaddr_t lst = (lvaddr_t) st;
+ struct find_descendants_mc_st *mc_st = (struct find_descendants_mc_st*)lst;
+
+ if (err_is_ok(status)) {
+ // found result
+ if (!mc_st->have_result) {
+ mc_st->have_result = true;
+ mc_st->result_fn(SYS_ERR_OK, mc_st->st);
+ }
+ }
+ else if (err_no(status) != SYS_ERR_CAP_NOT_FOUND) {
+ DEBUG_ERR(status, "ignoring bad find_descendants result");
+ }
+
+ if (capsend_handle_mc_reply(&mc_st->mc_st)) {
+ if (!mc_st->have_result) {
+ mc_st->result_fn(SYS_ERR_CAP_NOT_FOUND, mc_st->st);
+ }
+ free(mc_st);
+ }
+}
+
+
+/*
+ * Ownership update {{{1
+ */
+
+/*
+ * Update owner broadcast {{{2
+ */
+
+struct update_owner_broadcast_st {
+ struct capsend_mc_st bc;
+ struct event_closure completion_continuation;
+};
+
+static errval_t
+update_owner_broadcast_send_cont(struct intermon_binding *b, intermon_caprep_t *caprep, struct capsend_mc_st *bc_st)
+{
+ lvaddr_t lst = (lvaddr_t)bc_st;
+ return intermon_capops_update_owner__tx(b, NOP_CONT, *caprep, (genvaddr_t)lst);
+}
+
+errval_t
+capsend_update_owner(struct domcapref capref, struct event_closure completion_continuation)
+{
+ errval_t err;
+ struct capability cap;
+ err = monitor_domains_cap_identify(capref.croot, capref.cptr, capref.bits,
+ &cap);
+ if (err_is_fail(err)) {
+ return err;
+ }
+
+ struct update_owner_broadcast_st *bc_st = calloc(1, sizeof(struct update_owner_broadcast_st));
+ if (!bc_st) {
+ return LIB_ERR_MALLOC_FAIL;
+ }
+ bc_st->completion_continuation = completion_continuation;
+
+ return capsend_broadcast((struct capsend_mc_st*)bc_st, &cap, update_owner_broadcast_send_cont);
+}
+
+/*
+ * Owner updated response {{{2
+ */
+
+struct owner_updated_msg_st {
+ struct intermon_msg_queue_elem queue_elem;
+ genvaddr_t st;
+};
+
+static void
+owner_updated_send_cont(struct intermon_binding *b, struct intermon_msg_queue_elem *e)
+{
+ errval_t err;
+ struct owner_updated_msg_st *msg_st = (struct owner_updated_msg_st*)e;
+
+ err = intermon_capops_owner_updated__tx(b, NOP_CONT, msg_st->st);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "failed to send owner_updated message");
+ }
+ free(msg_st);
+}
+
+static errval_t
+owner_updated(coreid_t owner, genvaddr_t st)
+{
+ errval_t err;
+ struct owner_updated_msg_st *msg_st = calloc(1, sizeof(struct owner_updated_msg_st));
+ if (!msg_st) {
+ return LIB_ERR_MALLOC_FAIL;
+ }
+ msg_st->queue_elem.cont = owner_updated_send_cont;
+ msg_st->st = st;
+
+ err = capsend_target(owner, (struct msg_queue_elem*)msg_st);
+ if (err_is_fail(err)) {
+ free(msg_st);
+ }
+
+ return err;
+}
+
+/*
+ * Receive handlers {{{2
+ */
+
+void
+owner_updated__rx_handler(struct intermon_binding *b, genvaddr_t st)
+{
+ lvaddr_t lst = (lvaddr_t)st;
+ struct update_owner_broadcast_st *uo_bc_st = (struct update_owner_broadcast_st*)lst;
+ if (!capsend_handle_mc_reply(&uo_bc_st->bc)) {
+ // broadcast is not complete
+ return;
+ }
+ struct event_closure *cl = &uo_bc_st->completion_continuation;
+ cl->handler(cl->arg);
+ free(uo_bc_st);
+}
+
+void
+update_owner__rx_handler(struct intermon_binding *b, intermon_caprep_t caprep, genvaddr_t st)
+{
+ errval_t err;
+ struct intermon_state *inter_st = (struct intermon_state*)b->st;
+ coreid_t from = inter_st->core_id;
+ struct capref capref;
+ struct capability cap;
+ caprep_to_capability(&caprep, &cap);
+
+ err = slot_alloc(&capref);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "failed to allocate slot for owner update");
+ }
+
+ err = monitor_copy_if_exists(&cap, capref);
+ if (err_is_ok(err)) {
+ err = monitor_set_cap_owner(cap_root, get_cap_addr(capref),
+ get_cap_valid_bits(capref), from);
+ }
+ if (err_no(err) == SYS_ERR_CAP_NOT_FOUND) {
+ err = SYS_ERR_OK;
+ }
+
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "failed to update cap ownership");
+ }
+
+ cap_destroy(capref);
+
+ err = owner_updated(from, st);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "failed to send ownership update response");
+ }
+}
+
+/*
+ * Send to all relations of cap {{{1
+ */
+
+errval_t
+capsend_copies(struct capability *cap,
+ capsend_send_fn send_fn,
+ struct capsend_mc_st *mc_st)
+{
+ // this is currently just a broadcast
+ return capsend_broadcast(mc_st, cap, send_fn);
+}
+
+errval_t
+capsend_relations(struct capability *cap,
+ capsend_send_fn send_fn,
+ struct capsend_mc_st *mc_st)
+{
+ // this is currently just a broadcast
+ return capsend_broadcast(mc_st, cap, send_fn);
+}
--- /dev/null
+#include <if/intermon_defs.h>
+#include <capops.h>
+#include "monitor_debug.h"
+#include "internal.h"
+#include "delete_int.h"
+
++// XXX: placeholder so this merge compiles; capsend_broadcast() obviously
++// will not work until num_monitors is set to its real value!
++int num_monitors = -1;
++
+errval_t capops_init(struct waitset *ws, struct intermon_binding *b)
+{
+ DEBUG_CAPOPS("%s\n", __FUNCTION__);
+
+ assert(ws != NULL);
+
+ b->rx_vtbl.capops_request_copy = request_copy__rx;
+ b->rx_vtbl.capops_recv_copy = recv_copy__rx;
+ b->rx_vtbl.capops_recv_copy_result = recv_copy_result__rx;
+ b->rx_vtbl.capops_move_request = move_request__rx_handler;
+ b->rx_vtbl.capops_move_result = move_result__rx_handler;
+ b->rx_vtbl.capops_retrieve_request = retrieve_request__rx;
+ b->rx_vtbl.capops_retrieve_result = retrieve_result__rx;
+ b->rx_vtbl.capops_delete_remote = delete_remote__rx;
+ b->rx_vtbl.capops_delete_remote_result = delete_remote_result__rx;
+ b->rx_vtbl.capops_revoke_mark = revoke_mark__rx;
+ b->rx_vtbl.capops_revoke_ready = revoke_ready__rx;
+ b->rx_vtbl.capops_revoke_commit = revoke_commit__rx;
+ b->rx_vtbl.capops_revoke_done = revoke_done__rx;
+ b->rx_vtbl.capops_request_retype = retype_request__rx;
+ b->rx_vtbl.capops_retype_response = retype_response__rx;
+ b->rx_vtbl.capops_update_owner = update_owner__rx_handler;
+ b->rx_vtbl.capops_owner_updated = owner_updated__rx_handler;
+ b->rx_vtbl.capops_find_cap = find_cap__rx_handler;
+ b->rx_vtbl.capops_find_cap_result = find_cap_result__rx_handler;
+ b->rx_vtbl.capops_find_descendants = find_descendants__rx_handler;
+ b->rx_vtbl.capops_find_descendants_result = find_descendants_result__rx_handler;
+
+ delete_steps_init(ws);
+
+ return SYS_ERR_OK;
+}
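+
+// Note: capops_init() registers the capops receive handlers on one intermon
+// binding; capsend_target() refuses to enqueue messages to a binding whose
+// capops_ready flag has not been set yet.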
--- /dev/null
+/**
+ * \file
+ * \brief Capability invocations specific to the monitors
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef MONITOR_INVOCATIONS_ARCH_H
+#define MONITOR_INVOCATIONS_ARCH_H
+
+#include <barrelfish/syscall_arch.h>
+#include <barrelfish/caddr.h>
+#include <barrelfish/invocations_arch.h>
+#include <barrelfish_kpi/cpu.h>
+#include <barrelfish_kpi/syscalls.h>
+#include "monitor_debug.h"
+
- /**
- * \brief Spawn a new core.
- *
- * \param cur_kern Cap of the current kernel
- * \param core_id APIC ID of the core to try booting
- * \param sp_mem Cap to Ram type memory to relocate the new kernel
- * \param dcb Cap to the dcb of the user program to run on the new kernel
- * \param root_vbits Number of valid bits in root_cptr
- * \param root_cptr Cap to the root of cspace of the new user program
- * \param vtree Cap to the vtree root of the new user program
- * \param dispatcher Cap to the dispatcher of the new user program
- * \param entry Kernel entry point in physical memory
- */
- //XXX: workaround for inline bug of arm-gcc 4.6.1 and lower
- #if defined(__ARM_ARCH_7A__) && defined(__GNUC__) \
- && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 && __GNUC_PATCHLEVEL__ <= 1
- static __attribute__((noinline, unused)) errval_t
- #else
- static inline errval_t
- #endif
- invoke_monitor_spawn_core(coreid_t core_id, enum cpu_type cpu_type,
- forvaddr_t entry)
- {
- DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
- uint8_t invoke_bits = get_cap_valid_bits(cap_kernel);
- capaddr_t invoke_cptr = get_cap_addr(cap_kernel) >> (CPTR_BITS - invoke_bits);
-
- return syscall6((invoke_bits << 16) | (KernelCmd_Spawn_core << 8)
- | SYSCALL_INVOKE, invoke_cptr, core_id, cpu_type,
- (uintptr_t)(entry >> 32), (uintptr_t) entry).error;
- }
-
+static inline errval_t
+invoke_monitor_identify_cap(capaddr_t cap, int bits, struct capability *out)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ uint8_t invoke_bits = get_cap_valid_bits(cap_kernel);
+ capaddr_t invoke_cptr = get_cap_addr(cap_kernel) >> (CPTR_BITS - invoke_bits);
+
+ return syscall5((invoke_bits << 16) | (KernelCmd_Identify_cap << 8)
+ | SYSCALL_INVOKE, invoke_cptr, cap, bits,
+ (uintptr_t)out).error;
+}
+
+static inline errval_t
+invoke_monitor_identify_domains_cap(capaddr_t root_cap, int root_bits,
+ capaddr_t cap, int bits,
+ struct capability *out)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ uint8_t invoke_bits = get_cap_valid_bits(cap_kernel);
+ capaddr_t invoke_cptr = get_cap_addr(cap_kernel) >> (CPTR_BITS - invoke_bits);
+
+ return syscall7((invoke_bits << 16) | (KernelCmd_Identify_domains_cap << 8)
+ | SYSCALL_INVOKE, invoke_cptr, root_cap, root_bits,
+ cap, bits, (uintptr_t)out).error;
+}
+
+static inline errval_t
+invoke_monitor_nullify_cap(capaddr_t cap, int bits)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ uint8_t invoke_bits = get_cap_valid_bits(cap_kernel);
+ capaddr_t invoke_cptr = get_cap_addr(cap_kernel) >> (CPTR_BITS - invoke_bits);
+
+ return syscall4((invoke_bits << 16) | (KernelCmd_Nullify_cap << 8)
+ | SYSCALL_INVOKE, invoke_cptr, cap, bits).error;
+}
+
+static inline errval_t
+invoke_monitor_create_cap(uint64_t *raw, capaddr_t caddr, int bits, capaddr_t slot, coreid_t owner)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ return cap_invoke6(cap_kernel, KernelCmd_Create_cap, caddr, bits, slot,
+ owner, (uintptr_t)raw).error;
+}
+
+static inline errval_t
+invoke_monitor_register(struct capref ep)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ uint8_t invoke_bits = get_cap_valid_bits(cap_kernel);
+ capaddr_t invoke_cptr = get_cap_addr(cap_kernel) >> (CPTR_BITS - invoke_bits);
+
+ return syscall3((invoke_bits << 16) | (KernelCmd_Register << 8)
+ | SYSCALL_INVOKE, invoke_cptr, get_cap_addr(ep)).error;
+}
+
+static inline errval_t
+invoke_monitor_remote_cap_retype(capaddr_t rootcap_addr, uint8_t rootcap_vbits,
+ capaddr_t src, enum objtype newtype,
+ int objbits, capaddr_t to, capaddr_t slot,
+ int bits)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ return cap_invoke7(cap_kernel, KernelCmd_Retype,
+ src, (newtype << 16) | (objbits << 8) | bits, to, slot,
+ rootcap_addr, rootcap_vbits).error;
+}
+
+static inline errval_t
+invoke_monitor_get_cap_owner(capaddr_t root, int rbits, capaddr_t cap, int cbits, coreid_t *ret_owner)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ struct sysret sysret = cap_invoke5(cap_kernel, KernelCmd_Get_cap_owner,
+ root, rbits, cap, cbits);
+ if (err_is_ok(sysret.error)) {
+ *ret_owner = sysret.value;
+ }
+ return sysret.error;
+}
+
+static inline errval_t
+invoke_monitor_set_cap_owner(capaddr_t root, int rbits, capaddr_t cap, int cbits, coreid_t owner)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ return cap_invoke6(cap_kernel, KernelCmd_Set_cap_owner, root, rbits, cap, cbits, owner).error;
+}
+
+
+static inline errval_t
+invoke_monitor_remote_relations(capaddr_t root_cap, int root_bits,
+ capaddr_t cap, int bits,
+ uint8_t relations, uint8_t mask,
+ uint8_t *ret_remote_relations)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ struct sysret r = cap_invoke6(cap_kernel, KernelCmd_Remote_relations,
+ root_cap, root_bits, cap, bits,
+ ((uint16_t)relations) | (((uint16_t)mask)<<8));
+ if (err_is_ok(r.error) && ret_remote_relations) {
+ *ret_remote_relations = r.value;
+ }
+ return r.error;
+}
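+
+/* Example usage (sketch): mark a cap as having remote copies, as
+ * cap_set_remote does via monitor_remote_relations:
+ *
+ *   err = invoke_monitor_remote_relations(root, rbits, cap, bits,
+ *                                         RRELS_COPY_BIT, RRELS_COPY_BIT,
+ *                                         NULL);
+ *
+ * The low byte of the packed argument carries the new relation bits, the
+ * high byte the mask of bits to change.
+ */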
+
+static inline errval_t
+invoke_monitor_cap_has_relations(capaddr_t caddr, uint8_t bits, uint8_t mask, uint8_t *res)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ assert(res);
+ struct sysret ret = cap_invoke4(cap_kernel, KernelCmd_Cap_has_relations,
+ caddr, bits, mask);
+ if (err_is_ok(ret.error)) {
+ *res = ret.value;
+ }
+ return ret.error;
+}
+
+
+static inline errval_t
+invoke_monitor_lock_cap(capaddr_t root, int rbits, capaddr_t cap, int cbits)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ return cap_invoke5(cap_kernel, KernelCmd_Lock_cap, root, rbits, cap, cbits).error;
+}
+
+static inline errval_t
+invoke_monitor_unlock_cap(capaddr_t root, int rbits, capaddr_t cap, int cbits)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ return cap_invoke5(cap_kernel, KernelCmd_Unlock_cap, root, rbits, cap, cbits).error;
+}
+
+static inline errval_t
+invoke_monitor_delete_last(capaddr_t root, int rbits, capaddr_t cap, int cbits,
+ capaddr_t retcn, int retcnbits, cslot_t retslot)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ assert(rbits <= 0xff);
+ assert(cbits <= 0xff);
+ assert(retcnbits <= 0xff);
+
+ return cap_invoke6(cap_kernel, KernelCmd_Delete_last, root, cap,
+ retcn, retslot, ((cbits<<16)|(rbits<<8)|retcnbits)).error;
+}
+
+static inline errval_t
+invoke_monitor_delete_foreigns(capaddr_t cap, int bits)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ return cap_invoke3(cap_kernel, KernelCmd_Delete_foreigns, cap, bits).error;
+}
+
+static inline errval_t
+invoke_monitor_revoke_mark_target(capaddr_t root, int rbits,
+ capaddr_t cap, int cbits)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ return cap_invoke5(cap_kernel, KernelCmd_Revoke_mark_target,
+ root, rbits, cap, cbits).error;
+}
+
+static inline errval_t
+invoke_monitor_revoke_mark_relations(uint64_t *raw_base)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ // XXX: this is assumed in client code of this function!
+ assert(sizeof(struct capability) / sizeof(uint64_t) <= 4);
+ return cap_invoke2(cap_kernel, KernelCmd_Revoke_mark_relations,
+ (uintptr_t)raw_base).error;
+}
+
+static inline errval_t
+invoke_monitor_delete_step(capaddr_t retcn, int retcnbits, cslot_t retslot)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ return cap_invoke4(cap_kernel, KernelCmd_Delete_step,
+ retcn, retcnbits, retslot).error;
+}
+
+static inline errval_t
+invoke_monitor_clear_step(capaddr_t retcn, int retcnbits, cslot_t retslot)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ return cap_invoke4(cap_kernel, KernelCmd_Clear_step,
+ retcn, retcnbits, retslot).error;
+}
+
+static inline errval_t
+invoke_monitor_has_descendants(uint64_t *raw, bool *res)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ // XXX: this is assumed in client code of this function!
+ assert(sizeof(struct capability) / sizeof(uint64_t) <= 4);
+
+ struct sysret sysret;
+ sysret = cap_invoke2(cap_kernel, KernelCmd_Has_descendants,
+ (uintptr_t)raw);
+ if (err_is_ok(sysret.error)) {
+ *res = sysret.value;
+ }
+ return sysret.error;
+}
+
+
+/**
+ * \brief Set up tracing in the kernel
+ *
+ */
+static inline errval_t
+invoke_trace_setup(struct capref cap)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ USER_PANIC("NYI");
+ return LIB_ERR_NOT_IMPLEMENTED;
+}
+
+static inline errval_t
+invoke_domain_id(struct capref cap, uint64_t domain_id)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ USER_PANIC("NYI");
+ return LIB_ERR_NOT_IMPLEMENTED;
+}
+
+static inline errval_t
+invoke_monitor_rck_register(struct capref kern_cap, struct capref ep,
+ int chanid)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ USER_PANIC("NYI");
+ return LIB_ERR_NOT_IMPLEMENTED;
+}
+
+static inline errval_t
+invoke_monitor_rck_delete(struct capref kern_cap, int chanid)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ USER_PANIC("NYI");
+ return LIB_ERR_NOT_IMPLEMENTED;
+}
+
+static inline errval_t invoke_monitor_sync_timer(uint64_t synctime)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ uint8_t invoke_bits = get_cap_valid_bits(cap_kernel);
+ capaddr_t invoke_cptr = get_cap_addr(cap_kernel) >> (CPTR_BITS - invoke_bits);
+
+ return syscall4((invoke_bits << 16) | (KernelCmd_Sync_timer << 8)
+ | SYSCALL_INVOKE, invoke_cptr, synctime >> 32,
+ synctime & 0xffffffff).error;
+}
+
+static inline errval_t
+invoke_monitor_get_arch_id(uintptr_t *arch_id)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ assert(arch_id != NULL);
+
+ struct sysret sysret = cap_invoke1(cap_kernel, KernelCmd_Get_arch_id);
+ if (err_is_ok(sysret.error)) {
+ *arch_id = sysret.value;
+ }
+ return sysret.error;
+}
+
+static inline errval_t
+invoke_monitor_ipi_register(struct capref ep, int chanid)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ uint8_t invoke_bits = get_cap_valid_bits(cap_kernel);
+ capaddr_t invoke_cptr = get_cap_addr(cap_kernel) >> (CPTR_BITS - invoke_bits);
+
+ return syscall4((invoke_bits << 16) | (KernelCmd_IPI_Register << 8)
+ | SYSCALL_INVOKE, invoke_cptr,
+ get_cap_addr(ep),
+ chanid).error;
+}
+
+static inline errval_t
+invoke_monitor_ipi_delete(int chanid)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ uint8_t invoke_bits = get_cap_valid_bits(cap_kernel);
+ capaddr_t invoke_cptr = get_cap_addr(cap_kernel) >> (CPTR_BITS - invoke_bits);
+
+ return syscall3((invoke_bits << 16) | (KernelCmd_IPI_Delete << 8)
+ | SYSCALL_INVOKE, invoke_cptr,
+ chanid).error;
+}
+
+static inline errval_t
+invoke_monitor_copy_existing(uint64_t *raw, capaddr_t cn_addr, int cn_bits, cslot_t slot)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ // XXX: this is assumed in client code of this function!
+ assert(sizeof(struct capability) <= 4*sizeof(uint64_t));
+
+ return cap_invoke5(cap_kernel, KernelCmd_Copy_existing,
+ cn_addr, cn_bits, slot, (uintptr_t)raw).error;
+}
+
++static inline errval_t
++invoke_monitor_add_kcb(uintptr_t kcb_base)
++{
++ assert(kcb_base);
++ return cap_invoke2(cap_kernel, KernelCmd_Add_kcb, kcb_base).error;
++}
++
++static inline errval_t
++invoke_monitor_remove_kcb(uintptr_t kcb_base)
++{
++ assert(kcb_base);
++ return cap_invoke2(cap_kernel, KernelCmd_Remove_kcb, kcb_base).error;
++}
++
++static inline errval_t
++invoke_monitor_suspend_kcb_scheduler(bool suspend)
++{
++ return cap_invoke2(cap_kernel, KernelCmd_Suspend_kcb_sched, suspend).error;
++}
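++
++// Example usage (sketch): forward_kcb_request in the monitor extracts the
++// KCB base address from an identified kernelcontrolblock capability and
++// passes it here, e.g. invoke_monitor_add_kcb(kcb_base).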
++
+#endif
#include <barrelfish/syscall_arch.h>
#include <barrelfish/caddr.h>
+#include <barrelfish/invocations_arch.h>
+#include <barrelfish_kpi/cpu.h>
#include <barrelfish_kpi/syscall_overflows_arch.h>
+#include <barrelfish_kpi/syscalls.h>
- /**
- * \brief Spawn a new core.
- *
- * \param cur_kern Cap of the current kernel
- * \param core_id APIC ID of the core to try booting
- * \param sp_mem Cap to Ram type memory to relocate the new kernel
- * \param dcb Cap to the dcb of the user program to run on the new kernel
- * \param root_vbits Number of valid bits in root_cptr
- * \param root_cptr Cap to the root of cspace of the new user program
- * \param vtree Cap to the vtree root of the new user program
- * \param dispatcher Cap to the dispatcher of the new user program
- * \param entry Kernel entry point in physical memory
- */
- static inline errval_t
- invoke_monitor_spawn_core(coreid_t core_id, enum cpu_type cpu_type,
- forvaddr_t entry)
- {
- uint8_t invoke_bits = get_cap_valid_bits(cap_kernel);
- capaddr_t invoke_cptr = get_cap_addr(cap_kernel) >> (CPTR_BITS - invoke_bits);
-
- return syscall6((invoke_bits << 16) | (KernelCmd_Spawn_core << 8)
- | SYSCALL_INVOKE, invoke_cptr, core_id, cpu_type,
- (uintptr_t)(entry >> 32), (uintptr_t) entry).error;
- }
-
static inline errval_t
+invoke_monitor_remote_relations(capaddr_t root_cap, int root_bits,
+ capaddr_t cap, int bits,
+ uint8_t relations, uint8_t mask,
+ uint8_t *ret_remote_relations)
+{
+ struct sysret r = cap_invoke6(cap_kernel, KernelCmd_Remote_relations,
+ root_cap, root_bits, cap, bits,
+ ((uint16_t)relations) | (((uint16_t)mask)<<8));
+ if (err_is_ok(r.error) && ret_remote_relations) {
+ *ret_remote_relations = r.value;
+ }
+ return r.error;
+}
+
+static inline errval_t
+invoke_monitor_cap_has_relations(capaddr_t caddr, uint8_t bits, uint8_t mask,
+ uint8_t *res)
+{
+ assert(res);
+ struct sysret ret = cap_invoke4(cap_kernel, KernelCmd_Cap_has_relations,
+ caddr, bits, mask);
+ if (err_is_ok(ret.error)) {
+ *res = ret.value;
+ }
+ return ret.error;
+}
+
+static inline errval_t
invoke_monitor_identify_cap(capaddr_t cap, int bits, struct capability *out)
{
uint8_t invoke_bits = get_cap_valid_bits(cap_kernel);
--- /dev/null
+/**
+ * \file
+ * \brief Capability invocations specific to the monitors
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef MONITOR_INVOCATIONS_ARCH_H
+#define MONITOR_INVOCATIONS_ARCH_H
+
+#include <barrelfish/syscall_arch.h>
+#include <barrelfish/caddr.h>
+#include <barrelfish/invocations_arch.h>
- #include <barrelfish_kpi/cpu.h>
-
- /**
- * \brief Spawn a new core.
- *
- * \param core_id APIC ID of the core to try booting
- * \param cpu_type Type of core to boot
- * \param entry Kernel entry point in physical memory
- */
- static inline errval_t
- invoke_monitor_spawn_core(coreid_t core_id, enum cpu_type cpu_type,
- forvaddr_t entry)
- {
- return cap_invoke4(cap_kernel, KernelCmd_Spawn_core, core_id, cpu_type,
- entry).error;
- }
+
+static inline errval_t
+invoke_monitor_remote_relations(capaddr_t root_cap, int root_bits,
+ capaddr_t cap, int bits,
+ uint8_t relations, uint8_t mask,
+ uint8_t *ret_remote_relations)
+{
+ struct sysret r = cap_invoke6(cap_kernel, KernelCmd_Remote_relations,
+ root_cap, root_bits, cap, bits,
+ ((uint16_t)relations) | (((uint16_t)mask)<<8));
+ if (err_is_ok(r.error) && ret_remote_relations) {
+ *ret_remote_relations = r.value;
+ }
+ return r.error;
+}
+
+static inline errval_t
+invoke_monitor_cap_has_relations(capaddr_t caddr, uint8_t bits, uint8_t mask,
+ uint8_t *res)
+{
+ assert(res);
+ struct sysret ret = cap_invoke4(cap_kernel, KernelCmd_Cap_has_relations,
+ caddr, bits, mask);
+ if (err_is_ok(ret.error)) {
+ *res = ret.value;
+ }
+ return ret.error;
+}
+
+static inline errval_t
+invoke_monitor_identify_cap(capaddr_t cap, int bits, struct capability *out)
+{
+ return cap_invoke4(cap_kernel, KernelCmd_Identify_cap, cap, bits,
+ (uintptr_t)out).error;
+}
+
+static inline errval_t
+invoke_monitor_identify_domains_cap(capaddr_t root_cap, int root_bits,
+ capaddr_t cap, int bits,
+ struct capability *out)
+{
+ return cap_invoke6(cap_kernel, KernelCmd_Identify_domains_cap,
+ root_cap, root_bits, cap, bits, (uintptr_t)out).error;
+}
+
+
+static inline errval_t
+invoke_monitor_nullify_cap(capaddr_t cap, int bits)
+{
+ return cap_invoke3(cap_kernel, KernelCmd_Nullify_cap, cap, bits).error;
+}
+
+static inline errval_t
+invoke_monitor_create_cap(uint64_t *raw, capaddr_t caddr, int bits, capaddr_t slot, coreid_t owner)
+{
+ assert(sizeof(struct capability) % sizeof(uint64_t) == 0);
+ assert(sizeof(struct capability) / sizeof(uint64_t) == 4);
+ return cap_invoke9(cap_kernel, KernelCmd_Create_cap,
+ raw[0], raw[1], raw[2], raw[3],
+ caddr, bits, slot, owner).error;
+}
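+
+// Note: unlike the generic-pointer variant, this invocation passes the raw
+// capability representation inline as four 64-bit words; the asserts above
+// pin sizeof(struct capability) to exactly 4 * sizeof(uint64_t).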
+
+static inline errval_t
+invoke_monitor_register(struct capref ep)
+{
+ return cap_invoke2(cap_kernel, KernelCmd_Register, get_cap_addr(ep)).error;
+}
+
+static inline errval_t
+invoke_monitor_remote_cap_retype(capaddr_t rootcap_addr, uint8_t rootcap_vbits,
+ capaddr_t src, enum objtype newtype,
+ int objbits, capaddr_t to, capaddr_t slot,
+ int bits)
+{
+ return cap_invoke9(cap_kernel, KernelCmd_Retype, rootcap_addr,
+ rootcap_vbits, src, newtype, objbits, to, slot,
+ bits).error;
+}
+
+static inline errval_t
+invoke_monitor_copy_existing(uint64_t *raw, capaddr_t cn_addr, int cn_bits, cslot_t slot)
+{
+ assert(sizeof(struct capability) == 4*sizeof(uint64_t));
+ return cap_invoke8(cap_kernel, KernelCmd_Copy_existing,
+ raw[0], raw[1], raw[2], raw[3],
+ cn_addr, cn_bits, slot).error;
+}
+
+/**
+ * \brief Set up tracing in the kernel
+ *
+ */
+static inline errval_t
+invoke_trace_setup(struct capref cap)
+{
+ return cap_invoke2(cap_kernel, KernelCmd_Setup_trace,
+ get_cap_addr(cap)).error;
+}
+
+static inline errval_t
+invoke_domain_id(struct capref cap, domainid_t domain_id)
+{
+ return cap_invoke3(cap_kernel, KernelCmd_Domain_Id, get_cap_addr(cap),
+ domain_id).error;
+}
+
+static inline errval_t invoke_monitor_sync_timer(uint64_t synctime)
+{
+ return cap_invoke2(cap_kernel, KernelCmd_Sync_timer, synctime).error;
+}
+
+static inline errval_t
+invoke_monitor_ipi_register(struct capref ep, int chanid)
+{
+ return cap_invoke3(cap_kernel, KernelCmd_IPI_Register, get_cap_addr(ep),
+ chanid).error;
+}
+
+static inline errval_t
+invoke_monitor_ipi_delete(int chanid)
+{
+ return cap_invoke2(cap_kernel, KernelCmd_IPI_Delete, chanid).error;
+}
+
+static inline errval_t
+invoke_monitor_get_arch_id(uintptr_t *arch_id)
+{
+ assert(arch_id != NULL);
+
+ struct sysret sysret = cap_invoke1(cap_kernel, KernelCmd_Get_arch_id);
+ if (err_is_ok(sysret.error)) {
+ *arch_id = sysret.value;
+ }
+ return sysret.error;
+}
+
+static inline errval_t
+invoke_monitor_get_cap_owner(capaddr_t root, int rbits, capaddr_t cap, int cbits, coreid_t *ret_owner)
+{
+ assert(ret_owner);
+ struct sysret sysret = cap_invoke5(cap_kernel, KernelCmd_Get_cap_owner, root, rbits, cap, cbits);
+ if (err_is_ok(sysret.error)) {
+ *ret_owner = sysret.value;
+ }
+ return sysret.error;
+}
+
+static inline errval_t
+invoke_monitor_set_cap_owner(capaddr_t root, int rbits, capaddr_t cap, int cbits, coreid_t owner)
+{
+ return cap_invoke6(cap_kernel, KernelCmd_Set_cap_owner, root, rbits, cap, cbits, owner).error;
+}
+
+static inline errval_t
+invoke_monitor_lock_cap(capaddr_t root, int rbits, capaddr_t cap, int cbits)
+{
+ return cap_invoke5(cap_kernel, KernelCmd_Lock_cap, root, rbits, cap, cbits).error;
+}
+
+static inline errval_t
+invoke_monitor_unlock_cap(capaddr_t root, int rbits, capaddr_t cap, int cbits)
+{
+ return cap_invoke5(cap_kernel, KernelCmd_Unlock_cap, root, rbits, cap, cbits).error;
+}
+
+static inline errval_t
+invoke_monitor_delete_last(capaddr_t root, int rbits, capaddr_t cap, int cbits,
+ capaddr_t retcn, int retcnbits, cslot_t retslot)
+{
+ return cap_invoke8(cap_kernel, KernelCmd_Delete_last, root, rbits, cap,
+ cbits, retcn, retcnbits, retslot).error;
+}
+
+static inline errval_t
+invoke_monitor_delete_foreigns(capaddr_t cap, int bits)
+{
+ return cap_invoke3(cap_kernel, KernelCmd_Delete_foreigns, cap, bits).error;
+}
+
+static inline errval_t
+invoke_monitor_revoke_mark_target(capaddr_t root, int rbits,
+ capaddr_t cap, int cbits)
+{
+ return cap_invoke5(cap_kernel, KernelCmd_Revoke_mark_target,
+ root, rbits, cap, cbits).error;
+}
+
+static inline errval_t
+invoke_monitor_revoke_mark_relations(uint64_t *raw_base)
+{
+ assert(sizeof(struct capability) % sizeof(uint64_t) == 0);
+ assert(sizeof(struct capability) / sizeof(uint64_t) == 4);
+ return cap_invoke5(cap_kernel, KernelCmd_Revoke_mark_relations,
+ raw_base[0], raw_base[1],
+ raw_base[2], raw_base[3]).error;
+}
+
+static inline errval_t
+invoke_monitor_delete_step(capaddr_t retcn, int retcnbits, cslot_t retslot)
+{
+ return cap_invoke4(cap_kernel, KernelCmd_Delete_step,
+ retcn, retcnbits, retslot).error;
+}
+
+static inline errval_t
+invoke_monitor_clear_step(capaddr_t retcn, int retcnbits, cslot_t retslot)
+{
+ return cap_invoke4(cap_kernel, KernelCmd_Clear_step,
+ retcn, retcnbits, retslot).error;
+}
+
+static inline errval_t
+invoke_monitor_has_descendants(uint64_t *raw, bool *res)
+{
+ assert(sizeof(struct capability) % sizeof(uint64_t) == 0);
+ assert(sizeof(struct capability) / sizeof(uint64_t) == 4);
+ struct sysret sysret;
+ sysret = cap_invoke5(cap_kernel, KernelCmd_Has_descendants,
+ raw[0], raw[1], raw[2], raw[3]);
+ if (err_is_ok(sysret.error)) {
+ *res = sysret.value;
+ }
+ return sysret.error;
+}
+
++static inline errval_t
++invoke_monitor_add_kcb(uintptr_t kcb_base)
++{
++ assert(kcb_base);
++
++ return cap_invoke2(cap_kernel, KernelCmd_Add_kcb, kcb_base).error;
++}
++
++static inline errval_t
++invoke_monitor_remove_kcb(uintptr_t kcb_base)
++{
++ assert(kcb_base);
++
++ return cap_invoke2(cap_kernel, KernelCmd_Remove_kcb, kcb_base).error;
++}
++
++static inline errval_t
++invoke_monitor_suspend_kcb_scheduler(bool suspend)
++{
++ return cap_invoke2(cap_kernel, KernelCmd_Suspend_kcb_sched, suspend).error;
++}
++
+#endif
--- /dev/null
+/**
+ * \file
+ * \brief
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, 2011, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef MONITOR_H
+#define MONITOR_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <barrelfish/barrelfish.h>
+#include <spawndomain/spawndomain.h>
+#include <bench/bench_arch.h>
+#include <if/monitor_defs.h>
+#include <if/monitor_blocking_defs.h>
+#include <if/monitor_mem_defs.h>
+#include <monitor_invocations_arch.h>
+#include <queue.h>
+#include <connection.h>
+#include "monitor_debug.h"
+
- // Change #URPC_SIZE if changing this
++STATIC_ASSERT(MON_URPC_SIZE == 2*BASE_PAGE_SIZE,
++ "Change #URPC_SIZE if changing channel length");
+#define MON_URPC_CHANNEL_LEN (32 * UMP_MSG_BYTES)
+#define MON_RAM_CHANNEL_LEN (2 * UMP_MSG_BYTES)
+
+// XXX: These should match the aliases in intermon.if
+typedef uint64_t state_id_t;
+typedef uint64_t mon_id_t;
+typedef uint64_t con_id_t;
+typedef uint32_t chanid_t;
+typedef uint8_t bool_t;
+
+// XXX: from old routing library, to be removed
+typedef uint32_t recordid_t;
+
+//XXX used to wait until all monitors are up and connected. asq
+extern int seen_connections;
+
+struct intermon_state {
+ struct msg_queue queue; ///< Queue of outgoing messages
+ struct intermon_binding *binding; ///< Back-pointer to binding
+ coreid_t core_id; ///< Core ID of monitor on other end
+ rsrcid_t rsrcid;
+ bool rsrcid_inflight;
+ bool capops_ready;
+ struct monitor_binding *originating_client;
+};
+
+struct monitor_state {
+ struct msg_queue queue;
+};
+
+extern iref_t mem_serv_iref;
+extern iref_t name_serv_iref;
+extern iref_t ramfs_serv_iref;
+extern iref_t monitor_rpc_iref;
+extern iref_t monitor_mem_iref;
+extern coreid_t my_core_id;
+extern bool bsp_monitor;
+extern struct capref trace_cap;
+extern struct bootinfo *bi;
+extern bool update_ram_alloc_binding;
+extern int num_monitors;
+
+union capability_caprep_u {
+ intermon_caprep_t caprep;
+ monitor_mem_caprep_t caprep2;
+ monitor_blocking_caprep_t caprepb; // XXX: identical to intermon_caprep_t
+ struct capability cap;
+};
+STATIC_ASSERT(sizeof(union capability_caprep_u) >= sizeof(struct capability), \
+ ASSERT_CONCAT("Size mismatch:", intermon_caprep_t));
+
+STATIC_ASSERT(sizeof(struct capability) <= sizeof(intermon_caprep_t),
+ ASSERT_CONCAT("Size mismatch:", intermon_caprep_t));
+
+static inline void capability_to_caprep(struct capability *cap,
+ intermon_caprep_t *caprep)
+{
+ memcpy(caprep, cap, sizeof(*cap));
+}
+
+static inline void caprep_to_capability(intermon_caprep_t *caprep,
+ struct capability *cap)
+{
+ memcpy(cap, caprep, sizeof(*cap));
+}
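+
+/* The two helpers above are memcpy-based inverses (sizes checked by the
+ * STATIC_ASSERTs), so a round-trip is bit-exact:
+ *
+ *   intermon_caprep_t rep;
+ *   capability_to_caprep(&cap, &rep);
+ *   struct capability copy;
+ *   caprep_to_capability(&rep, &copy);
+ *   // memcmp(&copy, &cap, sizeof(cap)) == 0
+ */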
+
+static inline void debug_print_caprep(intermon_caprep_t *caprep)
+{
+ struct capability cap;
+ memcpy(&cap, caprep, sizeof(cap));
+ char buf[256];
+ debug_print_cap(buf, 256, &cap);
+ buf[255] = 0;
+ DEBUG_CAPOPS("\t%s\n", buf);
+}
+
+static inline void debug_print_caprep2(monitor_mem_caprep_t *caprep)
+{
+ struct capability cap;
+ memcpy(&cap, caprep, sizeof(cap));
+ char buf[256];
+ debug_print_cap(buf, 256, &cap);
+ buf[255] = 0;
+ DEBUG_CAPOPS("\t%s\n", buf);
+}
+
+#include <ram_alloc.h>
+#include <spawn.h>
+#include <monitor_server.h>
+#include <monitor_invocations.h>
+
- /* monitor_server.c */
- errval_t monitor_server_arch_init(struct monitor_binding *b);
- void set_monitor_rpc_iref(iref_t iref);
-
+/* boot.c */
- void boot_core_request(struct monitor_binding *st, coreid_t id, int32_t hwid,
- int32_t int_cpu_type, char *cmdline);
++void boot_core_request(struct monitor_binding *st, coreid_t id,
++ struct capref frame);
+void boot_initialize_request(struct monitor_binding *st);
+
+errval_t spawn_xcore_monitor(coreid_t id, int hwid, enum cpu_type cpu_type,
+ const char *cmdline,
+ struct intermon_binding **ret_binding);
+errval_t boot_arch_app_core(int argc, char *argv[],
+ coreid_t *ret_parent_coreid,
+ struct intermon_binding **ret_binding);
+
+/* main.c */
+errval_t request_trace_caps(struct intermon_binding *st);
+errval_t request_mem_serv_iref(struct intermon_binding *st);
+errval_t request_name_serv_iref(struct intermon_binding *st);
+errval_t request_ramfs_serv_iref(struct intermon_binding *st);
+
+/* inter.c */
+errval_t intermon_init(struct intermon_binding *b, coreid_t coreid);
+errval_t arch_intermon_init(struct intermon_binding *b);
+
+/* ump_support.c */
+errval_t ump_intermon_init(struct intermon_binding *ib);
+errval_t ump_monitor_init(struct monitor_binding *mb);
+
+/* multihop_support.c */
+errval_t multihop_intermon_init(struct intermon_binding *ib);
+errval_t multihop_monitor_init(struct monitor_binding *mb);
+errval_t multihop_request_routing_table(struct intermon_binding *b);
+
+/* trace_support.c */
+errval_t trace_intermon_init(struct intermon_binding *ib);
+errval_t trace_monitor_init(struct monitor_binding *mb);
+
+/* bfscope_support.c */
+errval_t bfscope_intermon_init(struct intermon_binding *ib);
+errval_t bfscope_monitor_init(struct monitor_binding *mb);
+
+/* rck_support.c */
+errval_t rck_intermon_init(struct intermon_binding *ib);
+errval_t rck_monitor_init(struct monitor_binding *mb);
+
+// Resource control
+errval_t rsrc_new(rsrcid_t *id);
+errval_t rsrc_join_satellite(rsrcid_t id, coreid_t coreid);
+errval_t rsrc_join(rsrcid_t id, struct capref dispcap,
+ struct monitor_blocking_binding *b);
+errval_t rsrc_submit_manifest(rsrcid_t id, char *manifest);
+errval_t rsrc_set_phase(rsrcid_t id, uintptr_t phase);
+errval_t rsrc_set_phase_inter(rsrcid_t id, uintptr_t phase, uint64_t timestamp);
+struct monitor_blocking_binding *rsrc_get_binding(rsrcid_t id);
+errval_t rsrc_set_phase_data(rsrcid_t id, uintptr_t active, void *data,
+ size_t len);
+
+// Time coordination
+errval_t timing_sync_timer(void);
+void timing_sync_timer_reply(errval_t err);
+void timing_sync_bench(void);
+
+/* domain.c */
+void domain_mgmt_init(void);
+
+/* intermon_bindings.c */
+errval_t intermon_binding_set(struct intermon_state *st);
+errval_t intermon_binding_get(coreid_t coreid, struct intermon_binding **ret);
+
+/* iref.c */
+errval_t iref_alloc(struct monitor_binding *binding, uintptr_t service_id,
+ iref_t *iref);
+errval_t iref_get_core_id(iref_t iref, coreid_t *core_id);
+errval_t iref_get_binding(iref_t iref, struct monitor_binding **binding);
+errval_t iref_get_service_id(iref_t iref, uintptr_t *service_id);
+
+#endif // MONITOR_H
--- /dev/null
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, 2011, 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef MONITOR_SERVER_H
+#define MONITOR_SERVER_H
+
+errval_t monitor_client_setup(struct spawninfo *si);
+errval_t monitor_client_setup_mem_serv(void);
+errval_t monitor_client_setup_monitor(void);
+errval_t monitor_server_init(struct monitor_binding *b);
+errval_t monitor_rpc_init(void);
++errval_t monitor_server_arch_init(struct monitor_binding *b);
++void set_monitor_rpc_iref(iref_t iref);
+
+#endif
*/
#include <inttypes.h>
-#include "monitor.h"
+#include <monitor.h>
+ #include <barrelfish/dispatch.h>
#include <trace/trace.h>
+#include "send_cap.h"
+#include "capops.h"
#include <trace_definitions/trace_defs.h>
#define MIN(x,y) ((x<y) ? (x) : (y))
boot_core_reply_cont(struct monitor_binding *domain_binding,
errval_t error_code)
{
+ assert(domain_binding != NULL);
errval_t err;
+ DEBUG_CAPOPS("boot_core_reply_cont: %s (%"PRIuERRV")\n",
+ err_getstring(error_code), error_code);
err = domain_binding->tx_vtbl.
boot_core_reply(domain_binding, NOP_CONT, error_code);
if (err_is_fail(err)) {
}
struct intermon_state *st = b->st;
- errval_t err;
+ errval_t err = SYS_ERR_OK;
+ assert(st->capops_ready);
- /* Inform other monitors of this new monitor */
+ // Inform other monitors of this new monitor
monitor_ready[st->core_id] = true;
err = new_monitor_notify(st->core_id);
if (err_is_fail(err)) {
assert(err_is_ok(err));
}
struct pending_reply {
struct monitor_blocking_binding *b;
errval_t err;
-- struct capref *cap;
};
static void retry_reply(void *arg)
struct monitor_blocking_binding *b = r->b;
errval_t err;
-- err = b->tx_vtbl.cap_set_remote_response(b, MKCONT(cap_set_remote_done, r->cap),
-- r->err);
++ err = b->tx_vtbl.cap_set_remote_response(b, NOP_CONT, r->err);
if (err_is_ok(err)) {
free(r);
} else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
assert(err_is_ok(err));
} else {
DEBUG_ERR(err, "failed to reply to memory request");
-- cap_set_remote_done(r->cap);
}
}
static void cap_set_remote(struct monitor_blocking_binding *b,
struct capref cap, bool remote)
{
- struct capref *tmpcap = malloc(sizeof(struct capref));
- bool has_descendants;
- struct capref *tmpcap = malloc(sizeof(struct capref));
errval_t err, reterr;
-- *tmpcap = cap;
- reterr = monitor_cap_remote(cap, remote, &has_descendants);
- err = b->tx_vtbl.cap_set_remote_response(b, MKCONT(cap_set_remote_done, tmpcap),
- reterr);
++ reterr = monitor_remote_relations(cap, RRELS_COPY_BIT, RRELS_COPY_BIT, NULL);
+
- reterr = ERR_NOTIMP;
- err = b->tx_vtbl.cap_set_remote_response(b, MKCONT(cap_set_remote_done, tmpcap),
- reterr);
++ err = b->tx_vtbl.cap_set_remote_response(b, NOP_CONT, reterr);
if(err_is_fail(err)) {
if(err_no(err) == FLOUNDER_ERR_TX_BUSY) {
struct pending_reply *r = malloc(sizeof(struct pending_reply));
assert(r != NULL);
r->b = b;
r->err = reterr;
-- r->cap = tmpcap;
err = b->register_send(b, get_default_waitset(), MKCONT(retry_reply, r));
assert(err_is_ok(err));
} else {
/* ----------------------- BOOTINFO REQUEST CODE END ----------------------- */
+ static void get_ipi_cap(struct monitor_blocking_binding *b)
+ {
+ errval_t err;
+
+ // XXX: We should not just hand out this cap to everyone
+ // who requests it. There is currently no way to determine
+ // if the client is a valid recipient
+
+ err = b->tx_vtbl.get_ipi_cap_response(b, NOP_CONT, cap_ipi);
+ assert(err_is_ok(err));
+ }
+
++// XXX: these look suspicious in combination with distops!
+ static void forward_kcb_request(struct monitor_blocking_binding *b,
+ coreid_t destination, struct capref kcb)
+ {
+ printf("%s:%s:%d: forward_kcb_request in monitor\n",
+ __FILE__, __FUNCTION__, __LINE__);
+
+ errval_t err = SYS_ERR_OK;
+
+ struct capability kcb_cap;
+ err = monitor_cap_identify(kcb, &kcb_cap);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "monitor_cap_identify failed");
+ err = b->tx_vtbl.forward_kcb_request_response(b, NOP_CONT, err);
+ assert(err_is_ok(err));
+ return;
+ }
+
+ if (destination == my_core_id) {
+ uintptr_t kcb_base = (uintptr_t)kcb_cap.u.kernelcontrolblock.kcb;
+ printf("%s:%s:%d: Invoke syscall directly, destination==my_core_id; kcb_base = 0x%"PRIxPTR"\n",
+ __FILE__, __FUNCTION__, __LINE__, kcb_base);
+ err = invoke_monitor_add_kcb(kcb_base);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "invoke_montitor_add_kcb failed.");
+ }
+
+ err = b->tx_vtbl.forward_kcb_request_response(b, NOP_CONT, err);
+ assert(err_is_ok(err));
+ return;
+ }
+
+ struct intermon_binding *ib;
+ err = intermon_binding_get(destination, &ib);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "intermon_binding_get failed");
+ err = b->tx_vtbl.forward_kcb_request_response(b, NOP_CONT, err);
+ assert(err_is_ok(err));
+ return;
+ }
+
+ intermon_caprep_t kcb_rep;
+ capability_to_caprep(&kcb_cap, &kcb_rep);
+
+ ib->st = b;
+ err = ib->tx_vtbl.give_kcb_request(ib, NOP_CONT, kcb_rep);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "give_kcb send failed");
+ err = b->tx_vtbl.forward_kcb_request_response(b, NOP_CONT, err);
+ assert(err_is_ok(err));
+ return;
+ }
+ }
+
+ static void forward_kcb_rm_request(struct monitor_blocking_binding *b,
+ coreid_t destination, struct capref kcb)
+ {
+ errval_t err = SYS_ERR_OK;
+
+ // can't move ourselves
+ assert(destination != my_core_id);
+
+ struct capability kcb_cap;
+ err = monitor_cap_identify(kcb, &kcb_cap);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "monitor_cap_identify failed");
+ err = b->tx_vtbl.forward_kcb_request_response(b, NOP_CONT, err);
+ assert(err_is_ok(err));
+ return;
+ }
+
+ struct intermon_binding *ib;
+ err = intermon_binding_get(destination, &ib);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "intermon_binding_get failed");
+ err = b->tx_vtbl.forward_kcb_request_response(b, NOP_CONT, err);
+ assert(err_is_ok(err));
+ return;
+ }
+ uintptr_t kcb_base = (uintptr_t)kcb_cap.u.kernelcontrolblock.kcb;
+
+ // send request to other monitor
+ // remember monitor binding to send answer
+ struct intermon_state *ist = (struct intermon_state*)ib->st;
+ ist->originating_client = (struct monitor_binding*)b; //XXX: HACK
+ err = ib->tx_vtbl.forward_kcb_rm_request(ib, NOP_CONT, kcb_base);
+ assert(err_is_ok(err));
+ }
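+ // Here the intermon_state is kept intact and the client binding is
+ // recorded in originating_client instead; the reply handler for the
+ // forward_kcb_rm answer presumably completes the blocking RPC through it.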
+
+ static void get_global_paddr(struct monitor_blocking_binding *b)
+ {
+ genpaddr_t global = 0;
+ errval_t err;
+ err = invoke_get_global_paddr(cap_kernel, &global);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "get_global_paddr invocation");
+ }
+
+ err = b->tx_vtbl.get_global_paddr_response(b, NOP_CONT, global);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "sending global paddr failed.");
+ }
+ }
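+ // Client-side sketch, assumed symmetric to the other blocking calls and
+ // reusing the RPC client from the get_ipi_cap sketch above:
+ //
+ //     genpaddr_t global;
+ //     errval_t err = mc->vtbl.get_global_paddr(mc, &global);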
+
/*------------------------- Initialization functions -------------------------*/
static struct monitor_blocking_rx_vtbl rx_vtbl = {
struct capref cap;
uint32_t capid;
uint8_t give_away;
- struct capability capability;
- errval_t msgerr;
- bool has_descendants;
- coremask_t on_cores;
+ struct captx_prepare_state captx_state;
+ intermon_captx_t captx;
};
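// The hunk above omits the head of send_cap_st; judging from the casts
// below (intermon_msg_queue_elem* -> send_cap_st* in cap_send_tx_cont, and
// send_cap_st* -> msg_queue_elem* when enqueueing), the assumed full layout
// is roughly:
//
//     struct send_cap_st {
//         struct intermon_msg_queue_elem qe;  // must stay the first member
//         uintptr_t my_mon_id;
//         struct capref cap;
//         uint32_t capid;
//         uint8_t give_away;
//         struct captx_prepare_state captx_state;
//         intermon_captx_t captx;
//     };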
-static void cap_send_request_2(uintptr_t my_mon_id, struct capref cap,
- uint32_t capid, struct capability capability,
- errval_t msgerr,
- uint8_t give_away, bool has_descendants,
- coremask_t on_cores);
-
-static void cap_send_request_cb(void * st_arg) {
- errval_t err;
- struct send_cap_st * st = (struct send_cap_st *) st_arg;
- if (err_is_fail(st->rcap_st.err)) {
- // lock failed, unlock any cores we locked
- err = rcap_db_release_lock(&(st->capability), st->rcap_st.cores_locked);
- assert (err_is_ok(err));
-
- // try again - TODO, introduce some backoff here
- err = rcap_db_acquire_lock(&(st->capability), (struct rcap_st *)st);
- assert (err_is_ok(err));
- } else {
- cap_send_request_2(st->my_mon_id, st->cap, st->capid, st->capability,
- st->msgerr, st->give_away, st->has_descendants,
- st->on_cores);
- }
-}
-
-/// FIXME: If on the same core, fail. (Why? -AB)
-/// XXX: size of capability is arch specific
-static void cap_send_request(struct monitor_binding *b,
- uintptr_t my_mon_id, struct capref cap,
- uint32_t capid, uint8_t give_away)
-{
- errval_t err, msgerr = SYS_ERR_OK;
- struct capability capability;
- bool has_descendants;
- coremask_t on_cores;
-
- if (!capref_is_null(cap)) {
- err = monitor_cap_identify(cap, &capability);
- if (err_is_fail(err)) {
- USER_PANIC_ERR(err, "monitor_cap_identify failed, ignored");
- return;
- }
-
- // if we can't transfer the cap, it is delivered as NULL
- if (!monitor_can_send_cap(&capability)) {
- cap = NULL_CAP;
- msgerr = MON_ERR_CAP_SEND;
- }
- }
-
- if (capref_is_null(cap)) {
- // we don't care about capabilities, has_descendants, or on_cores here,
- // make the compiler happy though
- static struct capability null_capability;
- static coremask_t null_mask;
- cap_send_request_2(my_mon_id, cap, capid, null_capability,
- msgerr, give_away, false, null_mask);
- } else if (!give_away) {
- if (!rcap_db_exists(&capability)) {
- err = monitor_cap_remote(cap, true, &has_descendants);
- if (err_is_fail(err)) {
- USER_PANIC_ERR(err, "monitor_cap_remote failed");
- return;
- }
- err = rcap_db_add(&capability, has_descendants);
- if (err_is_fail(err)) {
- USER_PANIC_ERR(err, "rcap_db_add failed");
- return;
- }
- }
-
- err = rcap_db_get_info(&capability, &has_descendants, &on_cores);
- if (err_is_fail(err)) {
- USER_PANIC_ERR(err, "rcap_db_get_info failed");
- return;
- }
-
- // allocate state for callback
- struct send_cap_st * send_cap_st = malloc (sizeof(struct send_cap_st));
- send_cap_st->rcap_st.free_at_ccast = false;
- send_cap_st->rcap_st.cb = cap_send_request_cb;
- send_cap_st->my_mon_id = my_mon_id;
- send_cap_st->cap = cap;
- send_cap_st->capability = capability;
- send_cap_st->capid = capid;
- send_cap_st->msgerr = msgerr;
- send_cap_st->give_away = give_away;
- send_cap_st->has_descendants = has_descendants;
- send_cap_st->on_cores = on_cores;
-
- err = rcap_db_acquire_lock(&capability, (struct rcap_st *)send_cap_st);
- assert (err_is_ok(err));
- // continues in cap_send_request_2 (after cap_send_request_cb)
-
- } else { // give_away cap
- err = monitor_cap_remote(cap, true, &has_descendants);
- if (err_is_fail(err)) {
- USER_PANIC_ERR(err, "monitor_cap_remote failed");
- return;
- }
-
- // TODO ensure that no more copies of this cap are on this core
- static coremask_t null_mask;
- // call continuation directly
- cap_send_request_2(my_mon_id, cap, capid, capability, msgerr, give_away,
- has_descendants, null_mask);
+static void
+cap_send_tx_cont(struct intermon_binding *b,
+ struct intermon_msg_queue_elem *e)
+{
+ DEBUG_CAPOPS("%s: %p %p\n", __FUNCTION__, b, e);
+ errval_t send_err;
+ struct send_cap_st *st = (struct send_cap_st*)e;
+ struct remote_conn_state *conn = remote_conn_lookup(st->my_mon_id);
+ send_err = intermon_cap_send_request__tx(b, NOP_CONT, conn->mon_id,
+ st->capid, st->captx);
+ if (err_is_fail(send_err)) {
+ DEBUG_ERR(send_err, "sending cap_send_request failed");
}
+ free(st);
}
-struct cap_send_request_state {
- struct intermon_msg_queue_elem elem;
- uintptr_t your_mon_id;
- uint32_t capid;
- errval_t msgerr;
- intermon_caprep_t caprep;
- uint8_t give_away;
- bool has_descendants;
- coremask_t on_cores;
- bool null_cap;
-};
-
-static void cap_send_request_2_handler(struct intermon_binding *b,
- struct intermon_msg_queue_elem *e)
+static void
+cap_send_request_tx_cont(errval_t err, struct captx_prepare_state *captx_st,
+ intermon_captx_t *captx, void *st_)
{
- errval_t err;
- struct cap_send_request_state *st = (struct cap_send_request_state*)e;
+ DEBUG_CAPOPS("%s: %s [%p]\n", __FUNCTION__, err_getstring(err), __builtin_return_address(0));
+ errval_t queue_err;
+ struct send_cap_st *send_st = (struct send_cap_st*)st_;
- err = b->tx_vtbl.cap_send_request(b, NOP_CONT, st->your_mon_id, st->capid,
- st->caprep, st->msgerr, st->give_away,
- st->has_descendants, st->on_cores.bits,
- st->null_cap);
if (err_is_fail(err)) {
- if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
- struct intermon_state *intermon_state = b->st;
- struct cap_send_request_state *ms =
- malloc(sizeof(struct cap_send_request_state));
- assert(ms);
+ // XXX: should forward error here
+ DEBUG_ERR(err, "preparing cap tx failed");
+ free(send_st);
+ return;
+ }
- ms->your_mon_id = st->your_mon_id;
- ms->capid = st->capid;
- ms->caprep = st->caprep;
- ms->msgerr = st->msgerr;
- ms->give_away = st->give_away;
- ms->has_descendants = st->has_descendants;
- ms->on_cores = st->on_cores;
- ms->null_cap = st->null_cap;
- ms->elem.cont = cap_send_request_2_handler;
-
- errval_t err1 = intermon_enqueue_send(b, &intermon_state->queue,
- get_default_waitset(),
- &ms->elem.queue);
- if (err_is_fail(err1)) {
- USER_PANIC_ERR(err1, "monitor_enqueue_send failed");
- }
+ send_st->captx = *captx;
- } else {
- USER_PANIC_ERR(err, "forwarding cap failed");
- }
+ DEBUG_CAPOPS("%s: enqueueing send\n", __FUNCTION__);
+ send_st->qe.cont = cap_send_tx_cont;
+ struct remote_conn_state *conn = remote_conn_lookup(send_st->my_mon_id);
+ struct intermon_binding *binding = conn->mon_binding;
+ struct intermon_state *inter_st = (struct intermon_state*)binding->st;
+ queue_err = intermon_enqueue_send(binding, &inter_st->queue,
+ binding->waitset,
+ (struct msg_queue_elem*)send_st);
+ if (err_is_fail(queue_err)) {
+ DEBUG_ERR(queue_err, "enqueuing cap_send_request failed");
+ free(send_st);
}
-
}
-static void cap_send_request_2(uintptr_t my_mon_id, struct capref cap,
- uint32_t capid, struct capability capability,
- errval_t msgerr,
- uint8_t give_away, bool has_descendants,
- coremask_t on_cores)
+static void
+cap_send_request(struct monitor_binding *b, uintptr_t my_mon_id,
+ struct capref cap, uint32_t capid)
{
+ DEBUG_CAPOPS("cap_send_request\n");
errval_t err;
struct remote_conn_state *conn = remote_conn_lookup(my_mon_id);
- if (conn == NULL) {
- USER_PANIC_ERR(0, "invalid mon_id, ignored");
- return;
- }
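+ // NB: with this NULL check dropped, an invalid my_mon_id now faults at
+ // conn->core_id in the captx_prepare_send call below; presumably the
+ // lookup can no longer fail on this path.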
-
- struct intermon_binding *binding = conn->mon_binding;
- uintptr_t your_mon_id = conn->mon_id;
-
- // XXX: This is a typedef of struct that flounder is generating.
- // Flounder should not be generating this and we shouldn't be using it.
- intermon_caprep_t caprep;
- capability_to_caprep(&capability, &caprep);
-
-
- bool null_cap = capref_is_null(cap);
- if (!null_cap) {
- err = cap_destroy(cap);
- if (err_is_fail(err)) {
- USER_PANIC_ERR(err, "cap_destroy failed");
- }
+ struct send_cap_st *st;
+ st = calloc(1, sizeof(*st));
+ if (!st) {
+ err = LIB_ERR_MALLOC_FAIL;
+ DEBUG_ERR(err, "Failed to allocate cap_send_request state");
+ // XXX: should forward error here
+ return;
}
+ st->my_mon_id = my_mon_id;
+ st->cap = cap;
+ st->capid = capid;
- err = binding->tx_vtbl.
- cap_send_request(binding, NOP_CONT, your_mon_id, capid,
- caprep, msgerr, give_away, has_descendants, on_cores.bits,
- null_cap);
- if (err_is_fail(err)) {
- if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
- struct intermon_state *intermon_state = binding->st;
- struct cap_send_request_state *ms =
- malloc(sizeof(struct cap_send_request_state));
- assert(ms);
-
- ms->your_mon_id = your_mon_id;
- ms->capid = capid;
- ms->caprep = caprep;
- ms->msgerr = msgerr;
- ms->give_away = give_away;
- ms->has_descendants = has_descendants;
- ms->on_cores = on_cores;
- ms->null_cap = null_cap;
- ms->elem.cont = cap_send_request_2_handler;
-
- errval_t err1 = intermon_enqueue_send(binding, &intermon_state->queue,
- get_default_waitset(),
- &ms->elem.queue);
- if (err_is_fail(err1)) {
- USER_PANIC_ERR(err1, "monitor_enqueue_send failed");
- }
-
- } else {
- USER_PANIC_ERR(err, "forwarding cap failed");
- }
- }
+ captx_prepare_send(cap, conn->core_id, true, &st->captx_state,
+ cap_send_request_tx_cont, st);
}
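// Reworked send path, as read from the code above:
//   1. cap_send_request allocates send_cap_st and kicks off
//      captx_prepare_send() with cap_send_request_tx_cont as continuation;
//   2. cap_send_request_tx_cont stores the prepared captx and enqueues the
//      state on the intermon send queue;
//   3. cap_send_tx_cont finally issues intermon_cap_send_request__tx() and
//      frees the state.
// The old rcap_db locking and the eager cap_destroy() are gone; ownership
// transfer is presumably handled inside the captx machinery.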
- #if 0
- struct capref domains[MAX_DOMAINS];
-
- static void assign_domain_id_request(struct monitor_binding *b, uintptr_t ust,
- struct capref disp, struct capref ep)
- {
- for(domainid_t id = 1; id < MAX_DOMAINS; id++) {
- if(domains[id].cnode.address_bits == 0) {
- domains[id] = ep;
- errval_t err = invoke_domain_id(disp, id);
- assert(err_is_ok(err));
-
- err = b->tx_vtbl.assign_domain_id_reply(b, NOP_CONT, ust, id);
- if (err_is_fail(err)) {
- USER_PANIC_ERR(err, "assign domain ID failed\n");
- }
- return;
- }
- }
-
- // Return error
- errval_t err = b->tx_vtbl.assign_domain_id_reply(b, NOP_CONT, ust, 0);
- if (err_is_fail(err)) {
- USER_PANIC_ERR(err, "assign domain ID failed\n");
- }
- }
- #endif
-
static void span_domain_request(struct monitor_binding *mb,
uintptr_t domain_id, uint8_t core_id,
struct capref vroot, struct capref disp)
}
}
- static void num_cores_request(struct monitor_binding *b)
+ static void migrate_dispatcher_request(struct monitor_binding *b,
+ coreid_t coreid, struct capref vroot,
+ struct capref disp)
{
- /* XXX: This is deprecated and shouldn't be used: there's nothing useful you
- * can do with the result, unless you assume that core IDs are contiguous
- * and start from zero, which is a false assumption! Go ask the SKB...
- */
-
- DEBUG_CAPOPS("Application invoked deprecated num_cores_request() API."
- " Please fix it!\n");
-
- /* Send reply */
- errval_t err = b->tx_vtbl.num_cores_reply(b, NOP_CONT, num_monitors);
- if (err_is_fail(err)) {
- USER_PANIC_ERR(err, "sending num_cores_reply failed");
- }
+ printf("%s:%d\n", __FUNCTION__, __LINE__);
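+ // stub: only logs the call for now; the actual dispatcher migration
+ // presumably arrives in a later patch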
-
}
struct monitor_rx_vtbl the_table = {