distops functionality is NYI.
This commit also makes the root cnodes explicit for sys_map(), closes T270.
Notable changes:
* LRPC LMP endpoints are regular capabilities in L2 cnode, no longer in root
cnode.
* Capability operations take a capability address for the root cnode relative
to which capability addresses are to be resolved. The root cnode capability
address must be resolvable in the current domain's cspace.
Closes T264.
Signed-off-by: Simon Gerber <simon.gerber@inf.ethz.ch>
errors kernel SYS_ERR_ {
default success OK "Success",
+ failure NOT_IMPLEMENTED "Kernel functionality NYI",
+
// generic errors during system call handling
failure ILLEGAL_SYSCALL "Illegal system call number",
failure INVARGS_SYSCALL "Invalid system call arguments",
failure LMP_CAPTRANSFER_DST_CNODE_INVALID "Destination CNode cap not of type CNode for cap transfer",
failure LMP_CAPTRANSFER_DST_SLOT_OCCUPIED "Destination slot is occupied for cap transfer",
failure LRPC_SLOT_INVALID "Invalid slot specified for LRPC",
+ failure LRPC_NOT_L1 "L1 CNode lookup failed for LRPC",
+ failure LRPC_NOT_L2 "L2 CNode lookup failed for LRPC",
failure LRPC_NOT_ENDPOINT "Slot specified for LRPC does not contain an endpoint cap",
failure INVALID_EPLEN "Endpoint buffer has invalid length",
failure GUARD_MISMATCH "Guard does not match",
failure CAP_NOT_FOUND "Capability not found (empty slot encountered)",
failure IDENTIFY_LOOKUP "Error while looking up cap to identify",
+ failure L1_CNODE_INDEX "Index into L1 CNode too high",
+ failure CAP_LOOKUP_DEPTH "Invalid capability lookup depth",
// Generic capability manipulation errors
failure SLOT_IN_USE "Destination capability slots occupied",
failure SLOT_LOOKUP_FAIL "Failure during slot lookup",
+ failure CNODE_NOT_ROOT "Found non-root CNode during root CNode lookup",
failure GUARD_SIZE_OVERFLOW "Specified guard size exceeds CSpace depth",
failure INVALID_SOURCE_TYPE "Invalid source capability type",
failure ILLEGAL_DEST_TYPE "Illegal destination capability type",
failure SOURCE_CAP_LOOKUP "Error looking up source capability",
+ failure SOURCE_ROOTCN_LOOKUP "Error looking up source root CNode",
failure DEST_CNODE_LOOKUP "Error looking up destination CNode",
+ failure DEST_ROOTCN_LOOKUP "Error looking up destination root CNode",
failure DEST_CNODE_INVALID "Destination CNode cap is not of type CNode",
failure ROOT_CAP_LOOKUP "Error looking up root capability",
failure DEST_TYPE_INVALID "Destination capability is of invalid type",
failure DISP_VSPACE_INVALID "Invalid capability type given for VSpace root on dispatcher",
failure DISP_FRAME "Error setting dispatcher frame",
failure DISP_FRAME_INVALID "Invalid capability type given for dispatcher frame",
+ failure DISP_FRAME_SIZE "Dispatcher frame too small",
failure DISP_NOT_RUNNABLE "Cannot run dispatcher; it is not completely setup",
failure DISP_CAP_LOOKUP "Error looking up dispatcher cap",
failure DISP_CAP_INVALID "Invalid type capability given for dispatcher cap",
failure ARCHITECTURE_NOT_SUPPORTED "Unable to boot core: specified architecture is not supported by kernel",
failure INVALID_YIELD_TARGET "Target capability for directed yield is invalid",
failure DISP_OCAP_LOOKUP "Error looking up other dispatcher cap",
+ failure DISP_OCAP_TYPE "Other dispatcher cap is not dispatcher",
// VMKit specific errors
failure VMKIT_UNAVAIL "Virtualization extensions are unavailable",
failure SHOULD_NOT_GET_HERE "Should not get here",
failure NOT_CNODE "Function invoked on a capref, that does not represent a CNode",
+ // cspace
+ failure CNODE_TYPE "Type requested for cnode creation is not valid cnode type",
+ failure CNODE_SLOTS "#slots requested for cnode creation is invalid",
+
// nested errors in specific functions
failure FRAME_ALLOC "Failure in frame_alloc()",
failure FRAME_CREATE "Failure in frame_create()",
failure COPY_SUPERCN_CAP "Failed to copy superCN cap to mem_serv",
failure MAP_BOOTINFO "Failed to map bootinfo to child",
failure COPY_KERNEL_CAP "Failed to copy kernel cap to monitor",
+ failure COPY_BSP_KCB "Error copying BSP KernelControlBlock",
failure COPY_IPI "Failed to copy IPI cap to monitor",
failure COPY_PERF_MON "Failed to copy performance monitoring cap to monitor",
failure COPY_MODULECN_CAP "Failed to copy module CNode cap to monitor",
uintptr_t arg8, uintptr_t arg9,
uintptr_t arg10)
{
- uint8_t invoke_bits = get_cap_valid_bits(to);
- capaddr_t invoke_cptr = get_cap_addr(to) >> (CPTR_BITS - invoke_bits);
+ capaddr_t invoke_cptr = get_cap_addr(to);
+ enum cnode_type invoke_level = get_cap_level(to);
return syscall(SYSCALL_INVOKE, (uint64_t)invoke_cptr << 32 |
- (uint64_t)invoke_bits << 16 | 10 << 8, 0,
+ (uint64_t)invoke_level << 16 | 10 << 8, 0,
arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9,
arg10);
}
*
* See also cap_retype(), which wraps this.
*
- * \param root Capability of the Root CNode to invoke
- * \param cap Address of cap to retype.
+ * \param root Capability of the source cspace root CNode to invoke
+ * \param src_cspace Source cspace cap address in our cspace.
+ * \param cap Address of cap to retype in source cspace.
* \param offset Offset into cap to retype
* \param newtype Kernel object type to retype to.
* \param objsize Size of created objects, for variable-sized types
* \param count Number of objects to create
- * \param to Address of CNode cap to place retyped caps into.
+ * \param to_cspace Destination cspace cap address in our cspace
+ * \param to Address of CNode cap in destination cspace to place
+ * retyped caps into.
+ * \param to_level Level/depth of CNode cap in destination cspace
* \param slot Slot in CNode cap to start placement.
- * \param bits Number of valid address bits in 'to'.
*
* \return Error code
*/
-static inline errval_t invoke_cnode_retype(struct capref root, capaddr_t cap,
- gensize_t offset, enum objtype newtype,
- gensize_t objsize, size_t count,
- capaddr_t to, capaddr_t slot, int bits)
+static inline errval_t invoke_cnode_retype(struct capref root, capaddr_t src_cspace,
+ capaddr_t cap, gensize_t offset,
+ enum objtype newtype, gensize_t objsize,
+ size_t count, capaddr_t to_cspace,
+ capaddr_t to, enum cnode_type to_level,
+ capaddr_t slot)
{
assert(cap != CPTR_NULL);
- return cap_invoke9(root, CNodeCmd_Retype, cap, offset, newtype, objsize,
- count, to, slot, bits).error;
+ return cap_invoke10(root, CNodeCmd_Retype,
+ ((uint64_t)src_cspace << 32) | (uint64_t)cap, offset,
+ newtype, objsize, count, to_cspace, to, slot, to_level).error;
}
/**
* \param dest_cnode_cptr Address of CNode cap, where newly created cap will be
* placed into.
* \param dest_slot Slot in CNode cap to place new cap.
- * \param dest_vbits Number of valid address bits in 'dest_cnode_cptr'.
+ * \param dest_level Depth/level of destination CNode.
*
* \return Error code
*/
enum objtype type, uint8_t objbits,
capaddr_t dest_cnode_cptr,
capaddr_t dest_slot,
- uint8_t dest_vbits)
+ enum cnode_type dest_level)
{
assert(dest_cnode_cptr != CPTR_NULL);
return cap_invoke6(root, CNodeCmd_Create, type, objbits, dest_cnode_cptr,
- dest_slot, dest_vbits).error;
+ dest_slot, dest_level).error;
}
/**
*
* See also cap_mint(), which wraps this.
*
- * \param root Capability of the CNode to invoke
- * \param to CNode to place copy into.
- * \param slot Slot in CNode cap to place copy into.
+ * \param root Capability of the source cspace root CNode to invoke
+ * \param to_cspace Destination cspace cap address relative to source cspace
+ * \param to Destination CNode address relative to destination cspace
+ * \param slot Slot in destination CNode cap to place copy into
* \param from Address of cap to copy.
- * \param tobits Number of valid address bits in 'to'.
- * \param frombits Number of valid address bits in 'from'.
+ * \param tolevel Level/depth of 'to'.
+ * \param fromlevel Level/depth of 'from'.
* \param param1 1st cap-dependent parameter.
* \param param2 2nd cap-dependent parameter.
*
* \return Error code
*/
-static inline errval_t invoke_cnode_mint(struct capref root, capaddr_t to,
- capaddr_t slot, capaddr_t from, int tobits,
- int frombits, uint64_t param1,
- uint64_t param2)
+static inline errval_t invoke_cnode_mint(struct capref root, capaddr_t to_cspace,
+ capaddr_t to, capaddr_t slot,
+ capaddr_t from_cspace, capaddr_t from,
+ enum cnode_type tolevel,
+ enum cnode_type fromlevel,
+ uint64_t param1, uint64_t param2)
{
- return cap_invoke8(root, CNodeCmd_Mint, to, slot, from, tobits, frombits,
- param1, param2).error;
+ return cap_invoke10(root, CNodeCmd_Mint, to_cspace, to, slot, from_cspace,
+ from, tolevel, fromlevel, param1, param2).error;
}
/**
*
* See also cap_copy(), which wraps this.
*
- * \param root Capability of the CNode to invoke
- * \param to CNode to place copy into.
+ * \param root Capability of the source cspace root CNode to invoke
+ * \param to_cspace Capability address of destination root cnode relative
+ * to our cspace
+ * \param to CNode address to place copy into relative to
+ * destination cspace.
* \param slot Slot in CNode cap to place copy into.
+ * \param from_cspace Capability address of source root cnode relative
+ * to our cspace
* \param from Address of cap to copy.
- * \param tobits Number of valid address bits in 'to'.
- * \param frombits Number of valid address bits in 'from'.
+ * \param tolevel Level/depth of 'to'.
+ * \param fromlevel Level/depth of 'from'.
*
* \return Error code
*/
-static inline errval_t invoke_cnode_copy(struct capref root, capaddr_t to,
- capaddr_t slot, capaddr_t from, int tobits,
- int frombits)
+static inline errval_t invoke_cnode_copy(struct capref root, capaddr_t to_cspace,
+ capaddr_t to, capaddr_t slot,
+ capaddr_t from_cspace, capaddr_t from,
+ enum cnode_type tolevel,
+ enum cnode_type fromlevel)
{
- return cap_invoke6(root, CNodeCmd_Copy, to, slot, from,
- tobits, frombits).error;
+ return cap_invoke8(root, CNodeCmd_Copy, to_cspace, to, slot, from_cspace,
+ from, tolevel, fromlevel).error;
}
/**
*
* \param root Capability of the CNode to invoke
* \param cap Address of cap to delete.
- * \param bits Number of valid bits within 'cap'.
+ * \param level Level/depth of 'cap'.
*
* \return Error code
*/
static inline errval_t invoke_cnode_delete(struct capref root, capaddr_t cap,
- int bits)
+ enum cnode_type level)
{
- return cap_invoke3(root, CNodeCmd_Delete, cap, bits).error;
+ return cap_invoke3(root, CNodeCmd_Delete, cap, level).error;
}
static inline errval_t invoke_cnode_revoke(struct capref root, capaddr_t cap,
- int bits)
+ enum cnode_type level)
{
- return cap_invoke3(root, CNodeCmd_Revoke, cap, bits).error;
+ return cap_invoke3(root, CNodeCmd_Revoke, cap, level).error;
}
static inline errval_t invoke_cnode_get_state(struct capref root, capaddr_t cap,
- int bits, distcap_state_t *ret)
+ enum cnode_type level, distcap_state_t *ret)
{
- struct sysret sysret = cap_invoke3(root, CNodeCmd_GetState, cap, bits);
+ struct sysret sysret = cap_invoke3(root, CNodeCmd_GetState, cap, level);
assert(ret != NULL);
if (err_is_ok(sysret.error)) {
}
static inline errval_t invoke_vnode_map(struct capref ptable, capaddr_t slot,
- capaddr_t src, int frombits, size_t flags,
- size_t offset, size_t pte_count,
- capaddr_t mcnaddr, int mcnbits,
- cslot_t mapping_slot)
+ capaddr_t src_root, capaddr_t src,
+ enum cnode_type srclevel, size_t
+ flags, size_t offset, size_t pte_count,
+ capaddr_t mcnroot, capaddr_t mcnaddr,
+ enum cnode_type mcnlevel, cslot_t mapping_slot)
{
- return cap_invoke10(ptable, VNodeCmd_Map, slot, src, frombits, flags,
- offset, pte_count, mcnaddr, mcnbits, mapping_slot).error;
+ return cap_invoke10(ptable, VNodeCmd_Map, slot,
+ ((uint64_t)src_root << 32) | (uint64_t)src, srclevel,
+ flags, offset, pte_count,
+ ((uint64_t)mcnroot << 32) | (uint64_t)mcnaddr,
+ mcnlevel, mapping_slot).error;
}
static inline errval_t invoke_vnode_unmap(struct capref cap,
- capaddr_t mapping_addr, int bits)
+ capaddr_t mapping_addr,
+ enum cnode_type level)
{
- return cap_invoke3(cap, VNodeCmd_Unmap, mapping_addr, bits).error;
+ return cap_invoke3(cap, VNodeCmd_Unmap, mapping_addr, level).error;
}
/**
struct frame_identity *ret)
{
assert(ret != NULL);
+ assert(get_croot_addr(frame) == CPTR_ROOTCN);
struct sysret sysret = cap_invoke2(frame, FrameCmd_Identify, (uintptr_t)ret);
/**
* \brief Setup a dispatcher, possibly making it runnable
*
- * \param dispatcher Address of dispatcher capability
- * \param domdispatcher Address of existing dispatcher for domain ID
- * \param cspace_root Root of CSpace for new dispatcher
- * \param cspace_root_bits Number of valid bits in cspace_root
- * \param vspace_root Root of VSpace for new dispatcher
- * \param dispatcher_frame Frame capability for dispatcher structure
+ * \param dispatcher Address of dispatcher capability relative to own
+ * cspace
+ * \param domdispatcher Address of existing dispatcher for domain ID relative
+ * to own cspace
+ * \param cspace Root of CSpace for new dispatcher relative to own
+ * cspace
+ * \param vspace Root of VSpace for new dispatcher relative to cspace
+ * for new dispatcher.
+ * \param dispframe Frame capability for dispatcher structure relative to
+ * cspace for new dispatcher.
* \param run Make runnable if true
*
- * Any arguments of CPTR_NULL are ignored.
+ * Need to either supply caprefs for all or none of cspace, vspace, dispframe
+ * and domdispatcher.
*
* \return Error code
*/
struct capref cspace, struct capref vspace,
struct capref dispframe, bool run)
{
- uint8_t root_vbits = get_cap_valid_bits(cspace);
- capaddr_t root_caddr = get_cap_addr(cspace) >> (CPTR_BITS - root_vbits);
+ assert(get_croot_addr(dispatcher) == CPTR_ROOTCN);
+ assert(get_croot_addr(cspace) == CPTR_ROOTCN);
+ assert(get_croot_addr(domdispatcher) == CPTR_ROOTCN);
+ assert(get_croot_addr(vspace) == get_cap_addr(cspace));
+ assert(get_croot_addr(dispframe) == get_cap_addr(cspace));
+
+ capaddr_t root_caddr = get_cap_addr(cspace);
+ uint8_t root_level = get_cap_level(cspace);
capaddr_t vtree_caddr = get_cap_addr(vspace);
capaddr_t disp_caddr = get_cap_addr(dispframe);
capaddr_t dd_caddr = get_cap_addr(domdispatcher);
return cap_invoke7(dispatcher, DispatcherCmd_Setup, root_caddr,
- root_vbits, vtree_caddr, disp_caddr, run,
+ root_level, vtree_caddr, disp_caddr, run,
dd_caddr).error;
}
static inline errval_t invoke_irqtable_alloc_dest_cap(struct capref irqcap, struct capref dest_cap)
{
- uint8_t dcn_vbits = get_cnode_valid_bits(dest_cap);
+ uint8_t dcn_level = get_cnode_level(dest_cap);
capaddr_t dcn_addr = get_cnode_addr(dest_cap);
- struct sysret ret = cap_invoke4(irqcap, IRQTableCmd_AllocDestCap, dcn_vbits, dcn_addr, dest_cap.slot);
+ struct sysret ret = cap_invoke4(irqcap, IRQTableCmd_AllocDestCap,
+ dcn_level, dcn_addr, dest_cap.slot);
return ret.error;
}
idcap_id_t *id)
{
assert(id != NULL);
+ assert(get_croot_addr(idcap) == CPTR_ROOTCN);
struct sysret sysret = cap_invoke1(idcap, IDCmd_Identify);
uint64_t arg7, uint64_t arg8, uint64_t arg9,
uint64_t arg10)
{
- uint8_t send_bits = get_cap_valid_bits(send_cap);
- capaddr_t send_cptr = get_cap_addr(send_cap) >> (CPTR_BITS - send_bits);
+ uint8_t send_level = get_cap_level(send_cap);
+ capaddr_t send_cptr = get_cap_addr(send_cap);
if(debug_notify_syscall) {
printf("memcached: lmp_ep_send while forbidden from %p, %p, %p\n",
#ifndef TRACE_DISABLE_LRPC
// Do an LRPC if possible
- if (send_cptr == 0 && send_bits == 0 // Not sending a cap
- && ep.cnode.address == CPTR_ROOTCN // EP in rootcn
+ if (send_cptr == 0 && send_level == 0 // Not sending a cap
&& (flags & LMP_FLAG_SYNC) != 0 // sync option
&& length_words <= LRPC_MSG_LENGTH) { // Check length
assert(LRPC_MSG_LENGTH == 4);
- return syscall6(SYSCALL_LRPC, ep.slot, arg1, arg2, arg3, arg4).error;
+ assert(get_croot_addr(ep) == CPTR_ROOTCN);
+ return syscall6(SYSCALL_LRPC, get_cap_addr(ep), arg1, arg2, arg3, arg4).error;
}
#endif
- uint8_t invoke_bits = get_cap_valid_bits(ep);
- capaddr_t invoke_cptr = get_cap_addr(ep) >> (CPTR_BITS - invoke_bits);
+ uint8_t invoke_level = get_cap_level(ep);
+ capaddr_t invoke_cptr = get_cap_addr(ep);
return syscall(SYSCALL_INVOKE,
- (uint64_t)invoke_cptr << 32 | (uint64_t)send_bits << 24 |
- (uint64_t)invoke_bits << 16 | (uint64_t)length_words << 8 |
+ (uint64_t)invoke_cptr << 32 | (uint64_t)send_level << 24 |
+ (uint64_t)invoke_level << 16 | (uint64_t)length_words << 8 |
flags, send_cptr,
arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9,
arg10).error;
#include <sys/cdefs.h>
#include <barrelfish_kpi/types.h>
+#include <barrelfish_kpi/init.h>
+#include <barrelfish_kpi/capabilities.h>
+
+#include <barrelfish/cspace.h>
+
#include <stdint.h>
#include <stdbool.h>
+#include <bitmacros.h>
+
__BEGIN_DECLS
-#include <stdbool.h>
+/**
+ * \brief extract slot (L2 index) from capability address `addr`
+ * \param addr the capability address
+ * \return The slot number (L2 index) component of the address, i.e. the low
+ * bits.
+ */
+static inline cslot_t get_capaddr_slot(capaddr_t addr)
+{
+ return (cslot_t)(addr & MASK_T(capaddr_t, L2_CNODE_BITS));
+}
/**
- * \brief User-level representation of a CNode, its CSpace address and size
+ * \brief extract CNode address component from capability address `addr`
+ * \param addr the capability address
+ * \return the cnode component of the address, i.e. the address with the slot
+ * (L2 index) bits set to zero.
+ */
+static inline capaddr_t get_capaddr_cnode_addr(capaddr_t addr)
+{
+ return addr & ~MASK_T(capaddr_t, L2_CNODE_BITS);
+}
+
+enum cnode_type {
+ CNODE_TYPE_ROOT = 0,
+ CNODE_TYPE_OTHER,
+ CNODE_TYPE_COUNT,
+} __attribute__((packed));
+
+/**
+ * \brief User-level representation of a CNode, this is essentially a capref
+ * to a CNode.
*/
struct cnoderef {
- capaddr_t address; ///< Base address of CNode in CSpace
- uint8_t address_bits; ///< Number of valid bits in base address
- uint8_t size_bits; ///< Number of slots in the CNode as a power of 2
- uint8_t guard_size; ///< Guard size of the CNode
+ capaddr_t croot;
+ capaddr_t cnode;
+ enum cnode_type level;
} __attribute__((packed));
-#define NULL_CNODE (struct cnoderef){ /*address*/ 0, /*address_bits*/ 0, \
- /*size_bits*/ 0, /*guard_size*/ 0 }
+#define NULL_CNODE (struct cnoderef){ \
+ /*croot*/ 0, /*cnode*/ 0, \
+ /*level*/ CNODE_TYPE_ROOT }
/**
* \brief User-level representation of a capability and its CSpace address
*/
struct capref {
- struct cnoderef cnode; ///< CNode this cap resides in
- capaddr_t slot; ///< Slot number within CNode
+ struct cnoderef cnode;
+ cslot_t slot;
};
#define NULL_CAP (struct capref){ /*cnode*/ NULL_CNODE, /*slot*/ 0 }
+static inline bool cnoderef_is_null(struct cnoderef cnoderef)
+{
+ return cnoderef.croot == 0 && cnoderef.cnode == 0;
+}
+
static inline bool capref_is_null(struct capref capref)
{
- return capref.cnode.address == 0 && capref.cnode.address_bits == 0;
+ return cnoderef_is_null(capref.cnode) && capref.slot == 0;
}
/* well-known cnodes */
-extern struct cnoderef cnode_root, cnode_task, cnode_base,
- cnode_super, cnode_page, cnode_module;
+extern struct cnoderef cnode_root, cnode_task, cnode_base, cnode_super,
+ cnode_page, cnode_module;
/* well-known capabilities */
extern struct capref cap_root, cap_monitorep, cap_irq, cap_io, cap_dispatcher,
cap_sessionid, cap_ipi, cap_vroot;
/**
- * \brief Returns the number of valid bits in the CSpace address of a cap
+ * \brief Returns the depth in the CSpace address of a cap
*/
-static inline uint8_t get_cap_valid_bits(struct capref cap)
+static inline uint8_t get_cap_level(struct capref cap)
{
- uint8_t sum = cap.cnode.address_bits + cap.cnode.guard_size +
- cap.cnode.size_bits;
- if (sum > CPTR_BITS) {
- return sum % CPTR_BITS;
+ if (capref_is_null(cap)) {
+ return 0;
} else {
- return sum;
+ return cap.cnode.level + 1;
}
}
/**
* \brief Returns the CSpace address of a cap
*/
-// XXX: workaround for an inlining bug in gcc 4.4.1 as shipped with ubuntu 9.10
-// XXX: bug still present in 4.4.3
-#if defined(__GNUC__) \
- && __GNUC__ == 4 && __GNUC_MINOR__ == 4 && __GNUC_PATCHLEVEL__ <= 3
-static __attribute__((noinline)) capaddr_t get_cap_addr(struct capref cap)
-#else
static inline capaddr_t get_cap_addr(struct capref cap)
-#endif
{
- uint8_t vbits = get_cap_valid_bits(cap);
- if (cap.cnode.address_bits == CPTR_BITS) { // special case for root
- return cap.slot << (CPTR_BITS - vbits);
- } else {
- return cap.cnode.address | (cap.slot << (CPTR_BITS - vbits));
+ if (!capref_is_null(cap)) {
+ switch (cap.cnode.level) {
+ case CNODE_TYPE_ROOT:
+ return cap.slot << L2_CNODE_BITS;
+ // capref is in L2 CNode
+ case CNODE_TYPE_OTHER:
+ return cap.cnode.cnode | cap.slot;
+ default:
+ assert(!"invalid level");
+ }
}
+ return 0;
}
/**
- * \brief Returns the number of valid bits in the CSpace address of the CNode
+ * \brief Returns the depth in the CSpace address of the CNode
* containing the given cap
*/
-static inline uint8_t get_cnode_valid_bits(struct capref cap)
+static inline uint8_t get_cnode_level(struct capref cap)
{
- return cap.cnode.address_bits;
+ return cap.cnode.level;
}
/**
* \brief Returns the CSpace address of the CNode containing the given cap
- *
- * Returns the valid bits of the address only, in the least significant bits
- * of the result. This is the format needed for CNode invocation parameters.
*/
static inline capaddr_t get_cnode_addr(struct capref cap)
{
- return cap.cnode.address >> (CPTR_BITS - cap.cnode.address_bits);
+ switch (cap.cnode.level) {
+ case CNODE_TYPE_ROOT:
+ return cap.cnode.croot;
+ case CNODE_TYPE_OTHER:
+ return cap.cnode.cnode;
+ default:
+ assert(!"unknown cnoderef type");
+ }
+}
+
+
+/**
+ * \brief Returns the CSpace address of the cspace root cap of the given cap
+ */
+static inline capaddr_t get_croot_addr(struct capref cap)
+{
+ return cap.cnode.croot;
+}
+
+static inline struct capref get_croot_capref(struct capref cap)
+{
+ capaddr_t croot = get_croot_addr(cap);
+ struct capref ref = {
+ .cnode = {
+ .croot = CPTR_ROOTCN,
+ .cnode = get_capaddr_cnode_addr(croot),
+ .level = CNODE_TYPE_OTHER,
+ },
+ .slot = get_capaddr_slot(croot),
+ };
+ return ref;
}
/**
*/
static inline bool cnodecmp(struct cnoderef c1, struct cnoderef c2)
{
- return ((c1.address == c2.address) &&
- (c1.address_bits == c2.address_bits) &&
- (c1.guard_size == c2.guard_size));
+ return (c1.cnode == c2.cnode && c1.croot == c2.croot && c1.level == c2.level);
}
/**
*/
static inline bool capcmp(struct capref c1, struct capref c2)
{
- return (c1.slot == c2.slot) && cnodecmp(c1.cnode, c2.cnode);
+ return c1.slot == c2.slot && cnodecmp(c1.cnode, c2.cnode);
}
/**
* \brief Creates a new #cnoderef struct, performing address calculations.
+ * Takes an explicit cnode_type for the new cnoderef instead of size_bits.
*/
static inline struct cnoderef build_cnoderef(struct capref cap,
- uint8_t size_bits)
+ enum cnode_type cntype)
{
- struct cnoderef ret;
- ret.address = get_cap_addr(cap);
- ret.address_bits = (cap.cnode.address_bits + cap.cnode.guard_size +
- cap.cnode.size_bits) % CPTR_BITS;
- ret.size_bits = size_bits;
- ret.guard_size = 0; // XXX
- return ret;
+ assert(cntype < CNODE_TYPE_COUNT);
+
+ struct cnoderef cnode = NULL_CNODE;
+ switch(get_cnode_level(cap)) {
+ // L2 cnode in our root cnode
+ case CNODE_TYPE_ROOT:
+ // cannot make cnoderef from non-invokable capref.
+ assert(cap.cnode.croot == CPTR_ROOTCN);
+ cnode.croot = CPTR_ROOTCN;
+ cnode.cnode = get_cap_addr(cap);
+ cnode.level = cntype;
+ break;
+ // CNode for another cspace
+ case CNODE_TYPE_OTHER:
+ cnode.level = cntype;
+ switch (cntype) {
+ // creating a cnoderef to a root cnode for another cspace
+ case CNODE_TYPE_ROOT:
+ cnode.croot = get_cap_addr(cap);
+ cnode.cnode = 0;
+ break;
+ case CNODE_TYPE_OTHER:
+ cnode.croot = get_croot_addr(cap);
+ cnode.cnode = get_cap_addr(cap);
+ break;
+ default:
+ assert(!"build_cnoderef: provided cntype invalid");
+ }
+ break;
+ default:
+ assert(!"cap level not valid");
+ return NULL_CNODE;
+ }
+ return cnode;
}
__END_DECLS
errval_t cnode_create(struct capref *ret_dest, struct cnoderef *cnoderef,
cslot_t slots, cslot_t *retslots);
-errval_t cnode_create_l1(struct capref *ret_dest, struct cnoderef *cnoderef);
+errval_t cnode_create_foreign(struct capref *ret_dest, struct cnoderef *cnoderef,
+ enum objtype cntype);
errval_t cnode_create_l2(struct capref *ret_dest, struct cnoderef *cnoderef);
errval_t cnode_create_raw(struct capref dest, struct cnoderef *cnoderef,
- cslot_t slots, cslot_t *retslots);
+ enum objtype cntype, cslot_t slots, cslot_t *retslots);
errval_t cnode_create_with_guard(struct capref dest, struct cnoderef *cnoderef,
cslot_t slots, cslot_t *retslots,
uint64_t guard, uint8_t guard_size);
errval_t cnode_create_from_mem(struct capref dest, struct capref src,
- struct cnoderef *cnoderef, uint8_t slot_bits);
+ enum objtype cntype, struct cnoderef *cnoderef,
+ size_t slots);
errval_t cap_retype(struct capref dest_start, struct capref src, gensize_t offset,
enum objtype new_type, gensize_t objsize, size_t count);
-errval_t cap_create(struct capref dest, enum objtype type, uint8_t size_bits);
+errval_t cap_create(struct capref dest, enum objtype type, size_t bytes);
errval_t cap_delete(struct capref cap);
errval_t cap_revoke(struct capref cap);
struct cspace_allocator;
* type-specific parameters.
*/
static inline errval_t
-cap_mint(struct capref dest, struct capref src, uint64_t param1,
- uint64_t param2)
+cap_mint(struct capref dest, struct capref src, uint64_t param1, uint64_t param2)
{
- uint8_t dcn_vbits = get_cnode_valid_bits(dest);
+ capaddr_t dcs_addr = get_croot_addr(dest);
capaddr_t dcn_addr = get_cnode_addr(dest);
- uint8_t scp_vbits = get_cap_valid_bits(src);
- capaddr_t scp_addr = get_cap_addr(src) >> (CPTR_BITS - scp_vbits);
-
- return invoke_cnode_mint(cap_root, dcn_addr, dest.slot, scp_addr, dcn_vbits,
- scp_vbits, param1, param2);
+ uint8_t dcn_level = get_cnode_level(dest);
+ capaddr_t scp_root = get_croot_addr(src);
+ capaddr_t scp_addr = get_cap_addr(src);
+ uint8_t scp_level = get_cap_level(src);
+
+ return invoke_cnode_mint(cap_root, dcs_addr, dcn_addr, dest.slot,
+ scp_root, scp_addr, dcn_level, scp_level,
+ param1, param2);
}
/**
uint64_t attr, uint64_t off, uint64_t pte_count,
struct capref mapping)
{
- uint8_t svbits = get_cap_valid_bits(src);
- capaddr_t saddr = get_cap_addr(src) >> (CPTR_BITS - svbits);
+ assert(get_croot_addr(dest) == CPTR_ROOTCN);
+
+ capaddr_t sroot = get_croot_addr(src);
+ capaddr_t saddr = get_cap_addr(src);
+ uint8_t slevel = get_cap_level(src);
- uint8_t mcn_vbits = get_cnode_valid_bits(mapping);
+ uint8_t mcn_level = get_cnode_level(mapping);
capaddr_t mcn_addr = get_cnode_addr(mapping);
+ capaddr_t mcn_root = get_croot_addr(mapping);
- return invoke_vnode_map(dest, slot, saddr, svbits, attr, off, pte_count,
- mcn_addr, mcn_vbits, mapping.slot);
+ return invoke_vnode_map(dest, slot, sroot, saddr, slevel, attr, off, pte_count,
+ mcn_root, mcn_addr, mcn_level, mapping.slot);
}
static inline errval_t vnode_unmap(struct capref pgtl, struct capref mapping)
{
- uint8_t bits = get_cap_valid_bits(mapping);
- capaddr_t mapping_addr = get_cap_addr(mapping) >> (CPTR_BITS - bits);
+ capaddr_t mapping_addr = get_cap_addr(mapping);
+ uint8_t level = get_cap_level(mapping);
- return invoke_vnode_unmap(pgtl, mapping_addr, bits);
+ return invoke_vnode_unmap(pgtl, mapping_addr, level);
}
/**
static inline errval_t cap_copy(struct capref dest, struct capref src)
{
errval_t err;
- uint8_t dcn_vbits = get_cnode_valid_bits(dest);
+ capaddr_t dcs_addr = get_croot_addr(dest);
capaddr_t dcn_addr = get_cnode_addr(dest);
- uint8_t scp_vbits = get_cap_valid_bits(src);
- capaddr_t scp_addr = get_cap_addr(src) >> (CPTR_BITS - scp_vbits);
+ capaddr_t scp_root = get_croot_addr(src);
+ capaddr_t scp_addr = get_cap_addr(src);
+ uint8_t dcn_level = get_cnode_level(dest);
+ uint8_t scp_level = get_cap_level(src);
- err = invoke_cnode_copy(cap_root, dcn_addr, dest.slot, scp_addr, dcn_vbits,
- scp_vbits);
+ err = invoke_cnode_copy(cap_root, dcs_addr, dcn_addr, dest.slot, scp_root,
+ scp_addr, dcn_level, scp_level);
return err;
}
static inline errval_t cap_get_state(struct capref cap, distcap_state_t *state)
{
- uint8_t vbits = get_cap_valid_bits(cap);
- capaddr_t caddr = get_cap_addr(cap) >> (CPTR_BITS - vbits);
+ capaddr_t caddr = get_cap_addr(cap);
+ uint8_t level = get_cap_level(cap);
- return invoke_cnode_get_state(cap_root, caddr, vbits, state);
+ return invoke_cnode_get_state(cap_root, caddr, level, state);
}
__END_DECLS
#include <barrelfish_kpi/init.h>
/* Root CNode */
-#define ROOTCN_SLOT_MONITOREP (ROOTCN_SLOTS_USER+0) ///< lrpc endpoint to monitor
-#define ROOTCN_FREE_EP_SLOTS (ROOTCN_SLOTS_USER+1) ///< free slots to place EPs
+#define ROOTCN_FREE_SLOTS (ROOTCN_SLOTS_USER+0) ///< free slots to place EPs
/* Task CNode */
#define TASKCN_SLOT_SELFEP (TASKCN_SLOTS_USER+0) ///< Endpoint to self
-#define TASKCN_SLOT_INITEP (TASKCN_SLOTS_USER+1) ///< End Point to init
+#define TASKCN_SLOT_INITEP (TASKCN_SLOTS_USER+1) ///< End Point to init (for monitor and memserv)
+#define TASKCN_SLOT_MONITOREP (TASKCN_SLOTS_USER+1) ///< lrpc endpoint to monitor (for all other domains)
+#define TASKCN_SLOTS_FREE (TASKCN_SLOTS_USER+2) ///< first free slot in taskcn
// taskcn appears at the beginning of cspace, so the cptrs match the slot numbers
#define CPTR_ROOTCN TASKCN_SLOT_ROOTCN ///< Cptr to init's root CNode
int debug_print_cap(char *buf, size_t len, struct capability *cap);
int debug_print_cap_at_capref(char *buf, size_t len, struct capref cap);
int debug_print_capref(char *buf, size_t len, struct capref cap);
+int debug_print_cnoderef(char *buf, size_t len, struct cnoderef cnode);
void debug_print_save_area(arch_registers_state_t *state);
void debug_print_fpu_state(arch_registers_fpu_state_t *state);
};
struct range_slot_allocator {
- struct capref cnode_cap; ///< capref for the cnode
+ struct capref cnode_cap; ///< capref for the L1 cnode
+ cslot_t rootcn_slot; ///< L1 slot of L2 cnode in this allocator
struct cnoderef cnode; ///< cnoderef for the cnode to allocate from
struct cnode_meta *meta; ///< Linked list of meta data
struct slab_allocator slab; ///< Slab allocation
*/
/*
- * Copyright (c) 2007-2012, ETH Zurich.
+ * Copyright (c) 2007-2012, 2016, ETH Zurich.
* Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
// Size of CNode entry
#define OBJBITS_CTE 6
-// Size of L2 CNode table: resolve 8 bits of cap address
-#define OBJSIZE_L2CNODE (1UL << (OBJBITS_CTE + 8))
+/// Number of entries in L2 CNode in bits
+#define L2_CNODE_BITS 8
+/// Number of entries in L2 CNode
+#define L2_CNODE_SLOTS (1UL << L2_CNODE_BITS)
+/// Size of L2 CNode table in bytes
+#define OBJSIZE_L2CNODE (L2_CNODE_SLOTS * (1UL << OBJBITS_CTE))
// Size of dispatcher
#define OBJBITS_DISPATCHER 10
);
}
+static inline bool type_is_vroot(enum objtype type)
+{
+ STATIC_ASSERT(48 == ObjType_Num, "Check VNode definitions");
+
+ return (type == ObjType_VNode_x86_64_pml4 ||
+#ifdef CONFIG_PAE
+ type == ObjType_VNode_x86_32_pdpt ||
+#else
+ type == ObjType_VNode_x86_32_pdir ||
+#endif
+ type == ObjType_VNode_AARCH64_l1 ||
+ type == ObjType_VNode_ARM_l1
+ );
+}
/**
* Return size of vnode in bits. This is the size of a page table page.
*
return 0;
}
+/**
+ * Return the number of slots in the given CNode capability.
+ * @param cnode Capability of type CNode, L1CNode, or L2CNode.
+ * @return Number of slots in the CNode; 0 if the cap is not a CNode type.
+ */
+static inline size_t cnode_get_slots(struct capability *cnode) {
+ STATIC_ASSERT(48 == ObjType_Num, "Check CNode definitions");
+
+ switch (cnode->type) {
+ case ObjType_CNode:
+ return 1UL << cnode->u.cnode.bits;
+ case ObjType_L1CNode:
+ return cnode->u.l1cnode.allocated_bytes / (1UL << OBJBITS_CTE);
+ case ObjType_L2CNode:
+ return L2_CNODE_SLOTS;
+ default:
+ assert(!"not a cnode");
+ return 0;
+ }
+}
+
static inline enum objtype get_mapping_type(enum objtype captype)
{
STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all mapping types");
#define DEFAULT_CNODE_SLOTS (1UL << DEFAULT_CNODE_BITS)
-/// Number of entries in L2 CNode in bits
-#define L2_CNODE_BITS 8
-/// Number of entries in L2 CNode
-#define L2_CNODE_SLOTS (OBJSIZE_L2CNODE / (1UL << OBJBITS_CTE))
-
/// Number of entries in page cnode (in bits)
#define PAGE_CNODE_BITS (DEFAULT_CNODE_BITS + 2)
#define SLOT_ALLOC_CNODE_SLOTS L2_CNODE_SLOTS
/* Task CNode */
+#define TASKCN_SLOT_TASKCN 0 ///< Task CNode in itself (XXX)
#define TASKCN_SLOT_DISPATCHER 1 ///< Dispatcher cap in task cnode
#define TASKCN_SLOT_ROOTCN 2 ///< RootCN slot in task cnode
#define TASKCN_SLOT_DISPFRAME 4 ///< Dispatcher frame cap in task cnode
#define TASKCN_SLOT_IPI 17 ///< Copy of IPI cap
#define TASKCN_SLOTS_USER 18 ///< First free slot in taskcn for user
-/// Address bits resolved for the standard CNodes (taskcn, supercn, base_page_cn)
-#define DEFAULT_CN_ADDR_BITS (CPTR_BITS - L2_CNODE_BITS)
+/* Page CNode */
+#define PAGECN_SLOT_VROOT 0 ///< First slot of page cnode is root page table
+
+#define ROOTCN_SLOT_LEVEL CSPACE_LEVEL_L1
+#define ROOTCN_SLOT_ADDR(slot) ((slot) << L2_CNODE_BITS)
-#define CPTR_BASE_PAGE_CN_BASE (ROOTCN_SLOT_BASE_PAGE_CN << DEFAULT_CN_ADDR_BITS)
-#define CPTR_SUPERCN_BASE (ROOTCN_SLOT_SUPERCN << (CPTR_BITS - SUPER_CNODE_BITS))
-#define CPTR_PHYADDRCN_BASE (ROOTCN_SLOT_PACN << DEFAULT_CN_ADDR_BITS)
-#define CPTR_MODULECN_BASE (ROOTCN_SLOT_MODULECN << DEFAULT_CN_ADDR_BITS)
-#define CPTR_PML4_BASE (ROOTCN_SLOT_PAGECN << (CPTR_BITS - PAGE_CNODE_BITS))
-#define MODULECN_SIZE_BITS 14 ///< Size of module cnode (in bits)
+// Cspace addresses for well-defined L2 CNodes
+#define CPTR_TASKCN_BASE ROOTCN_SLOT_ADDR(ROOTCN_SLOT_TASKCN)
+#define CPTR_BASE_PAGE_CN_BASE ROOTCN_SLOT_ADDR(ROOTCN_SLOT_BASE_PAGE_CN)
+#define CPTR_SUPERCN_BASE ROOTCN_SLOT_ADDR(ROOTCN_SLOT_SUPERCN)
+#define CPTR_PHYADDRCN_BASE ROOTCN_SLOT_ADDR(ROOTCN_SLOT_PACN)
+#define CPTR_MODULECN_BASE ROOTCN_SLOT_ADDR(ROOTCN_SLOT_MODULECN)
+#define CPTR_PAGECN_BASE ROOTCN_SLOT_ADDR(ROOTCN_SLOT_PAGECN)
/**
* Memory region types.
/// Incoming LMP endpoint message buffer
struct lmp_endpoint_kern {
- capaddr_t recv_cptr; ///< CSpace address of CNode to receive caps
- capaddr_t recv_slot; ///< Slot number in #recv_cptr
- uint8_t recv_bits; ///< Valid bits in #recv_cptr
+ capaddr_t recv_cspc; ///< Address of cspace root of cnode to receive caps
+ capaddr_t recv_cptr; ///< CSpace address of slot to receive caps
uint32_t delivered; ///< Position in buffer (words delivered by kernel)
uint32_t consumed; ///< Position in buffer (words consumed by user)
uintptr_t buf[]; ///< Buffer for async LMP messages
/*
* Macros for bit manipulation: masks, etc.
*
- * Copyright (c) 2015, ETH Zurich.
+ * Copyright (c) 2015-2016, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
#ifndef __BITMACROS_H
#define __BITMACROS_H
+/* A one-bit mask at bit n of type t */
+#define BIT_T(t, n) ((t)1 << (n))
+
/* A one-bit mask at bit n */
-#define BIT(n) (1ULL << (n))
+#define BIT(n) BIT_T(uint64_t, n)
+
+/* An n-bit mask, beginning at bit 0 of type t */
+#define MASK_T(t, n) (BIT_T(t, n) - 1)
/* An n-bit mask, beginning at bit 0 */
-#define MASK(n) (BIT(n) - 1)
+#define MASK(n) MASK_T(uint64_t, n)
/* An n-bit field selector, beginning at bit m */
#define FIELD(m,n,x) (((x) >> m) & MASK(n))
union {
struct {
struct capref cap; ///< VNode cap
+ struct capref invokable; ///< Copy of VNode cap that is invokable
struct vnode *children; ///< Children of this VNode
} vnode; // for non-leaf node (maps another vnode)
struct {
static int bsp_coreid;
/// Quick way to find the base address of a cnode capability
-#define CNODE(cte) (cte)->cap.u.cnode.cnode
+#define CNODE(cte) get_address(&(cte)->cap)
/// Pointer to bootinfo structure for init
static struct bootinfo *bootinfo = (struct bootinfo *)BOOTINFO_BASE;
assert((remain & BASE_PAGE_MASK) == 0);
- assert(st->modulecn_slot < (1UL << st->modulecn->cap.u.cnode.bits));
+ assert(st->modulecn_slot < cnode_get_slots(&st->modulecn->cap));
// create as DevFrame cap to avoid zeroing memory contents
err = caps_create_new(ObjType_DevFrame, base_addr, remain,
remain, my_core_id,
*/
/*
- * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * Copyright (c) 2007, 2008, 2009, 2010, 2016, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
#include <barrelfish_kpi/syscalls.h>
mov %fs, OFFSETOF_FS_REG(%rdi)
mov %gs, OFFSETOF_GS_REG(%rdi)
- /* Load pointer to root CNode cap */
+ /* Load pointer to root CNode cap into %rdi */
mov dcb_current(%rip), %rdi
lea OFFSETOF_DCB_CSPACE_CAP(%rdi), %rdi
+ cmpb $OBJTYPE_L1CNODE, OFFSETOF_CAP_TYPE(%rdi)
+ jne err_cspace
+
+ /* Deconstruct cap address in %rsi into L1/L2 indices */
+ /* Store L1 index in r11 */
+ mov %rsi, %r11
+ shr $L2_CNODE_BITS, %r11
+ /* Store L2 index in rsi */
+ mov $1, %r15
+ shl $L2_CNODE_BITS, %r15
+ sub $1, %r15
+ and %r15, %rsi
+
/* Check that slot number is within CNode */
- movb OFFSETOF_CAP_CNODE_BITS(%rdi), %cl
- mov $1, %r15
- shl %cl, %r15
- cmp %r15, %rsi
+ movq OFFSETOF_CAP_L1CNODE_ALLOCATED_BYTES(%rdi), %rcx
+ shr $OBJBITS_CTE, %rcx
+ /* rcx: #slots in L1 CNode */
+ cmp %rcx, %r11
jae err_slot
- /* Load pointer to endpoint cap */
- shl $OBJBITS_CTE, %rsi
- mov OFFSETOF_CAP_CNODE_CNODE(%rdi), %rcx
+ /* Load pointer to endpoint cap: do two-level lookup */
+ /* deref slot in L1 cnode */
+ /* scale index for array lookup */
+ shl $OBJBITS_CTE, %r11
+ /* get cnode base address into rcx */
+ mov OFFSETOF_CAP_L1CNODE_CNODE(%rdi), %rcx
+ /* phys_to_mem() */
mov $PHYS_TO_MEM_OFFSET, %rdi // phys_to_mem()
add %rdi, %rcx
+ /* add offset into L1 CNode */
+ add %r11, %rcx
+
+ /* Check that we found a L2 CNode */
+ cmpb $OBJTYPE_L2CNODE, OFFSETOF_CAP_TYPE(%rcx)
+ jne err_l2cnode
+
+ /* L2 CNode cap pointer in %rcx */
+ /* Load pointer to EP from L2 CNode; L2 slot index in %rsi */
+ shl $OBJBITS_CTE, %rsi
+ mov OFFSETOF_CAP_L2CNODE_CNODE(%rcx), %rcx
+ /* phys_to_mem() */
+ mov $PHYS_TO_MEM_OFFSET, %rdi
+ add %rdi, %rcx
+ /* add offset into L2 CNode */
add %rsi, %rcx
/* Check that it's an endpoint */
movq $0, current_ldt_npages(%rip)
jmp load_ldt_continue
+err_l2index:
err_slot: // Wrong slot
mov $SYS_ERR_LRPC_SLOT_INVALID, %rax
jmp err
+err_cspace:
+ mov $SYS_ERR_LRPC_NOT_L1, %rax
+ jmp err
+
+err_l2cnode: // Encountered non-CNode
+ int $3 // hw breakpoint
+ mov $SYS_ERR_LRPC_NOT_L2, %rax
+ jmp err
+
err_endpoint: // Not an endpoint
mov $SYS_ERR_LRPC_NOT_ENDPOINT, %rax
/* jmp err - fall through */
}
}
-errval_t irq_debug_create_src_cap(uint8_t dcn_vbits, capaddr_t dcn, capaddr_t out_cap_addr, uint16_t gsi)
+errval_t irq_debug_create_src_cap(uint8_t dcn_level, capaddr_t dcn, capaddr_t out_cap_addr, uint16_t gsi)
{
// This method is a hack to forge a irq src cap for the given GSI targeting the ioapic
errval_t err;
out_cap.cap.u.irqsrc.controller = ioapic_controller_id;
struct cte * cn;
- err = caps_lookup_slot(&dcb_current->cspace.cap, dcn, dcn_vbits, &cn, CAPRIGHTS_WRITE);
+ err = caps_lookup_slot_2(&dcb_current->cspace.cap, dcn, dcn_level, &cn, CAPRIGHTS_WRITE);
if(err_is_fail(err)){
return err;
}
return SYS_ERR_OK;
}
-errval_t irq_table_alloc_dest_cap(uint8_t dcn_vbits, capaddr_t dcn, capaddr_t out_cap_addr)
+errval_t irq_table_alloc_dest_cap(uint8_t dcn_level, capaddr_t dcn, capaddr_t out_cap_addr)
{
errval_t err;
out_cap.cap.u.irqdest.vector = i;
struct cte * cn;
- err = caps_lookup_slot(&dcb_current->cspace.cap, dcn, dcn_vbits, &cn, CAPRIGHTS_WRITE);
+ err = caps_lookup_slot_2(&dcb_current->cspace.cap, dcn, dcn_level,
+ &cn, CAPRIGHTS_WRITE);
if(err_is_fail(err)){
return err;
}
struct cte *endpoint;
// Lookup & check message endpoint cap
- err = caps_lookup_slot(&dcb_current->cspace.cap, endpoint_adr,
- CPTR_BITS, &endpoint, CAPRIGHTS_WRITE);
+ err = caps_lookup_slot_2(&dcb_current->cspace.cap, endpoint_adr,
+ 2, &endpoint, CAPRIGHTS_WRITE);
if (err_is_fail(err)) {
return err_push(err, SYS_ERR_IRQ_LOOKUP_EP);
}
// TODO: cleanup already mapped pages
memset(mapping_cte, 0, sizeof(*mapping_cte));
debug(SUBSYS_PAGING, "slot in use\n");
+ printk(LOG_NOTE, "slot = 0x%016"PRIx64"\n", entry->raw);
return SYS_ERR_VNODE_SLOT_INUSE;
}
genpaddr_t paddr = get_address(&src_cte->cap) + offset;
genvaddr_t vaddr;
compile_vaddr(dest_vnode_cte, dest_slot, &vaddr);
- printf("mapping 0x%"PRIxGENPADDR" to 0x%"PRIxGENVADDR"\n", paddr, vaddr);
+ printk(LOG_NOTE, "mapping 0x%"PRIxGENPADDR" to 0x%"PRIxGENVADDR"\n", paddr, vaddr);
#endif
cslot_t last_slot = dest_slot + pte_count;
static struct sysret handle_dispatcher_setup(struct capability *to,
int cmd, uintptr_t *args)
{
- capaddr_t cptr = args[0];
- int depth = args[1];
- capaddr_t vptr = args[2];
- capaddr_t dptr = args[3];
- bool run = args[4];
- capaddr_t odptr = args[5];
+ capaddr_t cptr = args[0];
+ uint8_t level = args[1];
+ capaddr_t vptr = args[2] & 0xffffffff;
+ capaddr_t dptr = args[3] & 0xffffffff;
+ bool run = args[4];
+ capaddr_t odptr = args[5] & 0xffffffff;
TRACE(KERNEL, SC_DISP_SETUP, 0);
- struct sysret sr = sys_dispatcher_setup(to, cptr, depth, vptr, dptr, run, odptr);
+ struct sysret sr = sys_dispatcher_setup(to, cptr, level, vptr, dptr, run, odptr);
TRACE(KERNEL, SC_DISP_SETUP, 1);
return sr;
}
uintptr_t *args,
bool from_monitor)
{
- uint64_t source_cptr = args[0];
- uint64_t offset = args[1];
- uint64_t type = args[2];
- uint64_t objsize = args[3];
- uint64_t objcount = args[4];
- uint64_t dest_cnode_cptr = args[5];
- uint64_t dest_slot = args[6];
- uint64_t dest_vbits = args[7];
+ capaddr_t source_croot = args[0] >> 32;
+ capaddr_t source_cptr = args[0] & 0xffffffff;
+ uint64_t offset = args[1];
+ uint64_t type = args[2];
+ uint64_t objsize = args[3];
+ uint64_t objcount = args[4];
+ capaddr_t dest_cspace_cptr= args[5];
+ capaddr_t dest_cnode_cptr = args[6];
+ uint64_t dest_slot = args[7];
+ uint64_t dest_cnode_level = args[8];
TRACE(KERNEL, SC_RETYPE, 0);
- struct sysret sr = sys_retype(root, source_cptr, offset, type, objsize,
- objcount, dest_cnode_cptr, dest_slot, dest_vbits,
- from_monitor);
+ struct sysret sr = sys_retype(root, source_croot, source_cptr, offset, type,
+ objsize, objcount, dest_cspace_cptr,
+ dest_cnode_cptr, dest_cnode_level,
+ dest_slot, from_monitor);
TRACE(KERNEL, SC_RETYPE, 1);
return sr;
}
{
/* Retrieve arguments */
enum objtype type = args[0];
- uint8_t objbits = args[1];
+ size_t objsize = args[1];
capaddr_t dest_cnode_cptr = args[2];
- cslot_t dest_slot = args[3];
- uint8_t dest_vbits = args[4];
+ uint8_t dest_level = args[3];
+ cslot_t dest_slot = args[4];
TRACE(KERNEL, SC_CREATE, 0);
- struct sysret sr = sys_create(root, type, objbits, dest_cnode_cptr, dest_slot,
- dest_vbits);
+ struct sysret sr = sys_create(root, type, objsize, dest_cnode_cptr,
+ dest_level, dest_slot);
TRACE(KERNEL, SC_CREATE, 1);
return sr;
}
static struct sysret copy_or_mint(struct capability *root,
uintptr_t *args, bool mint)
{
- /* Retrive arguments */
- capaddr_t destcn_cptr = args[0];
- uint64_t dest_slot = args[1];
- capaddr_t source_cptr = args[2];
- int destcn_vbits = args[3];
- int source_vbits = args[4];
+ /* Retrieve arguments */
+ capaddr_t dest_cspace_cptr = args[0];
+ capaddr_t destcn_cptr = args[1];
+ uint64_t dest_slot = args[2];
+ capaddr_t source_croot_ptr = args[3];
+ capaddr_t source_cptr = args[4];
+ uint8_t destcn_level = args[5];
+ uint8_t source_level = args[6];
uint64_t param1, param2;
// params only sent if mint operation
if (mint) {
- param1 = args[5];
- param2 = args[6];
+ param1 = args[7];
+ param2 = args[8];
} else {
param1 = param2 = 0;
}
TRACE(KERNEL, SC_COPY_OR_MINT, 0);
- struct sysret sr = sys_copy_or_mint(root, destcn_cptr, dest_slot, source_cptr,
- destcn_vbits, source_vbits, param1, param2, mint);
+ struct sysret sr = sys_copy_or_mint(root, dest_cspace_cptr, destcn_cptr, dest_slot,
+ source_croot_ptr, source_cptr,
+ destcn_level, source_level,
+ param1, param2, mint);
TRACE(KERNEL, SC_COPY_OR_MINT, 1);
return sr;
}
{
/* Retrieve arguments */
uint64_t slot = args[0];
- capaddr_t source_cptr = args[1];
- int source_vbits = args[2];
+ capaddr_t source_root_cptr= args[1] >> 32;
+ capaddr_t source_cptr = args[1] & 0xffffffff;
+ uint8_t source_level = args[2];
uint64_t flags = args[3];
uint64_t offset = args[4];
uint64_t pte_count = args[5];
- capaddr_t mapping_cnptr = args[6];
- int mapping_cnvbits = args[7];
+ capaddr_t mapping_croot = args[6] >> 32;
+ capaddr_t mapping_cnptr = args[6] & 0xffffffff;
+ uint8_t mapping_cn_level= args[7];
cslot_t mapping_slot = args[8];
TRACE(KERNEL, SC_MAP, 0);
- struct sysret sr = sys_map(ptable, slot, source_cptr, source_vbits, flags,
- offset, pte_count, mapping_cnptr, mapping_cnvbits,
- mapping_slot);
+ struct sysret sr = sys_map(ptable, slot, source_root_cptr, source_cptr,
+ source_level, flags, offset, pte_count,
+ mapping_croot, mapping_cnptr,
+ mapping_cn_level, mapping_slot);
TRACE(KERNEL, SC_MAP, 1);
return sr;
}
int cmd, uintptr_t *args)
{
capaddr_t cptr = args[0];
- int bits = args[1];
- return sys_delete(root, cptr, bits);
+ uint8_t level = args[1];
+ return sys_delete(root, cptr, level);
}
static struct sysret handle_revoke(struct capability *root,
int cmd, uintptr_t *args)
{
capaddr_t cptr = args[0];
- int bits = args[1];
- return sys_revoke(root, cptr, bits);
+ uint8_t level = args[1];
+ return sys_revoke(root, cptr, level);
}
static struct sysret handle_get_state(struct capability *root,
int cmd, uintptr_t *args)
{
capaddr_t cptr = args[0];
- int bits = args[1];
- return sys_get_state(root, cptr, bits);
+ uint8_t level = args[1];
+ return sys_get_state(root, cptr, level);
}
-static struct sysret handle_cnode_cmd_nyi(struct capability *root,
- int cmd, uintptr_t *args)
+
+#if 1
+static struct sysret handle_cnode_cmd_obsolete(struct capability *root,
+ int cmd, uintptr_t *args)
{
- assert(root->type == ObjType_L1CNode || root->type == ObjType_L2CNode);
- panic("L%dCNode: command %d NYI", root->type == ObjType_L1CNode ? 1 : 2, cmd);
+ panic("Trying to invoke GPT CNode: command %d", cmd);
return SYSRET(LIB_ERR_NOT_IMPLEMENTED);
}
+#endif
static struct sysret handle_unmap(struct capability *pgtable,
int cmd, uintptr_t *args)
{
capaddr_t cptr = args[0];
- int bits = args[1];
+ uint8_t level = args[1];
errval_t err;
struct cte *mapping;
- err = caps_lookup_slot(&dcb_current->cspace.cap, cptr, bits,
- &mapping, CAPRIGHTS_READ_WRITE);
+ err = caps_lookup_slot_2(&dcb_current->cspace.cap, cptr, level,
+ &mapping, CAPRIGHTS_READ_WRITE);
if (err_is_fail(err)) {
return SYSRET(err_push(err, SYS_ERR_CAP_NOT_FOUND));
}
[FrameCmd_Identify] = handle_frame_identify,
},
[ObjType_CNode] = {
+ [CNodeCmd_Copy] = handle_cnode_cmd_obsolete,
+ [CNodeCmd_Mint] = handle_cnode_cmd_obsolete,
+ [CNodeCmd_Retype] = handle_cnode_cmd_obsolete,
+ [CNodeCmd_Create] = handle_cnode_cmd_obsolete,
+ [CNodeCmd_Delete] = handle_cnode_cmd_obsolete,
+ [CNodeCmd_Revoke] = handle_cnode_cmd_obsolete,
+ [CNodeCmd_GetState] = handle_cnode_cmd_obsolete,
+ },
+ [ObjType_L1CNode] = {
[CNodeCmd_Copy] = handle_copy,
[CNodeCmd_Mint] = handle_mint,
[CNodeCmd_Retype] = handle_retype,
[CNodeCmd_Revoke] = handle_revoke,
[CNodeCmd_GetState] = handle_get_state,
},
- [ObjType_L1CNode] = {
- [CNodeCmd_Copy] = handle_cnode_cmd_nyi,
- [CNodeCmd_Mint] = handle_cnode_cmd_nyi,
- [CNodeCmd_Retype] = handle_cnode_cmd_nyi,
- [CNodeCmd_Create] = handle_cnode_cmd_nyi,
- [CNodeCmd_Delete] = handle_cnode_cmd_nyi,
- [CNodeCmd_Revoke] = handle_cnode_cmd_nyi,
- [CNodeCmd_GetState] = handle_cnode_cmd_nyi,
- },
[ObjType_L2CNode] = {
- [CNodeCmd_Copy] = handle_cnode_cmd_nyi,
- [CNodeCmd_Mint] = handle_cnode_cmd_nyi,
- [CNodeCmd_Retype] = handle_cnode_cmd_nyi,
- [CNodeCmd_Create] = handle_cnode_cmd_nyi,
- [CNodeCmd_Delete] = handle_cnode_cmd_nyi,
- [CNodeCmd_Revoke] = handle_cnode_cmd_nyi,
- [CNodeCmd_GetState] = handle_cnode_cmd_nyi,
+ [CNodeCmd_Copy] = handle_copy,
+ [CNodeCmd_Mint] = handle_mint,
+ [CNodeCmd_Retype] = handle_retype,
+ [CNodeCmd_Create] = handle_create,
+ [CNodeCmd_Delete] = handle_delete,
+ [CNodeCmd_Revoke] = handle_revoke,
+ [CNodeCmd_GetState] = handle_get_state,
},
[ObjType_VNode_x86_64_pml4] = {
[VNodeCmd_Identify] = handle_vnode_identify,
{
// unpack "header" word
capaddr_t invoke_cptr = arg0 >> 32;
- uint8_t send_bits = arg0 >> 24;
- uint8_t invoke_bits = arg0 >> 16;
+ uint8_t send_level = arg0 >> 24;
+ uint8_t invoke_level = arg0 >> 16;
uint8_t length_words = arg0 >> 8;
uint8_t flags = arg0;
debug(SUBSYS_SYSCALL, "sys_invoke(0x%x(%d), 0x%lx)\n",
- invoke_cptr, invoke_bits, arg1);
+ invoke_cptr, invoke_level, arg1);
+ //printk(LOG_NOTE, "sys_invoke(0x%x(%d), 0x%lx)\n",
+ // invoke_cptr, invoke_level, arg1);
// Capability to invoke
struct capability *to = NULL;
- retval.error = caps_lookup_cap(&dcb_current->cspace.cap, invoke_cptr,
- invoke_bits, &to, CAPRIGHTS_READ);
+ retval.error = caps_lookup_cap_2(&dcb_current->cspace.cap, invoke_cptr,
+ invoke_level, &to, CAPRIGHTS_READ);
if (err_is_fail(retval.error)) {
break;
}
// try to deliver message
retval.error = lmp_deliver(to, dcb_current, args, length_words,
- arg1, send_bits, give_away);
+ arg1, send_level, give_away);
/* Switch to reciever upon successful delivery with sync flag,
* or (some cases of) unsuccessful delivery with yield flag */
panic("dispatch returned");
}
} else { // not endpoint cap, call kernel handler through dispatch table
+ // printk(LOG_NOTE, "sys_invoke: to->type = %d, cmd = %"PRIu64"\n",
+ // to->type, args[0]);
+
uint64_t cmd = args[0];
if (cmd >= CAP_MAX_CMD) {
retval.error = SYS_ERR_ILLEGAL_INVOCATION;
return SYS_ERR_OK;
}
+ else if (cte->cap.type == ObjType_L1CNode || cte->cap.type == ObjType_L2CNode)
+ {
+ printk(LOG_WARN, "delete last NYI for L1/L2 CNode");
+ return SYS_ERR_NOT_IMPLEMENTED;
+ }
else if (cte->cap.type == ObjType_Dispatcher)
{
debug(SUBSYS_CAPS, "deleting last copy of dispatcher: %p\n", cte);
*/
/*
- * Copyright (c) 2007-2012,2015, ETH Zurich.
+ * Copyright (c) 2007-2012,2015,2016, ETH Zurich.
* Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
case ObjType_L1CNode: {
int ret = snprintf(buf, len, "L1 CNode cap "
- "(allocated bytes %#"PRIxGENSIZE
+ "(base=%#"PRIxGENPADDR", allocated bytes %#"PRIxGENSIZE
", rights mask %#"PRIxCAPRIGHTS")",
- get_size(cap), cap->u.l1cnode.rightsmask);
+ get_address(cap), get_size(cap),
+ cap->u.l1cnode.rightsmask);
return ret;
}
case ObjType_L2CNode: {
int ret = snprintf(buf, len, "L2 CNode cap "
- "(rights mask %#"PRIxCAPRIGHTS")",
- cap->u.l1cnode.rightsmask);
+ "(base=%#"PRIxGENPADDR", rights mask %#"PRIxCAPRIGHTS")",
+ get_address(cap), cap->u.l1cnode.rightsmask);
return ret;
}
break;
case ObjType_L1CNode:
- printk(LOG_NOTE, "creating L1 CNode\n");
for (dest_i = 0; dest_i < count; dest_i++) {
+ assert(objsize >= OBJSIZE_L2CNODE);
+ assert(objsize % OBJSIZE_L2CNODE == 0);
temp_cap.u.l1cnode.cnode = lpaddr + dest_i * objsize;
temp_cap.u.l1cnode.allocated_bytes = objsize;
// XXX: implement CNode cap rights
- temp_cap.u.cnode.rightsmask = CAPRIGHTS_ALLRIGHTS;
+ temp_cap.u.l1cnode.rightsmask = CAPRIGHTS_ALLRIGHTS;
err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
if (err_is_fail(err)) {
break;
break;
case ObjType_L2CNode:
- printk(LOG_NOTE, "creating L2 CNode\n");
for (dest_i = 0; dest_i < count; dest_i++) {
temp_cap.u.l2cnode.cnode = lpaddr + dest_i * objsize;
// XXX: implement CNode cap rights
- temp_cap.u.cnode.rightsmask = CAPRIGHTS_ALLRIGHTS;
+ temp_cap.u.l2cnode.rightsmask = CAPRIGHTS_ALLRIGHTS;
err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
if (err_is_fail(err)) {
break;
/* Can only resolve CNode type */
/* XXX: this is not very clean */
- if (cnode_cap->type != ObjType_CNode && cnode_cap->type != ObjType_L1CNode) {
+ if (cnode_cap->type != ObjType_CNode &&
+ cnode_cap->type != ObjType_L1CNode &&
+ cnode_cap->type != ObjType_L2CNode)
+ {
debug(SUBSYS_CAPS, "caps_lookup_slot: Cap to lookup not of type CNode\n"
"cnode_cap->type = %u\n", cnode_cap->type);
TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
}
/* Number of bits resolved by this cnode (guard and bits) */
- uint8_t bits_resolved = cnode_cap->u.cnode.bits +
- cnode_cap->u.cnode.guard_size;
+ uint8_t bits_resolved = 0;
+ switch(cnode_cap->type) {
+ case ObjType_CNode:
+ bits_resolved = cnode_cap->u.cnode.bits +
+ cnode_cap->u.cnode.guard_size;
+ break;
+ case ObjType_L1CNode:
+ {
+ printk(LOG_NOTE, "L1CNode: size = %"PRIuGENSIZE"\n", cnode_cap->u.l1cnode.allocated_bytes);
+ cslot_t slots = cnode_cap->u.l1cnode.allocated_bytes / sizeof(struct cte);
+ printk(LOG_NOTE, "L1CNode: slots = %"PRIuCSLOT"\n", slots);
+ bits_resolved = log2cl(slots);
+ assert(bits_resolved >= L2_CNODE_BITS);
+ }
+ break;
+ case ObjType_L2CNode:
+ bits_resolved = L2_CNODE_BITS;
+ break;
+ default:
+ panic("caps_lookup_slot: trying to lookup cap in a non-cnode cap");
+ }
// All CNodes must resolve at least one bit
assert(bits_resolved > 0);
// If lookup exceeded expected depth then table is malformed
}
/* Guard-check (bit-mask of guard in cptr must match guard in cnode cap) */
- capaddr_t cptr_guard = (cptr >> (vbits - cnode_cap->u.cnode.guard_size))
- & MASK(cnode_cap->u.cnode.guard_size);
- if (cptr_guard != cnode_cap->u.cnode.guard) {
- debug(SUBSYS_CAPS, "caps_lookup_slot: guard check failed\n"
- "Computed guard = %"PRIuCADDR", "
- "Cnode guard = %"PRIxCADDR", bits = %u\n",
- cptr_guard, cnode_cap->u.cnode.guard,
- cnode_cap->u.cnode.guard_size);
- TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
- return SYS_ERR_GUARD_MISMATCH;
+ if (cnode_cap->type == ObjType_CNode) {
+ capaddr_t cptr_guard = (cptr >> (vbits - cnode_cap->u.cnode.guard_size))
+ & MASK(cnode_cap->u.cnode.guard_size);
+ if (cptr_guard != cnode_cap->u.cnode.guard) {
+ debug(SUBSYS_CAPS, "caps_lookup_slot: guard check failed\n"
+ "Computed guard = %"PRIuCADDR", "
+ "Cnode guard = %"PRIxCADDR", bits = %u\n",
+ cptr_guard, cnode_cap->u.cnode.guard,
+ cnode_cap->u.cnode.guard_size);
+ TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
+ return SYS_ERR_GUARD_MISMATCH;
+ }
}
/* Locate capability in this cnode */
// Offset into the cnode
size_t offset = (cptr >> (vbits - bits_resolved)) &
- MASK(cnode_cap->u.cnode.bits);
+ MASK(log2cl(cnode_get_slots(cnode_cap)));
// The capability at the offset
- struct cte *next_slot = caps_locate_slot(cnode_cap->u.cnode.cnode, offset);
+ struct cte *next_slot = caps_locate_slot(get_address(cnode_cap), offset);
+ printk(LOG_NOTE, "%s: level=%d, cntype=%d, caddr=%#"PRIxCADDR", vbits=%hhu, bits_resolved=%hhu,"
+ " slot=%zu: next_slot->type = %d\n",
+ __FUNCTION__, level, cnode_cap->type, cptr, vbits, bits_resolved, offset,
+ next_slot->cap.type);
+
// Do not return NULL type capability
if (next_slot->cap.type == ObjType_Null) {
TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
if(bitsleft == 0) {
*ret = next_slot;
TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
+ printk(LOG_NOTE, "return successfully\n");
return SYS_ERR_OK;
}
+ if (next_slot->cap.type == ObjType_L1CNode)
+ {
+ printk(LOG_NOTE, "found L1 CNode: restarting lookup with cptr=%#"PRIxCADDR
+ ", vbits=%hhu\n", cptr, bitsleft);
+ // restart lookup if we find L1 CNode
+ return caps_lookup_slot(&next_slot->cap, cptr, bitsleft, ret, rights);
+ }
/* If next capability is not of type cnode, return it */
// XXX: Is this consistent?
- if (next_slot->cap.type != ObjType_CNode) {
+ if (next_slot->cap.type != ObjType_CNode &&
+ next_slot->cap.type != ObjType_L2CNode)
+ {
*ret = next_slot;
TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
+ printk(LOG_NOTE, "return successfully\n");
return SYS_ERR_OK;
}
errval_t caps_lookup_slot(struct capability *cnode_cap, capaddr_t cptr,
uint8_t vbits, struct cte **ret, CapRights rights)
{
+ panic("%s called from %#"PRIxPTR"\n", __FUNCTION__,
+ kernel_virt_to_elf_addr(__builtin_return_address(0)));
+
return caps_lookup_slot_internal(cnode_cap, cptr, vbits, ret, rights, 1);
}
errval_t caps_lookup_cap(struct capability *cnode_cap, capaddr_t cptr,
uint8_t vbits, struct capability **ret, CapRights rights)
{
+ panic("%s called from %#"PRIxPTR"\n", __FUNCTION__,
+ kernel_virt_to_elf_addr(__builtin_return_address(0)));
+
TRACE(KERNEL, CAP_LOOKUP_CAP, 0);
struct cte *ret_cte;
errval_t err = caps_lookup_slot(cnode_cap, cptr, vbits, &ret_cte, rights);
}
/**
+ * Look up a capability in two-level cspace rooted at `rootcn`.
+ */
+errval_t caps_lookup_slot_2(struct capability *rootcn, capaddr_t cptr,
+ uint8_t level, struct cte **ret, CapRights rights)
+{
+ TRACE(KERNEL, CAP_LOOKUP_SLOT, 0);
+
+ cslot_t l1index, l2index;
+ l1index = (cptr >> L2_CNODE_BITS) & MASK(CPTR_BITS-L2_CNODE_BITS);
+ l2index = cptr & MASK(L2_CNODE_BITS);
+
+ assert(ret != NULL);
+ assert(rootcn != NULL);
+
+ if (level > 2) {
+ debug(SUBSYS_CAPS, "%s called with level=%hhu, from %p\n",
+ __FUNCTION__, level,
+ (void*)kernel_virt_to_elf_addr(__builtin_return_address(0)));
+ TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
+ return SYS_ERR_CAP_LOOKUP_DEPTH;
+ }
+ assert(level <= 2);
+
+ // level 0 means that we do not do any resolution and just return the cte
+ // for rootcn.
+ if (level == 0) {
+ *ret = cte_for_cap(rootcn);
+ TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
+ return SYS_ERR_OK;
+ }
+
+ if (rootcn->type != ObjType_L1CNode) {
+ debug(SUBSYS_CAPS, "%s: rootcn->type = %d, called from %p\n",
+ __FUNCTION__, rootcn->type,
+ (void*)kernel_virt_to_elf_addr(__builtin_return_address(0)));
+ TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
+ // XXX: think about errors
+ return SYS_ERR_CNODE_TYPE;
+ }
+ assert(rootcn->type == ObjType_L1CNode);
+
+ if (l1index >= cnode_get_slots(rootcn)) {
+ TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
+ return SYS_ERR_L1_CNODE_INDEX;
+ }
+
+ /* Apply rights to L1 CNode */
+ if ((rootcn->rights & rights) != rights) {
+ debug(SUBSYS_CAPS, "caps_lookup_slot: Rights mismatch\n"
+ "Passed rights = %u, cnode_cap->rights = %u\n",
+ rights, rootcn->rights);
+ TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
+ return SYS_ERR_CNODE_RIGHTS;
+ }
+
+ struct cte *l2cnode = caps_locate_slot(get_address(rootcn), l1index);
+
+ // level == 1 means that we terminate after looking up the slot in the L1
+ // cnode.
+ if (level == 1) {
+ if (l2cnode->cap.type == ObjType_Null) {
+ TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
+ return SYS_ERR_CAP_NOT_FOUND;
+ }
+ *ret = l2cnode;
+ TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
+ return SYS_ERR_OK;
+ }
+
+ if (l2cnode->cap.type != ObjType_L2CNode) {
+ TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
+ debug(SUBSYS_CAPS, "%s: l2cnode->type = %d\n", __FUNCTION__,
+ l2cnode->cap.type);
+ return SYS_ERR_CNODE_TYPE;
+ }
+ assert(l2cnode->cap.type == ObjType_L2CNode);
+
+ assert(l2index < L2_CNODE_SLOTS);
+
+ /* Apply rights to L2 CNode */
+ if ((l2cnode->cap.rights & rights) != rights) {
+ debug(SUBSYS_CAPS, "caps_lookup_slot: Rights mismatch\n"
+ "Passed rights = %u, cnode_cap->rights = %u\n",
+ rights, l2cnode->cap.rights);
+ TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
+ return SYS_ERR_CNODE_RIGHTS;
+ }
+
+ struct cte *cte = caps_locate_slot(get_address(&l2cnode->cap), l2index);
+ if (cte->cap.type == ObjType_Null) {
+ TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
+ return SYS_ERR_CAP_NOT_FOUND;
+ }
+
+ *ret = cte;
+
+ TRACE(KERNEL, CAP_LOOKUP_SLOT, 1);
+ return SYS_ERR_OK;
+}
+
+/**
+ * Wrapper for caps_lookup_slot_2 returning capability instead of cte.
+ */
+errval_t caps_lookup_cap_2(struct capability *cnode_cap, capaddr_t cptr,
+ uint8_t level, struct capability **ret, CapRights rights)
+{
+ TRACE(KERNEL, CAP_LOOKUP_CAP, 0);
+
+ struct cte *ret_cte;
+ errval_t err = caps_lookup_slot_2(cnode_cap, cptr, level, &ret_cte, rights);
+ if (err_is_fail(err)) {
+ return err;
+ }
+ *ret = &ret_cte->cap;
+ TRACE(KERNEL, CAP_LOOKUP_CAP, 1);
+ return SYS_ERR_OK;
+}
+
+/**
* \brief Create a capability from an existing capability metadata.
*
* Used when sending capabilities across cores. The metadata is sent across
return SYS_ERR_CNODE_TYPE;
}
- struct cte *dest = caps_locate_slot(cnode->u.cnode.cnode, dest_slot);
+ struct cte *dest = caps_locate_slot(get_address(cnode), dest_slot);
err = set_cap(&dest->cap, src);
if (err_is_fail(err)) {
// return REVOKE_FIRST, if we found a cap inside the region
// (FOUND_INNER == 2) or overlapping the region (FOUND_PARTIAL == 3)
if (find_range_result >= MDB_RANGE_FOUND_INNER) {
- printf("found existing region inside, or overlapping requested region\n");
+ printk(LOG_NOTE,
+ "%s: found existing region inside, or overlapping requested region:\n",
+ __FUNCTION__);
return SYS_ERR_REVOKE_FIRST;
}
// return REVOKE_FIRST, if we found a cap that isn't our source
else if (find_range_result == MDB_RANGE_FOUND_SURROUNDING &&
!is_copy(&found_cte->cap, src_cap))
{
- printf("found non source region fully covering requested region");
+ printk(LOG_NOTE,
+ "%s: found non source region fully covering requested region",
+ __FUNCTION__);
return SYS_ERR_REVOKE_FIRST;
}
}
}
/* check that destination slots all fit within target cnode */
- // TODO: fix this with new cspace layout (should be easier)
- if (dest_slot + count > (1UL << dest_cnode->u.cnode.bits)) {
+ if (dest_slot + count > cnode_get_slots(dest_cnode)) {
debug(SUBSYS_CAPS, "caps_retype: dest slots don't fit in cnode\n");
return SYS_ERR_SLOTS_INVALID;
}
/* check that destination slots are all empty */
debug(SUBSYS_CAPS, "caps_retype: dest cnode is %#" PRIxLPADDR
" dest_slot %d\n",
- dest_cnode->u.cnode.cnode, (int)dest_slot);
+ get_address(dest_cnode), (int)dest_slot);
for (cslot_t i = 0; i < count; i++) {
- if (caps_locate_slot(dest_cnode->u.cnode.cnode, dest_slot + i)->cap.type
+ if (caps_locate_slot(get_address(dest_cnode), dest_slot + i)->cap.type
!= ObjType_Null) {
debug(SUBSYS_CAPS, "caps_retype: dest slot %d in use\n",
(int)(dest_slot + i));
if (type == ObjType_L2CNode) {
debug(SUBSYS_CAPS, "caps_retype: check that dest cnode is L1"
" when creating L2 CNodes\n");
- if (dest_cnode->type != ObjType_L1CNode) {
- panic("L2 CNode can only be created in L1 CNode\n");
+ if (dest_cnode->type != ObjType_L1CNode &&
+ dest_cnode->type != ObjType_L2CNode)
+ {
+ panic("L2 CNode can only be created in L1 or L2 CNode\n");
}
}
/* create new caps */
struct cte *dest_cte =
- caps_locate_slot(dest_cnode->u.cnode.cnode, dest_slot);
+ caps_locate_slot(get_address(dest_cnode), dest_slot);
err = caps_create(type, base, size, objsize, count, my_core_id, dest_cte);
if (err_is_fail(err)) {
debug(SUBSYS_CAPS, "caps_retype: failed to create a dest cap\n");
uintptr_t param2)
{
/* Parameter Checking */
- assert(dest_cnode_cte->cap.type == ObjType_CNode);
+ assert(dest_cnode_cte->cap.type == ObjType_L1CNode ||
+ dest_cnode_cte->cap.type == ObjType_L2CNode);
+
+ // only allow L2 CNodes and BSP KCB in L1 CNode
+ // XXX: BSPKCB should not be in rootcn...
+ if (dest_cnode_cte->cap.type == ObjType_L1CNode &&
+ src_cte->cap.type != ObjType_L2CNode &&
+ src_cte->cap.type != ObjType_KernelControlBlock)
+ {
+ printk(LOG_WARN, "trying to copy cap type %d into cap type %d\n",
+ src_cte->cap.type, dest_cnode_cte->cap.type);
+ return SYS_ERR_DEST_TYPE_INVALID;
+ }
struct cte *dest_cte;
- dest_cte = caps_locate_slot(dest_cnode_cte->cap.u.cnode.cnode, dest_slot);
+ dest_cte = caps_locate_slot(get_address(&dest_cnode_cte->cap), dest_slot);
return caps_copy_to_cte(dest_cte, src_cte, mint, param1, param2);
}
#include <barrelfish_kpi/cpu_arch.h>
#include <barrelfish_kpi/registers_arch.h>
+#include <bitmacros.h>
+
#if defined(__x86_64__) || defined(__i386__)
# include <arch/x86/apic.h>
#endif
* \param ep Endpoint capability of destination
* \param send Pointer to sending DCB.
* \param send_cptr Address of capability in sender's cspace
- * \param send_bits Valid bits in #send_cptr
+ * \param send_level Depth/level of capability in sender's cspace
*
* \return Error code
*/
static errval_t lmp_transfer_cap(struct capability *ep, struct dcb *send,
- capaddr_t send_cptr, uint8_t send_bits,
+ capaddr_t send_cptr, uint8_t send_level,
bool give_away)
{
errval_t err;
assert(recv != NULL);
assert(ep->u.endpoint.epoffset != 0);
+ // printk(LOG_NOTE, "%s: ep->u.endpoint.epoffset = %"PRIuLVADDR"\n", __FUNCTION__, ep->u.endpoint.epoffset);
/* Look up the slot receiver can receive caps in */
struct lmp_endpoint_kern *recv_ep
= (void *)((uint8_t *)recv->disp + ep->u.endpoint.epoffset);
- // The cnode
- struct capability *recv_cnode_cap;
- err = caps_lookup_cap(&recv->cspace.cap, recv_ep->recv_cptr,
- recv_ep->recv_bits, &recv_cnode_cap,
- CAPRIGHTS_READ_WRITE);
- if (err_is_fail(err)) {
- return err_push(err, SYS_ERR_LMP_CAPTRANSFER_DST_CNODE_LOOKUP);
+ // Lookup cspace root for receiving
+ struct capability *recv_cspace_cap;
+ // XXX: do we want a level into receiver's cspace here?
+ // printk(LOG_NOTE, "recv_cspace_ptr = %"PRIxCADDR"\n", recv_ep->recv_cspc);
+ err = caps_lookup_cap_2(&recv->cspace.cap, recv_ep->recv_cspc, 2,
+ &recv_cspace_cap, CAPRIGHTS_READ_WRITE);
+ if (err_is_fail(err) || recv_cspace_cap->type != ObjType_L1CNode) {
+ return SYS_ERR_LMP_CAPTRANSFER_DST_CNODE_INVALID;
+ }
+ // Check index into L1 cnode
+ capaddr_t l1index = recv_ep->recv_cptr >> L2_CNODE_BITS;
+ if (l1index >= cnode_get_slots(recv_cspace_cap)) {
+ return SYS_ERR_LMP_CAPTRANSFER_DST_CNODE_INVALID;
}
+ // Get the cnode
+ struct cte *recv_cnode_cte = caps_locate_slot(get_address(recv_cspace_cap),
+ l1index);
+ struct capability *recv_cnode_cap = &recv_cnode_cte->cap;
// Check for cnode type
- if (recv_cnode_cap->type != ObjType_CNode) {
+ if (recv_cnode_cap->type != ObjType_L2CNode) {
return SYS_ERR_LMP_CAPTRANSFER_DST_CNODE_INVALID;
}
// The slot within the cnode
struct cte *recv_cte;
recv_cte = caps_locate_slot(recv_cnode_cap->u.cnode.cnode,
- recv_ep->recv_slot);
+ recv_ep->recv_cptr & MASK(L2_CNODE_BITS));
/* Look up source slot in sender */
struct cte *send_cte;
- err = caps_lookup_slot(&send->cspace.cap, send_cptr, send_bits, &send_cte,
- CAPRIGHTS_READ);
+ err = caps_lookup_slot_2(&send->cspace.cap, send_cptr, send_level,
+ &send_cte, CAPRIGHTS_READ);
if (err_is_fail(err)) {
return err_push(err, SYS_ERR_LMP_CAPTRANSFER_SRC_LOOKUP);
}
CapRights rights);
errval_t caps_lookup_slot(struct capability *cnode_cap, capaddr_t cptr,
uint8_t vbits, struct cte **ret, CapRights rights);
+errval_t caps_lookup_cap_2(struct capability *cnode_cap, capaddr_t cptr,
+ uint8_t level, struct capability **ret, CapRights rights);
+errval_t caps_lookup_slot_2(struct capability *rootcn, capaddr_t cptr,
+ uint8_t level, struct cte **ret, CapRights rights);
/*
* Delete and revoke
struct sysret sys_yield(capaddr_t target);
struct sysret sys_suspend(bool halt);
struct sysret
-sys_dispatcher_setup(struct capability *to, capaddr_t cptr, int depth,
+sys_dispatcher_setup(struct capability *to, capaddr_t cptr, uint8_t level,
capaddr_t vptr, capaddr_t dptr, bool run, capaddr_t odptr);
struct sysret
sys_dispatcher_properties(struct capability *to,
unsigned long wcet, unsigned long period,
unsigned long release, unsigned short weight);
struct sysret
-sys_retype(struct capability *root, capaddr_t source_cptr, gensize_t offset,
- enum objtype type, gensize_t objsize, size_t count,
- capaddr_t dest_cnode_cptr, cslot_t dest_slot,
- uint8_t dest_vbits, bool from_monitor);
+sys_retype(struct capability *root, capaddr_t source_croot, capaddr_t source_cptr,
+ gensize_t offset, enum objtype type, gensize_t objsize, size_t count,
+ capaddr_t dest_cspace_ptr, capaddr_t dest_cnode_cptr,
+ uint8_t dest_level, cslot_t dest_slot, bool from_monitor);
struct sysret sys_create(struct capability *root, enum objtype type,
- uint8_t objbits, capaddr_t dest_cnode_cptr,
- cslot_t dest_slot, int dest_vbits);
+ size_t objsize, capaddr_t dest_cnode_cptr,
+ uint8_t dest_level, cslot_t dest_slot);
struct sysret
-sys_map(struct capability *ptable, cslot_t slot, capaddr_t source_cptr,
- int source_vbits, uintptr_t flags, uintptr_t offset,
- uintptr_t pte_count, capaddr_t mapping_cnptr, int mapping_cnvbits,
- cslot_t mapping_slot);
+sys_map(struct capability *ptable, cslot_t slot, capaddr_t source_root_cptr,
+ capaddr_t source_cptr, uint8_t source_level, uintptr_t flags,
+ uintptr_t offset, uintptr_t pte_count, capaddr_t mapping_crootptr,
+ capaddr_t mapping_cnptr, uint8_t mapping_cn_level, cslot_t mapping_slot);
struct sysret
-sys_copy_or_mint(struct capability *root, capaddr_t destcn_cptr, cslot_t dest_slot,
- capaddr_t source_cptr, int destcn_vbits, int source_vbits,
+sys_copy_or_mint(struct capability *root, capaddr_t dest_cspace_cptr,
+ capaddr_t destcn_cptr, cslot_t dest_slot, capaddr_t
+ source_croot_ptr, capaddr_t source_cptr,
+ uint8_t destcn_level, uint8_t source_level,
uintptr_t param1, uintptr_t param2, bool mint);
-struct sysret sys_delete(struct capability *root, capaddr_t cptr, uint8_t bits);
-struct sysret sys_revoke(struct capability *root, capaddr_t cptr, uint8_t bits);
-struct sysret sys_get_state(struct capability *root, capaddr_t cptr, uint8_t bits);
+struct sysret sys_delete(struct capability *root, capaddr_t cptr, uint8_t level);
+struct sysret sys_revoke(struct capability *root, capaddr_t cptr, uint8_t level);
+struct sysret sys_get_state(struct capability *root, capaddr_t cptr, uint8_t level);
struct sysret
sys_dispatcher_setup_guest (struct capability *to,
capaddr_t epp, capaddr_t vnodep,
*/
/*
- * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * Copyright (c) 2007, 2008, 2009, 2010, 2016, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ * ETH Zurich D-INFK, Universitaetstr 6, CH-8092 Zurich. Attn: Systems Group.
*/
#include <kernel.h>
#include <string.h>
#include <syscall.h>
#include <barrelfish_kpi/syscalls.h>
-//#include <capabilities.h>
-//#include <mdb/mdb.h>
#include <mdb/mdb_tree.h>
-//#include <cap_predicates.h>
#include <dispatch.h>
#include <distcaps.h>
-//#include <wakeup.h>
-//#include <paging_kernel_helper.h>
-//#include <exec.h>
-//#include <irq.h>
-//#include <trace/trace.h>
static errval_t sys_double_lookup(capaddr_t rptr, uint8_t rbits,
capaddr_t tptr, uint8_t tbits,
{
errval_t err;
struct capability *ep;
- err = caps_lookup_cap(&dcb_current->cspace.cap, ep_caddr, CPTR_BITS, &ep,
- CAPRIGHTS_READ);
+ err = caps_lookup_cap_2(&dcb_current->cspace.cap, ep_caddr, 2, &ep,
+ CAPRIGHTS_READ);
if(err_is_fail(err)) {
printf("Failure looking up endpoint!\n");
}
struct sysret sys_monitor_identify_cap(struct capability *root,
- capaddr_t cptr, uint8_t bits,
+ capaddr_t cptr, uint8_t level,
struct capability *retbuf)
{
struct capability *cap;
- errval_t err = caps_lookup_cap(root, cptr, bits, &cap, CAPRIGHTS_READ);
+ errval_t err = caps_lookup_cap_2(root, cptr, level, &cap, CAPRIGHTS_READ);
if (err_is_fail(err)) {
return SYSRET(err_push(err, SYS_ERR_IDENTIFY_LOOKUP));
}
coreid_t my_core_id;
/// Quick way to find the base address of a cnode capability
-#define CNODE(cte) (cte)->cap.u.cnode.cnode
+#define CNODE(cte) get_address(&(cte)->cap)
/**
* \brief Create caps in 'cnode'
panic("Cannot handle bootinfo region type!");
}
- if (*slot >= 1UL << cnode->u.cnode.bits) {
+ if (*slot >= cnode_get_slots(cnode)) {
printk(LOG_WARN, "create_caps_to_cnode: Cannot create more caps "
"in CNode\n");
return SYS_ERR_SLOTS_IN_USE;
/* create the capability */
err = caps_create_new(cap_type, base_addr, size, size, my_core_id,
- caps_locate_slot(cnode->u.cnode.cnode, (*slot)++));
+ caps_locate_slot(get_address(cnode), (*slot)++));
if (err_is_fail(err)) {
return err;
}
#endif
/* create root cnode */
- err = caps_create_new(ObjType_CNode, alloc_phys(OBJSIZE_L2CNODE),
- OBJSIZE_L2CNODE, L2_CNODE_SLOTS, my_core_id,
+ err = caps_create_new(ObjType_L1CNode, alloc_phys(OBJSIZE_L2CNODE),
+ OBJSIZE_L2CNODE, OBJSIZE_L2CNODE, my_core_id,
rootcn);
assert(err_is_ok(err));
// Task cnode in root cnode
st->taskcn = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_TASKCN);
- err = caps_create_new(ObjType_CNode, alloc_phys(OBJSIZE_L2CNODE),
- OBJSIZE_L2CNODE, L2_CNODE_SLOTS, my_core_id,
+ err = caps_create_new(ObjType_L2CNode, alloc_phys(OBJSIZE_L2CNODE),
+ OBJSIZE_L2CNODE, OBJSIZE_L2CNODE, my_core_id,
st->taskcn);
assert(err_is_ok(err));
- st->taskcn->cap.u.cnode.guard_size = GUARD_REMAINDER(2 * L2_CNODE_BITS);
// Page cnode in root cnode
st->pagecn = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_PAGECN);
- err = caps_create_new(ObjType_CNode,
- alloc_phys(1UL << (OBJBITS_CTE + PAGE_CNODE_BITS)),
- PAGE_CNODE_SLOTS * sizeof(struct cte), PAGE_CNODE_SLOTS,
- my_core_id, st->pagecn);
+ err = caps_create_new(ObjType_L2CNode,
+ alloc_phys(OBJSIZE_L2CNODE), OBJSIZE_L2CNODE,
+ OBJSIZE_L2CNODE, my_core_id, st->pagecn);
assert(err_is_ok(err));
// Base page cnode in root cnode
st->basepagecn = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_BASE_PAGE_CN);
- err = caps_create_new(ObjType_CNode, alloc_phys(OBJSIZE_L2CNODE),
- OBJSIZE_L2CNODE, L2_CNODE_SLOTS, my_core_id,
+ err = caps_create_new(ObjType_L2CNode, alloc_phys(OBJSIZE_L2CNODE),
+ OBJSIZE_L2CNODE, OBJSIZE_L2CNODE, my_core_id,
st->basepagecn);
assert(err_is_ok(err));
// Super cnode in root cnode
st->supercn = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_SUPERCN);
- err = caps_create_new(ObjType_CNode,
- alloc_phys(1UL << (OBJBITS_CTE + SUPER_CNODE_BITS)),
- SUPER_CNODE_SLOTS * sizeof(struct cte),
- SUPER_CNODE_SLOTS, my_core_id, st->supercn);
+ err = caps_create_new(ObjType_L2CNode,
+ alloc_phys(OBJSIZE_L2CNODE),
+ OBJSIZE_L2CNODE, OBJSIZE_L2CNODE, my_core_id, st->supercn);
assert(err_is_ok(err));
// slot_alloc cnodes in root cnode. assumes SLOT_SLOT_ALLOC0,1,2 are
assert(ROOTCN_SLOT_SLOT_ALLOC0 + 1 == ROOTCN_SLOT_SLOT_ALLOC1);
assert(ROOTCN_SLOT_SLOT_ALLOC1 + 1 == ROOTCN_SLOT_SLOT_ALLOC2);
st->slot_alloc_cn0 = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_SLOT_ALLOC0);
- err = caps_create_new(ObjType_CNode,
+ err = caps_create_new(ObjType_L2CNode,
alloc_phys(3*OBJSIZE_L2CNODE), 3*OBJSIZE_L2CNODE,
- L2_CNODE_SLOTS, my_core_id, st->slot_alloc_cn0);
+ OBJSIZE_L2CNODE, my_core_id, st->slot_alloc_cn0);
assert(err_is_ok(err));
// Seg cnode in root cnode
st->segcn = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_SEGCN);
- err = caps_create_new(ObjType_CNode, alloc_phys(BASE_PAGE_SIZE),
- BASE_PAGE_SIZE, DEFAULT_CNODE_SLOTS, my_core_id,
+ err = caps_create_new(ObjType_L2CNode, alloc_phys(OBJSIZE_L2CNODE),
+ OBJSIZE_L2CNODE, OBJSIZE_L2CNODE, my_core_id,
st->segcn);
assert(err_is_ok(err));
// Physaddr cnode in root cnode
st->physaddrcn = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_PACN);
- err = caps_create_new(ObjType_CNode,
- alloc_phys(1UL << (OBJBITS_CTE + PHYSADDRCN_BITS)),
- 1UL << (OBJBITS_CTE + PHYSADDRCN_BITS), PHYSADDRCN_SLOTS,
+ err = caps_create_new(ObjType_L2CNode,
+ alloc_phys(OBJSIZE_L2CNODE), OBJSIZE_L2CNODE, OBJSIZE_L2CNODE,
my_core_id, st->physaddrcn);
assert(err_is_ok(err));
if (arch_core_is_bsp()) {
// Cnode for Boot loaded modules
st->modulecn = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_MODULECN);
- err = caps_create_new(ObjType_CNode,
- alloc_phys(1UL << (OBJBITS_CTE + MODULECN_SIZE_BITS)),
- 1UL << (MODULECN_SIZE_BITS + OBJBITS_CTE),
- 1UL << MODULECN_SIZE_BITS, my_core_id, st->modulecn);
+ err = caps_create_new(ObjType_L2CNode,
+ alloc_phys(OBJSIZE_L2CNODE), OBJSIZE_L2CNODE,
+ OBJSIZE_L2CNODE, my_core_id, st->modulecn);
assert(err_is_ok(err));
}
/* FIXME: lots of missing argument checks in this function */
struct sysret
-sys_dispatcher_setup(struct capability *to, capaddr_t cptr, int depth,
+sys_dispatcher_setup(struct capability *to, capaddr_t cptr, uint8_t level,
capaddr_t vptr, capaddr_t dptr, bool run, capaddr_t odptr)
{
errval_t err = SYS_ERR_OK;
assert(to->type == ObjType_Dispatcher);
struct dcb *dcb = to->u.dispatcher.dcb;
+ assert(dcb != dcb_current);
lpaddr_t lpaddr;
- /* 1. set cspace root */
- if (cptr != CPTR_NULL) {
- struct cte *root;
- err = caps_lookup_slot(&dcb_current->cspace.cap, cptr, depth,
- &root, CAPRIGHTS_READ);
- if (err_is_fail(err)) {
- return SYSRET(err_push(err, SYS_ERR_DISP_CSPACE_ROOT));
+ /* 0. Handle sys_dispatcher_setup for guest domains */
+ if (cptr == 0x0) {
+ assert(dcb->is_vm_guest);
+ assert(vptr == 0x0);
+ assert(dptr == 0x0);
+ assert(odptr == 0x0);
+ if (!dcb->is_vm_guest || vptr != 0x0 || dptr != 0x0 || odptr != 0x0) {
+ return SYSRET(SYS_ERR_DISP_NOT_RUNNABLE);
}
- if (root->cap.type != ObjType_CNode) {
- return SYSRET(err_push(err, SYS_ERR_DISP_CSPACE_INVALID));
- }
- err = caps_copy_to_cte(&dcb->cspace, root, false, 0, 0);
- if (err_is_fail(err)) {
- return SYSRET(err_push(err, SYS_ERR_DISP_CSPACE_ROOT));
+ if (run) {
+ // Dispatchers run disabled the first time
+ dcb->disabled = 1;
+ make_runnable(dcb);
}
+ return SYSRET(SYS_ERR_OK);
}
- /* 2. set vspace root */
- if (vptr != CPTR_NULL) {
- struct capability *vroot;
- err = caps_lookup_cap(&dcb_current->cspace.cap, vptr, CPTR_BITS,
- &vroot, CAPRIGHTS_WRITE);
- if (err_is_fail(err)) {
- return SYSRET(err_push(err, SYS_ERR_DISP_VSPACE_ROOT));
- }
+ assert(!dcb->is_vm_guest);
+ assert(!cptr == 0x0);
+ assert(!vptr == 0x0);
+ assert(!dptr == 0x0);
+ assert(!odptr == 0x0);
- // Insert as dispatcher's VSpace root
- switch(vroot->type) {
- case ObjType_VNode_x86_64_pml4:
- dcb->vspace =
- (lvaddr_t)gen_phys_to_local_phys(vroot->u.vnode_x86_64_pml4.base);
- break;
-#ifdef CONFIG_PAE
- case ObjType_VNode_x86_32_pdpt:
- dcb->vspace =
- (lvaddr_t)gen_phys_to_local_phys(vroot->u.vnode_x86_32_pdpt.base);
- break;
-#else
- case ObjType_VNode_x86_32_pdir:
- dcb->vspace =
- (lvaddr_t)gen_phys_to_local_phys(vroot->u.vnode_x86_32_pdir.base);
- break;
-#endif
- case ObjType_VNode_ARM_l1:
- dcb->vspace =
- (lvaddr_t)gen_phys_to_local_phys(vroot->u.vnode_arm_l1.base);
- break;
-
- case ObjType_VNode_AARCH64_l1:
- dcb->vspace =
- (lvaddr_t)gen_phys_to_local_phys(vroot->u.vnode_aarch64_l1.base);
- break;
-
- case ObjType_VNode_AARCH64_l2:
- dcb->vspace =
- (lvaddr_t)gen_phys_to_local_phys(vroot->u.vnode_aarch64_l2.base);
- break;
-
- default:
- return SYSRET(err_push(err, SYS_ERR_DISP_VSPACE_INVALID));
- }
+ if (cptr == 0x0 || vptr == 0x0 || dptr == 0x0 || odptr == 0x0) {
+ return SYSRET(SYS_ERR_DISP_NOT_RUNNABLE);
}
- /* 3. set dispatcher frame pointer */
- if (dptr != CPTR_NULL) {
- struct cte *dispcte;
- err = caps_lookup_slot(&dcb_current->cspace.cap, dptr, CPTR_BITS,
- &dispcte, CAPRIGHTS_WRITE);
- if (err_is_fail(err)) {
- return SYSRET(err_push(err, SYS_ERR_DISP_FRAME));
- }
- struct capability *dispcap = &dispcte->cap;
- if (dispcap->type != ObjType_Frame) {
- return SYSRET(err_push(err, SYS_ERR_DISP_FRAME_INVALID));
- }
+ /* 1. set cspace root */
+ struct cte *root;
+ err = caps_lookup_slot_2(&dcb_current->cspace.cap, cptr, level,
+ &root, CAPRIGHTS_READ);
+ if (err_is_fail(err)) {
+ debug(SUBSYS_CAPS, "caps_lookup_cap for croot=%"PRIxCADDR", level=%d: %"PRIuERRV"\n", cptr, level, err);
+ return SYSRET(err_push(err, SYS_ERR_DISP_CSPACE_ROOT));
+ }
+ if (root->cap.type != ObjType_L1CNode) {
+ return SYSRET(err_push(err, SYS_ERR_DISP_CSPACE_INVALID));
+ }
+ err = caps_copy_to_cte(&dcb->cspace, root, false, 0, 0);
+ if (err_is_fail(err)) {
+ debug(SUBSYS_CAPS, "caps_copy_to_cte for croot: %"PRIuERRV"\n", err);
+ return SYSRET(err_push(err, SYS_ERR_DISP_CSPACE_ROOT));
+ }
- /* FIXME: check rights, check size */
+ /* 2. set vspace root */
+ struct capability *vroot;
+ err = caps_lookup_cap_2(&root->cap, vptr, CNODE_TYPE_COUNT, &vroot, CAPRIGHTS_WRITE);
+ if (err_is_fail(err)) {
+ debug(SUBSYS_CAPS, "caps_lookup_cap for vroot=%"PRIxCADDR": %"PRIuERRV"\n", vptr, err);
+ return SYSRET(err_push(err, SYS_ERR_DISP_VSPACE_ROOT));
+ }
- lpaddr = gen_phys_to_local_phys(dispcap->u.frame.base);
- dcb->disp = local_phys_to_mem(lpaddr);
- // Copy the cap to dcb also
- err = caps_copy_to_cte(&dcb->disp_cte, dispcte, false, 0, 0);
- // If copy fails, something wrong in kernel
- assert(err_is_ok(err));
+ // Insert as dispatcher's VSpace root
+ if (!type_is_vroot(vroot->type)) {
+ return SYSRET(SYS_ERR_DISP_VSPACE_INVALID);
}
+ dcb->vspace = gen_phys_to_local_phys(get_address(vroot));
- /* 5. Make runnable if desired -- Set pointer to ipi_data */
+ /* 3. set dispatcher frame pointer */
+ struct cte *dispcte;
+ err = caps_lookup_slot_2(&root->cap, dptr, CNODE_TYPE_COUNT, &dispcte,
+ CAPRIGHTS_READ_WRITE);
+ if (err_is_fail(err)) {
+ return SYSRET(err_push(err, SYS_ERR_DISP_FRAME));
+ }
+ struct capability *dispcap = &dispcte->cap;
+ if (dispcap->type != ObjType_Frame) {
+ return SYSRET(SYS_ERR_DISP_FRAME_INVALID);
+ }
+ if (get_size(dispcap) < (1UL << DISPATCHER_FRAME_BITS)) {
+ return SYSRET(SYS_ERR_DISP_FRAME_SIZE);
+ }
+ /* FIXME: check rights? */
+
+ lpaddr = gen_phys_to_local_phys(get_address(dispcap));
+ dcb->disp = local_phys_to_mem(lpaddr);
+ // Copy the cap to dcb also
+ err = caps_copy_to_cte(&dcb->disp_cte, dispcte, false, 0, 0);
+ // If copy fails, something wrong in kernel
+ assert(err_is_ok(err));
+
+ /* 5. Make runnable if desired */
if (run) {
- if (dcb->vspace == 0 ||
- (!dcb->is_vm_guest &&
- (dcb->disp == 0 || dcb->cspace.cap.type != ObjType_CNode))) {
+ if (dcb->vspace == 0 || dcb->disp == 0 || dcb->cspace.cap.type != ObjType_L1CNode) {
return SYSRET(err_push(err, SYS_ERR_DISP_NOT_RUNNABLE));
}
}
/* 6. Copy domain ID off given dispatcher */
- if(odptr != CPTR_NULL) {
- struct capability *odisp;
- err = caps_lookup_cap(&dcb_current->cspace.cap, odptr, CPTR_BITS,
- &odisp, CAPRIGHTS_READ_WRITE);
- if (err_is_fail(err)) {
- return SYSRET(err_push(err, SYS_ERR_DISP_OCAP_LOOKUP));
- }
- dcb->domain_id = odisp->u.dispatcher.dcb->domain_id;
+ // XXX: We generally pass the current dispatcher as odisp, see e.g.
+ // lib/spawndomain/spawn.c:spawn_run(). In that case the new domain gets
+ // the same domain id as the domain doing the spawning. cf. T271
+ // -SG, 2016-07-21.
+ struct capability *odisp;
+ err = caps_lookup_cap_2(&dcb_current->cspace.cap, odptr, CNODE_TYPE_COUNT,
+ &odisp, CAPRIGHTS_READ);
+ if (err_is_fail(err)) {
+ return SYSRET(err_push(err, SYS_ERR_DISP_OCAP_LOOKUP));
+ }
+ if (odisp->type != ObjType_Dispatcher) {
+ return SYSRET(SYS_ERR_DISP_OCAP_TYPE);
}
+ dcb->domain_id = odisp->u.dispatcher.dcb->domain_id;
/* 7. (HACK) Set current core id */
- {
- struct dispatcher_shared_generic *disp =
- get_dispatcher_shared_generic(dcb->disp);
- if(disp){
- disp->curr_core_id = my_core_id;
- }
- }
+ struct dispatcher_shared_generic *disp =
+ get_dispatcher_shared_generic(dcb->disp);
+ disp->curr_core_id = my_core_id;
- if(!dcb->is_vm_guest) {
- struct dispatcher_shared_generic *disp =
- get_dispatcher_shared_generic(dcb->disp);
- err = trace_new_application(disp->name, (uintptr_t) dcb);
+ /* 8. Enable tracing for new domain */
+ err = trace_new_application(disp->name, (uintptr_t) dcb);
- if (err == TRACE_ERR_NO_BUFFER) {
- // Try to use the boot buffer.
- trace_new_boot_application(disp->name, (uintptr_t) dcb);
- }
+ if (err == TRACE_ERR_NO_BUFFER) {
+ // Try to use the boot buffer.
+ trace_new_boot_application(disp->name, (uintptr_t) dcb);
}
return SYSRET(SYS_ERR_OK);
}
/**
- * \param root Root CNode to invoke
+ * \param root Source CSpace root cnode to invoke
+ * \param source_croot Source capability cspace root
* \param source_cptr Source capability cptr
* \param offset Offset into source capability from which to retype
* \param type Type to retype to
* \param objsize Object size for variable-sized types
* \param count number of objects to create
+ * \param dest_cspace_cptr Destination cspace root cnode cptr, resolved
+ *                          relative to the source cspace root (must be an L1 CNode)
* \param dest_cnode_cptr Destination cnode cptr
* \param dest_slot Destination slot number
- * \param dest_vbits Valid bits in destination cnode cptr
+ * \param dest_cnode_level Level/depth of destination cnode
*/
struct sysret
-sys_retype(struct capability *root, capaddr_t source_cptr, gensize_t offset,
- enum objtype type, gensize_t objsize, size_t count,
- capaddr_t dest_cnode_cptr, cslot_t dest_slot,
- uint8_t dest_vbits, bool from_monitor)
+sys_retype(struct capability *root, capaddr_t source_croot, capaddr_t source_cptr,
+ gensize_t offset, enum objtype type, gensize_t objsize, size_t count,
+ capaddr_t dest_cspace_cptr, capaddr_t dest_cnode_cptr,
+ uint8_t dest_cnode_level, cslot_t dest_slot, bool from_monitor)
{
errval_t err;
return SYSRET(SYS_ERR_ILLEGAL_DEST_TYPE);
}
+ /* Lookup source cspace root cnode */
+ struct capability *source_root;
+ err = caps_lookup_cap_2(root, source_croot, 2, &source_root, CAPRIGHTS_READ);
+ if (err_is_fail(err)) {
+ return SYSRET(err_push(err, SYS_ERR_SOURCE_ROOTCN_LOOKUP));
+ }
/* Source capability */
struct cte *source_cte;
- err = caps_lookup_slot(root, source_cptr, CPTR_BITS, &source_cte,
- CAPRIGHTS_READ);
+ // XXX: level from where
+ err = caps_lookup_slot_2(source_root, source_cptr, 2, &source_cte,
+ CAPRIGHTS_READ);
if (err_is_fail(err)) {
return SYSRET(err_push(err, SYS_ERR_SOURCE_CAP_LOOKUP));
}
assert(source_cte != NULL);
- /* Destination cnode */
+ /* Destination cspace root cnode in source cspace */
+ struct capability *dest_cspace_root;
+ // XXX: level from where?
+ err = caps_lookup_cap_2(root, dest_cspace_cptr, 2,
+ &dest_cspace_root, CAPRIGHTS_READ);
+ if (err_is_fail(err)) {
+ return SYSRET(err_push(err, SYS_ERR_DEST_ROOTCN_LOOKUP));
+ }
+ /* dest_cspace_root must be L1 CNode */
+ if (dest_cspace_root->type != ObjType_L1CNode) {
+ return SYSRET(SYS_ERR_CNODE_TYPE);
+ }
+
+ /* Destination cnode in destination cspace */
struct capability *dest_cnode_cap;
- err = caps_lookup_cap(root, dest_cnode_cptr, dest_vbits,
- &dest_cnode_cap, CAPRIGHTS_READ_WRITE);
+ err = caps_lookup_cap_2(dest_cspace_root, dest_cnode_cptr, dest_cnode_level,
+ &dest_cnode_cap, CAPRIGHTS_READ_WRITE);
if (err_is_fail(err)) {
return SYSRET(err_push(err, SYS_ERR_DEST_CNODE_LOOKUP));
}
- // XXX: not very clean!
- if (dest_cnode_cap->type != ObjType_CNode &&
- dest_cnode_cap->type != ObjType_L1CNode ) {
+
+ /* check that destination cnode is actually a cnode */
+ if (dest_cnode_cap->type != ObjType_L1CNode &&
+ dest_cnode_cap->type != ObjType_L2CNode) {
+ debug(SUBSYS_CAPS, "destcn type: %d\n", dest_cnode_cap->type);
return SYSRET(SYS_ERR_DEST_CNODE_INVALID);
}
}
struct sysret sys_create(struct capability *root, enum objtype type,
- uint8_t objbits, capaddr_t dest_cnode_cptr,
- cslot_t dest_slot, int dest_vbits)
+ size_t objsize, capaddr_t dest_cnode_cptr,
+ uint8_t dest_level, cslot_t dest_slot)
{
errval_t err;
- uint8_t bits = 0;
+ uint8_t size = 0;
genpaddr_t base = 0;
/* Paramter checking */
/* Destination CNode */
struct capability *dest_cnode_cap;
- err = caps_lookup_cap(root, dest_cnode_cptr, dest_vbits,
- &dest_cnode_cap, CAPRIGHTS_READ_WRITE);
+ err = caps_lookup_cap_2(root, dest_cnode_cptr, dest_level,
+ &dest_cnode_cap, CAPRIGHTS_READ_WRITE);
if (err_is_fail(err)) {
return SYSRET(err_push(err, SYS_ERR_DEST_CNODE_LOOKUP));
}
/* Destination slot */
struct cte *dest_cte;
- dest_cte = caps_locate_slot(dest_cnode_cap->u.cnode.cnode, dest_slot);
+ dest_cte = caps_locate_slot(get_address(dest_cnode_cap), dest_slot);
if (dest_cte->cap.type != ObjType_Null) {
return SYSRET(SYS_ERR_SLOTS_IN_USE);
}
return SYSRET(SYS_ERR_TYPE_NOT_CREATABLE);
}
- return SYSRET(caps_create_new(type, base, bits, objbits, my_core_id, dest_cte));
+ return SYSRET(caps_create_new(type, base, size, objsize, my_core_id, dest_cte));
}
/**
* Common code for copying and minting except the mint flag and param passing
+ *
+ * \param root Source cspace root cnode
+ * \param dest_cspace_cptr Destination cspace root cnode cptr in source cspace
+ * \param destcn_cptr Destination cnode cptr relative to destination cspace
+ * \param dest_slot Destination slot
+ * \param source_cptr Source capability cptr relative to source cspace
+ * \param destcn_level Level/depth of destination cnode
+ * \param source_level Level/depth of source cap
+ * \param param1 First parameter for mint
+ * \param param2 Second parameter for mint
+ * \param mint Call is a minting operation
*/
struct sysret
-sys_copy_or_mint(struct capability *root, capaddr_t destcn_cptr, cslot_t dest_slot,
- capaddr_t source_cptr, int destcn_vbits, int source_vbits,
- uintptr_t param1, uintptr_t param2, bool mint)
+sys_copy_or_mint(struct capability *root, capaddr_t dest_cspace_cptr,
+ capaddr_t destcn_cptr, cslot_t dest_slot, capaddr_t
+ source_croot_ptr, capaddr_t source_cptr,
+ uint8_t destcn_level, uint8_t source_level,
+ uintptr_t param1, uintptr_t param2, bool mint)
{
errval_t err;
param1 = param2 = 0;
}
- /* Lookup source cap */
+ if (root->type != ObjType_L1CNode) {
+ debug(SUBSYS_CAPS, "%s: root->type = %d\n", __FUNCTION__, root->type);
+ return SYSRET(SYS_ERR_CNODE_NOT_ROOT);
+ }
+ assert(root->type == ObjType_L1CNode);
+
+ /* Lookup source cspace in our cspace */
+ struct capability *src_croot;
+ err = caps_lookup_cap_2(root, source_croot_ptr, 2, &src_croot,
+ CAPRIGHTS_READ);
+ if (err_is_fail(err)) {
+ return SYSRET(err_push(err, SYS_ERR_SOURCE_ROOTCN_LOOKUP));
+ }
+ if (src_croot->type != ObjType_L1CNode) {
+ debug(SUBSYS_CAPS, "%s: src rootcn type = %d\n", __FUNCTION__, src_croot->type);
+ return SYSRET(SYS_ERR_CNODE_NOT_ROOT);
+ }
+ /* Lookup source cap in source cspace */
struct cte *src_cap;
- err = caps_lookup_slot(root, source_cptr, source_vbits,
- &src_cap, CAPRIGHTS_READ);
+ err = caps_lookup_slot_2(src_croot, source_cptr, source_level, &src_cap,
+ CAPRIGHTS_READ);
if (err_is_fail(err)) {
return SYSRET(err_push(err, SYS_ERR_SOURCE_CAP_LOOKUP));
}
- /* Lookup destination cnode cap */
+ /* Destination cspace root cnode in source cspace */
+ struct capability *dest_cspace_root;
+ // XXX: level from where?
+ err = caps_lookup_cap_2(root, dest_cspace_cptr, 2, &dest_cspace_root, CAPRIGHTS_READ);
+ if (err_is_fail(err)) {
+ return SYSRET(err_push(err, SYS_ERR_DEST_ROOTCN_LOOKUP));
+ }
+ /* dest_cspace_root must be L1 CNode */
+ if (dest_cspace_root->type != ObjType_L1CNode) {
+ debug(SUBSYS_CAPS, "%s: dest rootcn type = %d\n", __FUNCTION__, src_croot->type);
+ return SYSRET(SYS_ERR_CNODE_TYPE);
+ }
+
+ /* Destination cnode in destination cspace */
struct cte *dest_cnode_cap;
- err = caps_lookup_slot(root, destcn_cptr, destcn_vbits,
- &dest_cnode_cap, CAPRIGHTS_READ_WRITE);
+ err = caps_lookup_slot_2(dest_cspace_root, destcn_cptr, destcn_level,
+ &dest_cnode_cap, CAPRIGHTS_READ_WRITE);
if (err_is_fail(err)) {
return SYSRET(err_push(err, SYS_ERR_DEST_CNODE_LOOKUP));
}
/* Perform copy */
- if (dest_cnode_cap->cap.type == ObjType_CNode) {
+ if (dest_cnode_cap->cap.type == ObjType_L1CNode ||
+ dest_cnode_cap->cap.type == ObjType_L2CNode)
+ {
return SYSRET(caps_copy_to_cnode(dest_cnode_cap, dest_slot, src_cap,
mint, param1, param2));
} else {
}
struct sysret
-sys_map(struct capability *ptable, cslot_t slot, capaddr_t source_cptr,
- int source_vbits, uintptr_t flags, uintptr_t offset,
- uintptr_t pte_count, capaddr_t mapping_cnptr, int mapping_cnvbits,
- cslot_t mapping_slot)
+sys_map(struct capability *ptable, cslot_t slot, capaddr_t source_root_cptr,
+ capaddr_t source_cptr, uint8_t source_level, uintptr_t flags,
+ uintptr_t offset, uintptr_t pte_count, capaddr_t mapping_crootptr,
+ capaddr_t mapping_cnptr, uint8_t mapping_cn_level, cslot_t mapping_slot)
{
assert (type_is_vnode(ptable->type));
errval_t err;
+ /* XXX: TODO: make root explicit argument for sys_map() */
struct capability *root = &dcb_current->cspace.cap;
- /* Lookup source cap */
+ /* Lookup source root cn cap in own cspace */
+ struct capability *src_root;
+ err = caps_lookup_cap_2(root, source_root_cptr, source_level, &src_root,
+ CAPRIGHTS_READ);
+ if (err_is_fail(err)) {
+ return SYSRET(err_push(err, SYS_ERR_SOURCE_ROOTCN_LOOKUP));
+ }
+ if (src_root->type != ObjType_L1CNode) {
+ return SYSRET(SYS_ERR_CNODE_NOT_ROOT);
+ }
+ /* Lookup source cap in source cspace */
struct cte *src_cte;
- err = caps_lookup_slot(root, source_cptr, source_vbits, &src_cte,
- CAPRIGHTS_READ);
+ err = caps_lookup_slot_2(src_root, source_cptr, source_level, &src_cte,
+ CAPRIGHTS_READ);
if (err_is_fail(err)) {
return SYSRET(err_push(err, SYS_ERR_SOURCE_CAP_LOOKUP));
}
- /* Lookup mapping slot */
+ /* Lookup mapping cspace root in our cspace */
+ struct capability *mapping_croot;
+ err = caps_lookup_cap_2(root, mapping_crootptr, 2, &mapping_croot,
+ CAPRIGHTS_READ_WRITE);
+ if (err_is_fail(err)) {
+ return SYSRET(err_push(err, SYS_ERR_DEST_ROOTCN_LOOKUP));
+ }
+
+ /* Lookup mapping slot in dest cspace */
struct cte *mapping_cnode_cte;
- err = caps_lookup_slot(root, mapping_cnptr, mapping_cnvbits,
- &mapping_cnode_cte, CAPRIGHTS_READ_WRITE);
+ err = caps_lookup_slot_2(mapping_croot, mapping_cnptr, mapping_cn_level,
+ &mapping_cnode_cte, CAPRIGHTS_READ_WRITE);
if (err_is_fail(err)) {
return SYSRET(err_push(err, SYS_ERR_DEST_CNODE_LOOKUP));
}
- if (mapping_cnode_cte->cap.type != ObjType_CNode) {
+ if (mapping_cnode_cte->cap.type != ObjType_L2CNode) {
return SYSRET(SYS_ERR_DEST_TYPE_INVALID);
}
offset, pte_count, mapping_cte));
}
-struct sysret sys_delete(struct capability *root, capaddr_t cptr, uint8_t bits)
+struct sysret sys_delete(struct capability *root, capaddr_t cptr, uint8_t level)
{
errval_t err;
struct cte *slot;
- err = caps_lookup_slot(root, cptr, bits, &slot, CAPRIGHTS_READ_WRITE);
+ err = caps_lookup_slot_2(root, cptr, level, &slot, CAPRIGHTS_READ_WRITE);
if (err_is_fail(err)) {
return SYSRET(err);
}
return SYSRET(err);
}
-struct sysret sys_revoke(struct capability *root, capaddr_t cptr, uint8_t bits)
+struct sysret sys_revoke(struct capability *root, capaddr_t cptr, uint8_t level)
{
errval_t err;
struct cte *slot;
- err = caps_lookup_slot(root, cptr, bits, &slot, CAPRIGHTS_READ_WRITE);
+ err = caps_lookup_slot_2(root, cptr, level, &slot, CAPRIGHTS_READ_WRITE);
if (err_is_fail(err)) {
return SYSRET(err);
}
return SYSRET(err);
}
-struct sysret sys_get_state(struct capability *root, capaddr_t cptr, uint8_t bits)
+struct sysret sys_get_state(struct capability *root, capaddr_t cptr, uint8_t level)
{
errval_t err;
struct cte *slot;
- err = caps_lookup_slot(root, cptr, bits, &slot, CAPRIGHTS_READ);
+ err = caps_lookup_slot_2(root, cptr, level, &slot, CAPRIGHTS_READ);
if (err_is_fail(err)) {
return SYSRET(err);
}
errval_t sys_debug_create_irq_src_cap(struct capref cap, uint16_t gsi)
{
- uint8_t dcn_vbits = get_cnode_valid_bits(cap);
+ // XXX: check this
+ uint8_t dcn_level = get_cnode_level(cap);
capaddr_t dcn_addr = get_cnode_addr(cap);
- struct sysret sr = syscall6(SYSCALL_DEBUG, DEBUG_CREATE_IRQ_SRC_CAP, dcn_vbits, dcn_addr,
+ struct sysret sr = syscall6(SYSCALL_DEBUG, DEBUG_CREATE_IRQ_SRC_CAP, dcn_level, dcn_addr,
cap.slot, gsi);
return sr.error;
}
*/
/*
- * Copyright (c) 2007-2010, 2012, ETH Zurich.
+ * Copyright (c) 2007-2010, 2012, 2016, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
#include <stdio.h>
/// Root CNode
-struct cnoderef cnode_root = {
- .address = CPTR_ROOTCN,
- .address_bits = CPTR_BITS,
- .size_bits = L2_CNODE_BITS,
- .guard_size = 0
-};
+#define ROOT_CNODE_INIT { \
+ .croot = CPTR_ROOTCN, \
+ .cnode = 0, \
+ .level = CNODE_TYPE_ROOT, }
+
+struct cnoderef cnode_root = ROOT_CNODE_INIT;
#define TASK_CNODE_INIT { \
- .address = 0, \
- .address_bits = L2_CNODE_BITS, \
- .size_bits = L2_CNODE_BITS, \
- .guard_size = GUARD_REMAINDER(2 * L2_CNODE_BITS) }
+ .croot = CPTR_ROOTCN, \
+ .cnode = CPTR_TASKCN_BASE, \
+ .level = CNODE_TYPE_OTHER, }
#define PAGE_CNODE_INIT { \
- .address = ROOTCN_SLOT_PAGECN << DEFAULT_CN_ADDR_BITS, \
- .address_bits = L2_CNODE_BITS, \
- .size_bits = PAGE_CNODE_BITS, \
- .guard_size = 0 }
+ .croot = CPTR_ROOTCN, \
+ .cnode = CPTR_PAGECN_BASE, \
+ .level = CNODE_TYPE_OTHER, }
/// Task CNode
struct cnoderef cnode_task = TASK_CNODE_INIT;
/// Base CNode
struct cnoderef cnode_base = {
- .address = CPTR_BASE_PAGE_CN_BASE,
- .address_bits = L2_CNODE_BITS,
- .size_bits = L2_CNODE_BITS,
- .guard_size = 0
+ .cnode = CPTR_BASE_PAGE_CN_BASE,
+ .level = CNODE_TYPE_OTHER,
+ .croot = CPTR_ROOTCN,
};
/// Super CNode
struct cnoderef cnode_super = {
- .address = ROOTCN_SLOT_SUPERCN << DEFAULT_CN_ADDR_BITS,
- .address_bits = L2_CNODE_BITS,
- .size_bits = SUPER_CNODE_BITS,
- .guard_size = 0
+ .cnode = CPTR_SUPERCN_BASE,
+ .level = CNODE_TYPE_OTHER,
+ .croot = CPTR_ROOTCN,
};
/// Page CNode
/// Module CNode
struct cnoderef cnode_module = {
- .address = CPTR_MODULECN_BASE,
- .address_bits = L2_CNODE_BITS,
- .size_bits = MODULECN_SIZE_BITS,
- .guard_size = 0
+ .cnode = CPTR_MODULECN_BASE,
+ .level = CNODE_TYPE_OTHER,
+ .croot = CPTR_ROOTCN,
};
/// Capability to Root CNode
.slot = TASKCN_SLOT_DISPFRAME
};
-#define ROOT_CNODE_INIT { \
- .address = CPTR_ROOTCN, \
- .address_bits = CPTR_BITS, \
- .size_bits = L2_CNODE_BITS, \
- .guard_size = 0 }
-
/// Capability for monitor endpoint
struct capref cap_monitorep = {
- .cnode = ROOT_CNODE_INIT,
- .slot = ROOTCN_SLOT_MONITOREP
+ .cnode = TASK_CNODE_INIT,
+ .slot = TASKCN_SLOT_MONITOREP
};
/// Capability for kernel (only in monitor)
/// Root PML4 VNode
struct capref cap_vroot = {
.cnode = PAGE_CNODE_INIT,
- .slot = CPTR_PML4_BASE
+ .slot = PAGECN_SLOT_VROOT,
};
static inline bool backoff(int count)
enum objtype new_type, gensize_t objsize, size_t count)
{
errval_t err;
-
- // Number of valid bits in destination CNode address
- uint8_t dcn_vbits = get_cnode_valid_bits(dest_start);
+#if 0
+
+ debug_printf("%s: callstack %p %p %p %p\n",
+ __FUNCTION__,
+ __builtin_return_address(0),
+ __builtin_return_address(1),
+ __builtin_return_address(2),
+ __builtin_return_address(3));
+
+ char buf[256];
+ debug_print_capref(buf, 256, cap_root);
+ debug_printf("%s: cap_root = %.*s\n", __FUNCTION__, 256, buf);
+ debug_print_capref(buf, 256, dest_start);
+ debug_printf("%s: dest_start = %.*s\n", __FUNCTION__, 256, buf);
+ debug_print_capref(buf, 256, src);
+ debug_printf("%s: src = %.*s\n", __FUNCTION__, 256, buf);
+#endif
+
+ // Address of destination cspace
+ capaddr_t dcs_addr = get_croot_addr(dest_start);
// Address of the cap to the destination CNode
capaddr_t dcn_addr = get_cnode_addr(dest_start);
+ // Depth/Level of destination cnode
+ enum cnode_type dcn_level = get_cnode_level(dest_start);
+ // Address of source cspace
+ capaddr_t scp_root = get_croot_addr(src);
// Address of source capability
capaddr_t scp_addr = get_cap_addr(src);
- err = invoke_cnode_retype(cap_root, scp_addr, offset, new_type, objsize, count,
- dcn_addr, dest_start.slot, dcn_vbits);
+#if 0
+ debug_printf("retype(root=%#"PRIxCADDR", scp_root=%#"PRIxCADDR", scp_addr=%#"PRIxCADDR", dcs_addr=%#"PRIxCADDR", dcn_addr=%#"PRIxCADDR", dcn_level=%d, slot=%d)\n",
+ get_cap_addr(cap_root), scp_root, scp_addr, dcs_addr, dcn_addr, dcn_level, dest_start.slot);
+#endif
+ err = invoke_cnode_retype(cap_root, scp_root, scp_addr, offset, new_type,
+ objsize, count, dcs_addr, dcn_addr, dcn_level,
+ dest_start.slot);
if (err_no(err) == SYS_ERR_RETRY_THROUGH_MONITOR) {
+ USER_PANIC("remote retype nyi for two-level cspace");
return cap_retype_remote(scp_addr, offset, new_type, objsize, count,
- dcn_addr, dest_start.slot, dcn_vbits);
+ dcn_addr, dest_start.slot, CPTR_BITS);
} else {
return err;
}
*
* \param dest Location where to create the cap, which must be empty.
* \param type Kernel object type to create.
- * \param size_bits Size of the created capability as a power of two.
+ * \param size Size of the created capability in bytes.
* (ignored for fixed-size objects)
*
* Only certain types of capabilities can be created this way. If invoked on
* SYS_ERR_TYPE_NOT_CREATABLE is returned. Most capabilities have to be retyped
* from other capabilities with cap_retype().
*/
-errval_t cap_create(struct capref dest, enum objtype type, uint8_t size_bits)
+errval_t cap_create(struct capref dest, enum objtype type, size_t size)
{
errval_t err;
- // Number of valid bits in the destination CNode address
- uint8_t dest_vbits = get_cnode_valid_bits(dest);
// Address of the cap to the destination CNode
capaddr_t dest_cnode_cptr = get_cnode_addr(dest);
+ enum cnode_type dest_cnode_level = get_cnode_level(dest);
- err = invoke_cnode_create(cap_root, type, size_bits, dest_cnode_cptr,
- dest.slot, dest_vbits);
+ err = invoke_cnode_create(cap_root, type, size, dest_cnode_cptr,
+ dest_cnode_level, dest.slot);
return err;
}
*
* Deletes (but does not revoke) the given capability, allowing the CNode slot
* to be reused.
+ *
+ * // TODO: croot!
*/
errval_t cap_delete(struct capref cap)
{
errval_t err;
- uint8_t vbits = get_cap_valid_bits(cap);
- capaddr_t caddr = get_cap_addr(cap) >> (CPTR_BITS - vbits);
+ struct capref croot = get_croot_capref(cap);
+ capaddr_t caddr = get_cap_addr(cap);
+ enum cnode_type level = get_cap_level(cap);
- err = invoke_cnode_delete(cap_root, caddr, vbits);
+ err = invoke_cnode_delete(croot, caddr, level);
if (err_no(err) == SYS_ERR_RETRY_THROUGH_MONITOR) {
- return cap_delete_remote(caddr, vbits);
+ USER_PANIC("remote delete nyi for two-level cspace");
+ return cap_delete_remote(caddr, CPTR_BITS);
} else {
return err;
}
errval_t cap_revoke(struct capref cap)
{
errval_t err;
- uint8_t vbits = get_cap_valid_bits(cap);
- capaddr_t caddr = get_cap_addr(cap) >> (CPTR_BITS - vbits);
+ struct capref croot = get_croot_capref(cap);
+ capaddr_t caddr = get_cap_addr(cap);
+ enum cnode_type level = get_cap_level(cap);
- err = invoke_cnode_revoke(cap_root, caddr, vbits);
+ err = invoke_cnode_revoke(croot, caddr, level);
if (err_no(err) == SYS_ERR_RETRY_THROUGH_MONITOR) {
- return cap_revoke_remote(caddr, vbits);
+ USER_PANIC("remote revoke nyi for two-level cspace");
+ return cap_revoke_remote(caddr, CPTR_BITS);
} else {
return err;
}
* \param dest location in which to place newly-created CNode cap
* \param src location of RAM capability to be retyped to new CNode
* \param cnoderef cnoderef struct, filled-in if non-NULL with relevant info
- * \param slot_bits number of slots in created CNode as a power of two.
+ * \param slots number of slots in created CNode
* must match size of RAM capability.
*
* This function requires that dest refer to an existing but empty slot. It
* retypes the given memory to a new CNode.
*/
errval_t cnode_create_from_mem(struct capref dest, struct capref src,
- struct cnoderef *cnoderef, uint8_t slot_bits)
+ enum objtype cntype, struct cnoderef *cnoderef,
+ size_t slots)
{
errval_t err;
+ if (cntype != ObjType_L1CNode &&
+ cntype != ObjType_L2CNode)
+ {
+ return LIB_ERR_CNODE_TYPE;
+ }
+
+
// Retype it to the destination
- err = cap_retype(dest, src, 0, ObjType_CNode, 1UL << slot_bits, 1);
+ // debug_printf("objsize =%zu\n", slots * (1UL << OBJBITS_CTE));
+ err = cap_retype(dest, src, 0, cntype, slots * (1UL << OBJBITS_CTE), 1);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_CAP_RETYPE);
}
// Construct the cnoderef to return
if (cnoderef != NULL) {
- *cnoderef = build_cnoderef(dest, slot_bits);
+ enum cnode_type ref_cntype = cntype == ObjType_L1CNode ? CNODE_TYPE_ROOT : CNODE_TYPE_OTHER;
+ // debug_printf("building cnoderef for objtype = %d, cntype = %d\n", cntype, ref_cntype);
+ *cnoderef = build_cnoderef(dest, ref_cntype);
}
return SYS_ERR_OK;
errval_t cnode_create(struct capref *ret_dest, struct cnoderef *cnoderef,
cslot_t slots, cslot_t *retslots)
{
- errval_t err;
-
- // Allocate a slot for destination.
- assert(ret_dest != NULL);
- err = slot_alloc(ret_dest);
- if (err_is_fail(err)) {
- return err_push(err, LIB_ERR_SLOT_ALLOC);
- }
-
- // Use cnode_create_raw
- return cnode_create_raw(*ret_dest, cnoderef, slots, retslots);
+ USER_PANIC("cnode_create deprecated; use cnode_create_l1 or cnode_create_l2\n");
+ return LIB_ERR_NOT_IMPLEMENTED;
}
/**
}
cslot_t retslots;
- err = cnode_create_raw(*ret_dest, cnoderef, L2_CNODE_SLOTS, &retslots);
+ err = cnode_create_raw(*ret_dest, cnoderef, ObjType_L2CNode,
+ L2_CNODE_SLOTS, &retslots);
if (retslots != L2_CNODE_SLOTS) {
debug_printf("Unable to create properly sized L2 CNode: got %"PRIuCSLOT" slots instead of %"PRIuCSLOT"\n",
retslots, (cslot_t)L2_CNODE_SLOTS);
}
/**
- * \brief Create a L1 CNode from newly-allocated RAM in a newly-allocated slot
+ * \brief Create a CNode for another cspace from newly-allocated RAM in a
+ * newly-allocated slot
*
* \param ret_dest capref struct to be filled-in with location of CNode
* \param cnoderef cnoderef struct, filled-in if non-NULL with relevant info
+ * \param cntype the type for the new cnode
*
- * This function creates a L1 CNode which contains 256 capabilities initially
- * and puts it in a slot in our own L1 CNode, so we can start populating it.
+ * This function creates a CNode which contains 256 capabilities initially
+ * and puts it in a slot in our cspace.
*/
-errval_t cnode_create_l1(struct capref *ret_dest, struct cnoderef *cnoderef)
+errval_t cnode_create_foreign(struct capref *ret_dest, struct cnoderef *cnoderef,
+ enum objtype cntype)
{
errval_t err;
- // Allocate a slot in root cn for destination
+ // Allocate a slot in our cspace
assert(ret_dest != NULL);
- err = slot_alloc_root(ret_dest);
+ err = slot_alloc(ret_dest);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_SLOT_ALLOC);
}
cslot_t retslots;
- err = cnode_create_raw(*ret_dest, cnoderef, L2_CNODE_SLOTS, &retslots);
+ err = cnode_create_raw(*ret_dest, cnoderef, cntype, L2_CNODE_SLOTS, &retslots);
if (retslots != L2_CNODE_SLOTS) {
- debug_printf("Unable to create properly sized L2 CNode: got %"PRIuCSLOT" slots instead of %"PRIuCSLOT"\n",
- retslots, (cslot_t)L2_CNODE_SLOTS);
+ debug_printf("Unable to create properly sized foreign CNode: "
+ "got %"PRIuCSLOT" slots instead of %"PRIuCSLOT"\n",
+ retslots, (cslot_t)L2_CNODE_SLOTS);
}
return err;
}
*
* \param dest location in which to place CNode cap
* \param cnoderef cnoderef struct, filled-in if non-NULL with relevant info
+ * \param cntype type of new cnode
* \param slots Minimum number of slots in created CNode
* \param retslots If non-NULL, filled in with the number of slots in created CNode
*
* The intermediate ram cap is destroyed.
*/
errval_t cnode_create_raw(struct capref dest, struct cnoderef *cnoderef,
- cslot_t slots, cslot_t *retslots)
+ enum objtype cntype, cslot_t slots, cslot_t *retslots)
{
errval_t err;
struct capref ram;
assert(slots > 0);
- uint8_t bits = log2ceil(slots);
- assert((1UL << bits) >= slots);
- if (bits < DEFAULT_CNODE_BITS) {
- bits = DEFAULT_CNODE_BITS;
+
+ if (cntype != ObjType_L1CNode &&
+ cntype != ObjType_L2CNode)
+ {
+ return LIB_ERR_CNODE_TYPE;
+ }
+
+ if (slots < L2_CNODE_SLOTS ||
+ (cntype == ObjType_L2CNode && slots != L2_CNODE_SLOTS))
+ {
+ return LIB_ERR_CNODE_SLOTS;
}
if (retslots != NULL) {
- *retslots = 1UL << bits;
+ *retslots = slots;
}
+ // XXX: mem_serv should serve non-power-of-two requests
+ uint8_t bits = log2ceil(slots);
+ assert(slots >= (1UL << bits));
+
// Allocate some memory
err = ram_alloc(&ram, bits + OBJBITS_CTE);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_RAM_ALLOC);
}
- err = cnode_create_from_mem(dest, ram, cnoderef, bits);
+ err = cnode_create_from_mem(dest, ram, cntype, cnoderef, slots);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_CNODE_CREATE_FROM_MEM);
}
cslot_t slots, cslot_t *retslots,
uint64_t guard, uint8_t guard_size)
{
- errval_t err;
-
- /* Create an intermediate cnode cap */
- struct capref inter;
- err = cnode_create(&inter, NULL, slots, retslots);
- if (err_is_fail(err)) {
- return err_push(err, LIB_ERR_CNODE_CREATE);
- }
-
- /* Mint it to the new destination setting the guard */
- err = cap_mint(dest, inter, guard, guard_size);
- if (err_is_fail(err)) {
- return err_push(err, LIB_ERR_CAP_MINT);
- }
-
- /* Free the intermediate cnode cap and slot */
- err = cap_delete(inter);
- if (err_is_fail(err)) {
- return err_push(err, LIB_ERR_WHILE_DELETING);
- }
- err = slot_free(inter);
- if (err_is_fail(err)) {
- return err_push(err, LIB_ERR_WHILE_FREEING_SLOT);
- }
-
- /* Build the cnoderef */
- if (cnoderef != NULL) {
- assert(slots > 0);
- uint8_t bits = log2ceil(slots);
- assert((1UL << bits) >= slots);
- if (bits < DEFAULT_CNODE_BITS) {
- bits = DEFAULT_CNODE_BITS;
- }
- *cnoderef = build_cnoderef(dest, bits);
- cnoderef->guard_size = guard_size;
- }
-
- return SYS_ERR_OK;
+ USER_PANIC("%s: GPT CNodes are deprecated\n", __FUNCTION__);
}
/**
}
assert(type_is_vnode(type));
- err = cap_retype(dest, ram, 0, type, 0, 1);
+ err = cap_retype(dest, ram, 0, type, vnode_objsize(type), 1);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_CAP_RETYPE);
}
return err;
}
- if (cap.type != ObjType_CNode) {
+ if (cap.type != ObjType_L1CNode &&
+ cap.type != ObjType_L2CNode) {
return LIB_ERR_NOT_CNODE;
}
- cnoder->address = get_cap_addr(capr);
- cnoder->address_bits = get_cap_valid_bits(capr);
- cnoder->size_bits = cap.u.cnode.bits;
- cnoder->guard_size = cap.u.cnode.guard_size;
-
- return SYS_ERR_OK;
-}
-
-errval_t cnode_build_l1cnoderef(struct cnoderef *cnoder, struct capref capr)
-{
- struct capability cap;
- errval_t err = debug_cap_identify(capr, &cap);
- if (err_is_fail(err)) {
- return err;
- }
-
- if (cap.type != ObjType_L1CNode) {
- return LIB_ERR_NOT_CNODE;
+ if (!cnodecmp(capr.cnode, cnode_root)) {
+ USER_PANIC("cnode_build_cnoderef NYI for non rootcn caprefs");
}
- cnoder->address = get_cap_addr(capr);
- cnoder->address_bits = get_cap_valid_bits(capr);
- cnoder->size_bits = log2ceil(cap.u.l1cnode.allocated_bytes);
- cnoder->guard_size = 0;
+ cnoder->croot = get_croot_addr(capr);
+ cnoder->cnode = capr.slot << L2_CNODE_BITS;
+ cnoder->level = CNODE_TYPE_OTHER;
return SYS_ERR_OK;
}
abort();
}
+static inline errval_t
+invoke_monitor_identify_cap(capaddr_t cap, int level, struct capability *out)
+{
+ return cap_invoke4(cap_kernel, KernelCmd_Identify_cap, cap, level,
+ (uintptr_t)out).error;
+}
errval_t debug_cap_identify(struct capref cap, struct capability *ret)
{
errval_t err, msgerr;
return SYS_ERR_CAP_NOT_FOUND;
}
+ uint8_t level = get_cap_level(cap);
+ capaddr_t caddr = get_cap_addr(cap);
+ err = invoke_monitor_identify_cap(caddr, level, ret);
+ if (err_is_ok(err)) {
+ // we have kernel cap, return result;
+ return SYS_ERR_OK;
+ }
+
+ // Direct invocation failed, try via monitor
union {
monitor_blocking_caprep_t caprep;
struct capability capability;
*/
static void walk_cspace(struct cnoderef cnode, uint8_t level)
{
+ USER_PANIC("walk_cspace NYI for 2-level cspace layout\n");
+#if 0
struct capability cap;
errval_t err;
walk_cspace(childcn, level + 1);
}
}
+#endif
}
/**
errval_t err = debug_cap_identify(root, &cap);
assert(err_is_ok(err));
- struct cnoderef cnode = {
- .address = get_cap_addr(root),
- .address_bits = get_cap_valid_bits(root),
- .size_bits = cap.u.cnode.bits,
- .guard_size = cap.u.cnode.guard_size,
- };
-
+ struct cnoderef cnode = build_cnoderef(root, 0);
walk_cspace(cnode, 0);
}
void debug_my_cspace(void)
{
- // XXX: Assume my root CNode has a size of #DEFAULT_CNODE_BITS
- struct cnoderef cnode = {
- .address = 0,
- .address_bits = 0,
- .size_bits = DEFAULT_CNODE_BITS,
- .guard_size = 0,
- };
-
- walk_cspace(cnode, 0);
+ walk_cspace(cnode_root, 0);
}
int debug_print_capref(char *buf, size_t len, struct capref cap)
{
- return snprintf(buf, len, "CNode addr 0x%08" PRIxCADDR
- ", vbits = %d, slot %" PRIuCADDR ", vbits = %d",
- get_cnode_addr(cap), get_cnode_valid_bits(cap), cap.slot,
- get_cap_valid_bits(cap));
+ return snprintf(buf, len, "CSpace root addr 0x%08" PRIxCADDR", "
+ "CNode addr 0x%08" PRIxCADDR
+ ", level = %d, slot %" PRIuCADDR ", level = %d",
+ get_croot_addr(cap), get_cnode_addr(cap),
+ get_cnode_level(cap), cap.slot, get_cap_level(cap));
+}
+
+int debug_print_cnoderef(char *buf, size_t len, struct cnoderef cnode)
+{
+ return snprintf(buf, len, "CSpace root addr 0x%08"PRIxCADDR", "
+ "CNode addr 0x%08"PRIxCADDR", level = %d",
+ cnode.croot, cnode.cnode, cnode.level);
}
void debug_dump_mem(lvaddr_t start_addr, lvaddr_t end_addr, lvaddr_t point)
#endif
}
+#if 0
/**
* \brief Copy cap to root CNode, enabling its use with LRPC
*
return SYS_ERR_OK;
}
+#endif
/// Handler for LMP bind reply messages from the Monitor
static void bind_lmp_reply_handler(struct monitor_binding *b,
struct capref endpoint)
{
struct lmp_chan *lc = (void *)conn_id;
- errval_t err;
assert(lc->connstate == LMP_BIND_WAIT);
if (err_is_ok(success)) { /* bind succeeded */
lc->connstate = LMP_CONNECTED;
-
+ lc->remote_cap = endpoint;
+#if 0
/* Place the cap in the rootcn, to allow LRPC */
+ errval_t err;
err = move_to_root(endpoint, &lc->remote_cap);
if (err_is_fail(err)) {
DEBUG_ERR(err, "error moving endpoint cap to root in LMP bind reply");
// leave it where it is, and continue
lc->remote_cap = endpoint;
}
+#endif
}
/* either way, tell the user what happened */
static void endpoint_init(struct lmp_endpoint *ep)
{
ep->k.delivered = ep->k.consumed = 0;
+ ep->k.recv_cspc = 0;
ep->k.recv_cptr = 0;
- ep->k.recv_bits = 0;
ep->seen = 0;
waitset_chanstate_init(&ep->waitset_state, CHANTYPE_LMP_IN);
}
uintptr_t epoffset = (uintptr_t)&ep->k - (uintptr_t)curdispatcher();
+ // debug_printf("%s: calling mint with epoffset = %"PRIuPTR", buflen = %zu\n", __FUNCTION__, epoffset, buflen);
+
// mint new badged cap from our existing reply endpoint
return cap_mint(dest, cap_selfep, epoffset, buflen);
}
*/
void lmp_endpoint_set_recv_slot(struct lmp_endpoint *ep, struct capref slot)
{
- ep->k.recv_cptr = get_cnode_addr(slot);
- ep->k.recv_bits = get_cnode_valid_bits(slot);
- ep->k.recv_slot = slot.slot;
+ // debug_printf("%s: recv_cspace = %"PRIxCADDR", recv_cptr = %"PRIxCADDR"\n",
+ // __FUNCTION__, get_croot_addr(slot), get_cap_addr(slot));
+ ep->k.recv_cspc = get_croot_addr(slot);
+ ep->k.recv_cptr = get_cap_addr(slot);
ep->recv_slot = slot;
}
if (cap != NULL) {
*cap = ep->recv_slot;
}
- ep->k.recv_cptr = ep->k.recv_bits = ep->k.recv_slot = 0;
+ ep->k.recv_cptr = ep->k.recv_cspc = 0;
} else if (cap != NULL) {
*cap = NULL_CAP;
}
}
thread_mutex_unlock(&ca->mutex); // cnode_create_raw uses ram_alloc
// which may call slot_alloc
- err = cnode_create_raw(cap, &cnode, ca->nslots, NULL);
+ err = cnode_create_raw(cap, &cnode, ObjType_CNode, ca->nslots, NULL);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_CNODE_CREATE);
}
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_SLOT_ALLOC);
}
- err = cnode_create_raw(cap, &cnode, nslots, NULL);
+ err = cnode_create_raw(cap, &cnode, ObjType_CNode, nslots, NULL);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_CNODE_CREATE);
}
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_SLOT_ALLOC);
}
- err = cnode_create_raw(cap, &cnode, nslots, NULL);
+ err = cnode_create_raw(cap, &cnode, ObjType_CNode, nslots, NULL);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_CNODE_CREATE);
}
ret->cnode = sca->cnode;
ret->slot = sca->head->slot;
+#if 0
+ char buf[256];
+ debug_print_capref(buf, 256, *ret);
+ debug_printf("%p->salloc: ret = %.*s\n", ca, 256, buf);
+#endif
+
// Decrement space
sca->head->space--;
sca->head->slot++;
return SYS_ERR_OK;
}
-static errval_t sfree(struct slot_allocator *ca, struct capref cap)
+static errval_t free_slot(struct single_slot_allocator *sca, cslot_t slot, struct thread_mutex *mutex)
{
- struct single_slot_allocator *sca = (struct single_slot_allocator*)ca;
- errval_t err = SYS_ERR_OK;
-
- if (!cnodecmp(cap.cnode, sca->cnode)) {
- return LIB_ERR_SLOT_ALLOC_WRONG_CNODE;
- }
+ thread_mutex_lock(mutex);
- thread_mutex_lock(&ca->mutex);
+ errval_t err = SYS_ERR_OK;
struct cnode_meta *walk = sca->head;
struct cnode_meta *prev = NULL;
// Entire cnode was allocated
if (!sca->head) {
sca->head = slab_alloc(&sca->slab);
- sca->head->slot = cap.slot;
+ sca->head->slot = slot;
sca->head->space = 1;
sca->head->next = NULL;
goto finish;
}
// Freeing one before head
- if (cap.slot + 1 == sca->head->slot) {
- sca->head->slot = cap.slot;
+ if (slot + 1 == sca->head->slot) {
+ sca->head->slot = slot;
sca->head->space++;
goto finish;
}
// Freeing before head
- if (cap.slot < sca->head->slot) {
+ if (slot < sca->head->slot) {
struct cnode_meta *new = slab_alloc(&sca->slab);
- new->slot = cap.slot;
+ new->slot = slot;
new->space = 1;
new->next = sca->head;
sca->head = new;
while (walk != NULL) {
// Freeing at the edge of walk
- if (cap.slot == walk->slot + walk->space) {
+ if (slot == walk->slot + walk->space) {
walk->space++;
// check if we can merge walk to next
goto finish;
}
- else if (cap.slot < walk->slot + walk->space) {
+ else if (slot < walk->slot + walk->space) {
err = LIB_ERR_SLOT_UNALLOCATED;
goto unlock;
}
// Freeing just before walk->next
- if (walk->next && cap.slot + 1 == walk->next->slot) {
- walk->next->slot = cap.slot;
+ if (walk->next && slot + 1 == walk->next->slot) {
+ walk->next->slot = slot;
walk->next->space++;
goto finish;
}
// Freeing after walk and before walk->next
- if (walk->next && cap.slot < walk->next->slot) {
+ if (walk->next && slot < walk->next->slot) {
struct cnode_meta *new = walk->next;
walk->next = slab_alloc(&sca->slab);
- walk->next->slot = cap.slot;
+ walk->next->slot = slot;
walk->next->space = 1;
walk->next->next = new;
goto finish;
// Freeing after the list
prev->next = slab_alloc(&sca->slab);
- prev->next->slot = cap.slot;
+ prev->next->slot = slot;
prev->next->space = 1;
prev->next->next = NULL;
sca->a.space++;
unlock:
- thread_mutex_unlock(&ca->mutex);
+ thread_mutex_unlock(mutex);
return err;
}
+static errval_t sfree(struct slot_allocator *ca, struct capref cap)
+{
+ struct single_slot_allocator *sca = (struct single_slot_allocator*)ca;
+ if (!cnodecmp(cap.cnode, sca->cnode)) {
+ return LIB_ERR_SLOT_ALLOC_WRONG_CNODE;
+ }
+
+ return free_slot(sca, cap.slot, &ca->mutex);
+}
+
errval_t single_slot_alloc_init_raw(struct single_slot_allocator *ret,
struct capref cap, struct cnoderef cnode,
cslot_t nslots, void *buf, size_t buflen)
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT_RAW);
}
- state->rootca.a.space = DEFAULT_CNODE_SLOTS - ROOTCN_FREE_EP_SLOTS;
- state->rootca.head->space = DEFAULT_CNODE_SLOTS - ROOTCN_FREE_EP_SLOTS;
- state->rootca.head->slot = ROOTCN_FREE_EP_SLOTS;
+ state->rootca.a.space = DEFAULT_CNODE_SLOTS - ROOTCN_FREE_SLOTS;
+ state->rootca.head->space = DEFAULT_CNODE_SLOTS - ROOTCN_FREE_SLOTS;
+ state->rootca.head->slot = ROOTCN_FREE_SLOTS;
return SYS_ERR_OK;
}
def->top = NULL;
def->head = &state->head;
- def->head->next = &state->extra;
- def->head->next->next = NULL;
+ def->head->next = NULL;
def->reserve = &state->reserve;
def->reserve->next = NULL;
+#if 0
// Top: not used for 2level cspace, put CNode into standard list
- def->top = NULL;
cap.cnode = cnode_root;
cap.slot = ROOTCN_SLOT_SLOT_ALLOC0;
- cnode = build_cnoderef(cap, SLOT_ALLOC_CNODE_BITS);
+ //cnode = build_cnoderef(cap, SLOT_ALLOC_CNODE_BITS);
+ cnode.croot = CPTR_ROOTCN;
+ cnode.root_level = CSPACE_LEVEL_L2;
+ cnode.cnode = ROOTCN_SLOT_ADDR(ROOTCN_SLOT_SLOT_ALLOC0);
+ cnode.node_level = CSPACE_LEVEL_L2;
err = single_slot_alloc_init_raw((struct single_slot_allocator*)def->head->next,
cap, cnode,
SLOT_ALLOC_CNODE_SLOTS, state->top_buf,
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT_RAW);
}
+#endif
// Head
cap.cnode = cnode_root;
cap.slot = ROOTCN_SLOT_SLOT_ALLOC1;
- cnode = build_cnoderef(cap, SLOT_ALLOC_CNODE_BITS);
+ cnode = build_cnoderef(cap, CNODE_TYPE_OTHER);
+ /*
+ cnode.croot = CPTR_ROOTCN;
+ cnode.cnode = ROOTCN_SLOT_ADDR(ROOTCN_SLOT_SLOT_ALLOC1);
+ cnode.level = CSPACE_LEVEL_L2;
+ */
err = single_slot_alloc_init_raw(&def->head->a, cap, cnode,
SLOT_ALLOC_CNODE_SLOTS, state->head_buf,
sizeof(state->head_buf));
// Reserve
cap.cnode = cnode_root;
cap.slot = ROOTCN_SLOT_SLOT_ALLOC2;
- cnode = build_cnoderef(cap, SLOT_ALLOC_CNODE_BITS);
+ cnode = build_cnoderef(cap, CNODE_TYPE_OTHER);
+ /*
+ cnode.croot = CPTR_ROOTCN;
+ cnode.root_level = CSPACE_LEVEL_L2;
+ cnode.cnode = ROOTCN_SLOT_ADDR(ROOTCN_SLOT_SLOT_ALLOC2);
+ cnode.node_level = CSPACE_LEVEL_L2;
+ */
err = single_slot_alloc_init_raw(&def->reserve->a, cap, cnode,
SLOT_ALLOC_CNODE_SLOTS, state->reserve_buf,
sizeof(state->reserve_buf));
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT_RAW);
}
- state->rootca.a.space = DEFAULT_CNODE_SLOTS - ROOTCN_FREE_EP_SLOTS;
- state->rootca.head->space = DEFAULT_CNODE_SLOTS - ROOTCN_FREE_EP_SLOTS;
- state->rootca.head->slot = ROOTCN_FREE_EP_SLOTS;
+ state->rootca.a.space = DEFAULT_CNODE_SLOTS - ROOTCN_FREE_SLOTS;
+ state->rootca.head->space = DEFAULT_CNODE_SLOTS - ROOTCN_FREE_SLOTS;
+ state->rootca.head->slot = ROOTCN_FREE_SLOTS;
return SYS_ERR_OK;
}
}
thread_mutex_unlock(&ca->mutex); // cnode_create_raw uses ram_alloc
// which may call slot_alloc
- err = cnode_create_raw(cap, &cnode, ca->nslots, NULL);
+ err = cnode_create_raw(cap, &cnode, ObjType_L2CNode, ca->nslots, NULL);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_CNODE_CREATE);
}
return err_push(err, LIB_ERR_VNODE_CREATE);
}
+ // XXX: need to make sure that vnode cap that we will invoke is in our cspace!
+ if (get_croot_addr(newvnode->u.vnode.cap) != CPTR_ROOTCN) {
+ // debug_printf("%s: creating vnode for another domain in that domain's cspace; need to copy vnode cap to our cspace to make it invokable\n", __FUNCTION__);
+ err = slot_alloc(&newvnode->u.vnode.invokable);
+ assert(err_is_ok(err));
+ err = cap_copy(newvnode->u.vnode.invokable, newvnode->u.vnode.cap);
+ assert(err_is_ok(err));
+ } else {
+ // debug_printf("vnode in our cspace: copying capref to invokable\n");
+ newvnode->u.vnode.invokable = newvnode->u.vnode.cap;
+ }
+ assert(!capref_is_null(newvnode->u.vnode.cap));
+ assert(!capref_is_null(newvnode->u.vnode.invokable));
+
err = pmap->p.slot_alloc->alloc(pmap->p.slot_alloc, &newvnode->mapping);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_SLOT_ALLOC);
}
// Map it
- err = vnode_map(root->u.vnode.cap, newvnode->u.vnode.cap, entry,
+ err = vnode_map(root->u.vnode.invokable, newvnode->u.vnode.cap, entry,
PTABLE_ACCESS_DEFAULT, 0, 1, newvnode->mapping);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_VNODE_MAP);
debug_printf("remove_empty_vnodes: cap_delete (vnode): %s\n",
err_getstring(err));
}
+ if (!capcmp(n->u.vnode.cap, n->u.vnode.invokable)) {
+ // invokable is always allocated in our cspace
+ err = cap_destroy(n->u.vnode.invokable);
+ if (err_is_fail(err)) {
+ debug_printf("remove_empty_vnodes: cap_delete (vnode.invokable): %s\n",
+ err_getstring(err));
+
+ }
+ }
err = pmap->p.slot_alloc->free(pmap->p.slot_alloc, n->u.vnode.cap);
if (err_is_fail(err)) {
debug_printf("remove_empty_vnodes: slot_free (vnode): %s\n",
assert(n != NULL);
// populate it and append to parent's list of children
- n->is_vnode = true;
- n->entry = (*in)->entry;
- n->u.vnode.cap.cnode = cnode_page;
- n->u.vnode.cap.slot = (*in)->slot;
- n->u.vnode.children = NULL;
- n->next = parent->u.vnode.children;
+ n->is_vnode = true;
+ n->entry = (*in)->entry;
+ n->u.vnode.cap.cnode = cnode_page;
+ n->u.vnode.cap.slot = (*in)->slot;
+ n->u.vnode.invokable = n->u.vnode.cap;
+ n->u.vnode.children = NULL;
+ n->next = parent->u.vnode.children;
parent->u.vnode.children = n;
(*in)++;
paging_x86_64_flags_t pmap_flags = vregion_to_pmap_flag(flags);
// Get the paging structure and set paging relevant parameters
- struct vnode *ptable;
+ struct vnode *ptable = NULL;
errval_t err;
size_t table_base;
}
// do map
- err = vnode_map(ptable->u.vnode.cap, frame, table_base,
+ assert(!capref_is_null(ptable->u.vnode.invokable));
+ err = vnode_map(ptable->u.vnode.invokable, frame, table_base,
pmap_flags, offset, pte_count, page->mapping);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_VNODE_MAP);
// get base address and size of frame
struct frame_identity fi;
- err = invoke_frame_identify(frame, &fi);
+ if (get_croot_addr(frame) != CPTR_ROOTCN) {
+ struct capref local_frame = frame;
+ err = slot_alloc(&local_frame);
+ assert(err_is_ok(err));
+ err = cap_copy(local_frame, frame);
+ assert(err_is_ok(err));
+ err = invoke_frame_identify(local_frame, &fi);
+ errval_t err2 = cap_destroy(local_frame);
+ assert(err_is_ok(err2));
+ } else {
+ err = invoke_frame_identify(frame, &fi);
+ }
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_PMAP_DO_MAP);
}
struct pmap_x86 *x86 = (struct pmap_x86*)pmap;
struct frame_identity fi;
- err = invoke_frame_identify(frame, &fi);
+ if (get_croot_addr(frame) != CPTR_ROOTCN) {
+ struct capref local_frame = frame;
+ err = slot_alloc(&local_frame);
+ assert(err_is_ok(err));
+ err = cap_copy(local_frame, frame);
+ assert(err_is_ok(err));
+ err = invoke_frame_identify(local_frame, &fi);
+ errval_t err2 = cap_destroy(local_frame);
+ assert(err_is_ok(err2));
+ } else {
+ err = invoke_frame_identify(frame, &fi);
+ }
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_PMAP_FRAME_IDENTIFY);
}
x86->root.is_vnode = true;
x86->root.u.vnode.cap = vnode;
+ x86->root.u.vnode.invokable = vnode;
+ if (get_croot_addr(vnode) != CPTR_ROOTCN) {
+ errval_t err = slot_alloc(&x86->root.u.vnode.invokable);
+ assert(err_is_ok(err));
+ err = cap_copy(x86->root.u.vnode.invokable, vnode);
+ assert(err_is_ok(err));
+ }
+ assert(!capref_is_null(x86->root.u.vnode.cap));
+ assert(!capref_is_null(x86->root.u.vnode.invokable));
x86->root.u.vnode.children = NULL;
x86->root.next = NULL;
/* Check if domain is part of a session. */
err = debug_cap_identify(cap_sessionid, &cap);
if (err_is_ok(err)) {
+ char buf[128];
+ debug_print_cap(buf, 128, &cap);
+ debug_printf("have sesion cap %s: initializing libterm client\n", buf);
/* Initialize libterm_client. */
err = term_client_blocking_init(&state->client, cap_sessionid);
if (err_is_fail(err)) {
new->size = size;
new->foffset = foffset;
- {
- struct frame_identity id;
- err = invoke_frame_identify(frame, &id);
+ struct frame_identity fi;
+ if (get_croot_addr(frame) != CPTR_ROOTCN) {
+ struct capref local_frame = frame;
+ err = slot_alloc(&local_frame);
assert(err_is_ok(err));
- new->pa = id.base;
+ err = cap_copy(local_frame, frame);
+ assert(err_is_ok(err));
+ err = invoke_frame_identify(local_frame, &fi);
+ errval_t err2 = cap_destroy(local_frame);
+ assert(err_is_ok(err2));
+ } else {
+ err = invoke_frame_identify(frame, &fi);
}
+ assert(err_is_ok(err));
+ new->pa = fi.base;
// Insert in order
struct memobj_frame_list *walk = anon->frame_list;
int sub_ret = mdb_sub_find_range(root, address, size, max_precision, sub,
&sub_result);
if (sub_ret > max_precision) {
- *result = NULL;
+ *result = sub_result;
*ret = sub_ret;
}
else if (sub_ret > *ret) {
ret = MDB_RANGE_FOUND_SURROUNDING;
}
if (ret > max_precision) {
- *ret_node = NULL;
+ *ret_node = result;
return ret;
}
}
N(current)->left, /*inout*/&ret,
/*inout*/&result);
if (ret > max_precision) {
- *ret_node = NULL;
+ *ret_node = result;
return ret;
}
}
N(current)->right, /*inout*/&ret,
/*inout*/&result);
if (ret > max_precision) {
- *ret_node = NULL;
+ *ret_node = result;
return ret;
}
}
return err_push(err, MM_ERR_SLOT_NOSLOTS);
}
- err = cap_retype(temp, cap, offset, mm->objtype, 1UL << blockbits, 1);
+ err = cap_retype(temp, cap, offset, mm->objtype, blockbytes, 1);
if (err_is_fail(err)) {
DEBUG_ERR(err, "Retyping region");
return err_push(err, MM_ERR_MM_ADD_MULTI);
}
// Retype to and build the top level cnode
- err = cnode_create_from_mem(this->top_cnode_slot, ram_cap,
+ err = cnode_create_from_mem(this->top_cnode_slot, ram_cap, ObjType_CNode,
&this->top_cnode, this->cnode_size_bits);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_CNODE_CREATE);
.cnode = this->top_cnode,
.slot = this->top_used++
};
- err = cnode_create_from_mem(cnode_cap, ram_cap, &this->meta[refill].cap.cnode,
+ err = cnode_create_from_mem(cnode_cap, ram_cap, ObjType_CNode,
+ &this->meta[refill].cap.cnode,
this->cnode_size_bits);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_CNODE_CREATE);
errval_t slot_alloc_basecn_init(struct slot_alloc_basecn *this)
{
- // set free to 0 to trigger refill on first allocation
- this->free = 0;
+ // Use ROOTCN_SLOT_SLOT_ALLOC0 as CNode for basecn allocator
+ this->cap.cnode.croot = CPTR_ROOTCN;
+ this->cap.cnode.cnode = ROOTCN_SLOT_ADDR(ROOTCN_SLOT_SLOT_ALLOC0);
+ this->cap.cnode.level = CNODE_TYPE_OTHER;
+ this->cap.slot = 0;
+ this->free = L2_CNODE_SLOTS;
return SYS_ERR_OK;
}
if (nslots > this->free) {
/* XXX: Special case for init, need to get memory from basecn */
struct capref ram;
- err = ram_alloc_fixed(&ram, BASE_PAGE_BITS, 0, 0);
+ err = ram_alloc(&ram, L2_CNODE_BITS + OBJBITS_CTE);
if (err_is_fail(err)) {
+ DEBUG_ERR(err, "ram_alloc in slot_alloc_basecn cannot allocate L2 "
+ "CNode-sized ram cap");
return err_push(err, LIB_ERR_RAM_ALLOC);
}
return err_push(err, LIB_ERR_SLOT_ALLOC);
}
- err = cnode_create_from_mem(cnode, ram, &this->cap.cnode,
- DEFAULT_CNODE_BITS);
+ err = cnode_create_from_mem(cnode, ram, ObjType_L2CNode,
+ &this->cap.cnode, L2_CNODE_SLOTS);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_CNODE_CREATE);
}
return err_push(err, LIB_ERR_SLOT_ALLOC);
}
- err = cnode_create_from_mem(cnode_cap, ram_cap,
- &this->meta[refill].cap.cnode, L2_CNODE_BITS);
+ err = cnode_create_from_mem(cnode_cap, ram_cap, ObjType_L2CNode,
+ &this->meta[refill].cap.cnode, L2_CNODE_SLOTS);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_CNODE_CREATE);
}
if (err_is_fail(err)) {
return err_push(err, OCT_ERR_IDCAP_INVOKE);
}
- cap_delete(idcap);
+
+ err = cap_delete(idcap);
+ assert(err_is_ok(err));
if (attributes == NULL) {
attributes = "";
struct oct_reply_state *drs = NULL;
struct ast_object *ast = NULL;
- err = build_query_with_idcap(&query, idcap, "");
- if (err_is_fail(err)) {
- goto out;
- }
-
OCT_DEBUG("get_with_idcap_handler: %s\n", query);
err = new_oct_reply_state(&drs, get_with_idcap_reply);
assert(err_is_ok(err));
+ err = build_query_with_idcap(&query, idcap, "");
+ if (err_is_fail(err)) {
+ goto out;
+ }
+
err = check_query_length(query);
if (err_is_fail(err)) {
goto out;
// XXX: this code assumes that elf_load never needs more than 256 slots for
// text frame capabilities.
err = cnode_create_l2(&local_cnode_cap, &si->segcn);
+
if (err_is_fail(err)) {
return err_push(err, SPAWN_ERR_CREATE_SEGCN);
}
// Mint SegCN into new domain's cspace
- err = cap_mint(cnode_cap, local_cnode_cap, 0, 0);
+ err = cap_copy(cnode_cap, local_cnode_cap);
if (err_is_fail(err)) {
return err_push(err, SPAWN_ERR_MINT_SEGCN);
}
return spawn_parse_omp_functions(si->name, binary, binary_size);
}
+ /* delete our copy of segcn cap */
+ err = cap_destroy(local_cnode_cap);
+ assert(err_is_ok(err));
+
return SYS_ERR_OK;
}
struct capref t1;
/* Create root CNode */
- err = cnode_create_l1(&si->rootcn_cap, &si->rootcn);
+ err = cnode_create_foreign(&si->rootcn_cap, &si->rootcn, ObjType_L1CNode);
if (err_is_fail(err)) {
return err_push(err, SPAWN_ERR_CREATE_ROOTCN);
}
/* Create taskcn */
- err = cnode_create_l2(&si->taskcn_cap, &si->taskcn);
+ err = cnode_create_foreign(&si->taskcn_cap, &si->taskcn, ObjType_L2CNode);
if (err_is_fail(err)) {
return err_push(err, SPAWN_ERR_CREATE_TASKCN);
}
- // Mint into rootcn setting the guard
+ /* Copy taskcn into rootcn */
t1.cnode = si->rootcn;
t1.slot = ROOTCN_SLOT_TASKCN;
- err = cap_mint(t1, si->taskcn_cap, 0,
- GUARD_REMAINDER(2 * L2_CNODE_BITS));
+
+ err = cap_copy(t1, si->taskcn_cap);
if (err_is_fail(err)) {
return err_push(err, SPAWN_ERR_MINT_TASKCN);
}
+ /* Update taskcn cnoderef to refer to copy in new cspace */
+ si->taskcn.croot = get_cap_addr(si->rootcn_cap);
+ si->taskcn.cnode = ROOTCN_SLOT_ADDR(ROOTCN_SLOT_TASKCN);
+ si->taskcn.level = CNODE_TYPE_OTHER;
+
/* Create slot_alloc_cnode */
t1.cnode = si->rootcn;
t1.slot = ROOTCN_SLOT_SLOT_ALLOC0;
- err = cnode_create_raw(t1, NULL, L2_CNODE_SLOTS, NULL);
+ err = cnode_create_raw(t1, NULL, ObjType_L2CNode, L2_CNODE_SLOTS, NULL);
if (err_is_fail(err)) {
return err_push(err, SPAWN_ERR_CREATE_SLOTALLOC_CNODE);
}
t1.cnode = si->rootcn;
t1.slot = ROOTCN_SLOT_SLOT_ALLOC1;
- err = cnode_create_raw(t1, NULL, L2_CNODE_SLOTS, NULL);
+ err = cnode_create_raw(t1, NULL, ObjType_L2CNode, L2_CNODE_SLOTS, NULL);
if (err_is_fail(err)) {
return err_push(err, SPAWN_ERR_CREATE_SLOTALLOC_CNODE);
}
t1.cnode = si->rootcn;
t1.slot = ROOTCN_SLOT_SLOT_ALLOC2;
- err = cnode_create_raw(t1, NULL, L2_CNODE_SLOTS, NULL);
+ err = cnode_create_raw(t1, NULL, ObjType_L2CNode, L2_CNODE_SLOTS, NULL);
if (err_is_fail(err)) {
return err_push(err, SPAWN_ERR_CREATE_SLOTALLOC_CNODE);
}
- // Create DCB
- si->dcb.cnode = si->taskcn;
- si->dcb.slot = TASKCN_SLOT_DISPATCHER;
+ // Create DCB: make si->dcb invokable
+ err = slot_alloc(&si->dcb);
+ if (err_is_fail(err)) {
+ return err_push(err, LIB_ERR_SLOT_ALLOC);
+ }
err = dispatcher_create(si->dcb);
if (err_is_fail(err)) {
return err_push(err, SPAWN_ERR_CREATE_DISPATCHER);
}
+ // Copy DCB to new taskcn
+ t1.cnode = si->taskcn;
+ t1.slot = TASKCN_SLOT_DISPATCHER;
+ err = cap_copy(t1, si->dcb);
+ if (err_is_fail(err)) {
+ return err_push(err, LIB_ERR_CAP_COPY);
+ }
+
// Give domain endpoint to itself (in taskcn)
struct capref selfep = {
.cnode = si->taskcn,
// Map root CNode (in taskcn)
t1.cnode = si->taskcn;
t1.slot = TASKCN_SLOT_ROOTCN;
- err = cap_mint(t1, si->rootcn_cap, 0, 0);
+ err = cap_copy(t1, si->rootcn_cap);
if (err_is_fail(err)) {
return err_push(err, SPAWN_ERR_MINT_ROOTCN);
}
struct cnoderef basecn;
// Create basecn in our rootcn so we can copy stuff in there
- err = cnode_create_l2(&basecn_cap, &basecn);
+ err = cnode_create_foreign(&basecn_cap, &basecn, ObjType_L2CNode);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_CNODE_CREATE);
}
- // Place the ram caps
- for (cslot_t i = 0; i < L2_CNODE_SLOTS; i++) {
- struct capref base = {
- .cnode = basecn,
- .slot = i
- };
- struct capref ram;
- err = ram_alloc(&ram, BASE_PAGE_BITS);
- if (err_is_fail(err)) {
- return err_push(err, LIB_ERR_RAM_ALLOC);
- }
- err = cap_copy(base, ram);
-
- if (err_is_fail(err)) {
- DEBUG_ERR(err, "copying ram");
- return err_push(err, LIB_ERR_CAP_COPY);
- }
- err = cap_destroy(ram);
- if (err_is_fail(err)) {
- return err_push(err, LIB_ERR_CAP_DESTROY);
- }
- }
-
- // Mint basecn into si->rootcn
+ // copy basecn into new cspace's rootcn
t1.cnode = si->rootcn;
t1.slot = ROOTCN_SLOT_BASE_PAGE_CN;
- err = cap_mint(t1, basecn_cap, 0, 0);
+ err = cap_copy(t1, basecn_cap);
if (err_is_fail(err)) {
return err_push(err, SPAWN_ERR_MINT_BASE_PAGE_CN);
}
+ basecn.croot = get_cap_addr(si->rootcn_cap);
+ basecn.cnode = ROOTCN_SLOT_ADDR(ROOTCN_SLOT_BASE_PAGE_CN);
+ basecn.level = CNODE_TYPE_OTHER;
+
+ // get big RAM cap to split into L2_CNODE_SLOTS BASE_PAGE_SIZE-sized caps
+ struct capref ram;
+ err = ram_alloc(&ram, L2_CNODE_BITS + BASE_PAGE_BITS);
+ if (err_is_fail(err)) {
+ return err_push(err, LIB_ERR_RAM_ALLOC);
+ }
+
+ // retype big RAM cap into small caps in new basecn
+ struct capref base = {
+ .cnode = basecn,
+ .slot = 0,
+ };
+ err = cap_retype(base, ram, 0, ObjType_RAM, BASE_PAGE_SIZE, L2_CNODE_SLOTS);
+ if (err_is_fail(err)) {
+ return err_push(err, LIB_ERR_CAP_RETYPE);
+ }
+
+ // delete big RAM cap
+ err = cap_destroy(ram);
+ if (err_is_fail(err)) {
+ return err_push(err, LIB_ERR_CAP_DESTROY);
+ }
+
return SYS_ERR_OK;
}
errval_t err;
/* Create pagecn */
- err = cnode_create_l2(&si->pagecn_cap, &si->pagecn);
+ err = cnode_create_foreign(&si->pagecn_cap, &si->pagecn, ObjType_L2CNode);
if (err_is_fail(err)) {
return err_push(err, SPAWN_ERR_CREATE_PAGECN);
}
+ // fixup si->pagecn
+ si->pagecn.croot = get_cap_addr(si->rootcn_cap);
+ si->pagecn.cnode = ROOTCN_SLOT_ADDR(ROOTCN_SLOT_PAGECN);
+ si->pagecn.level = CNODE_TYPE_OTHER;
/* Mint pagecn into si->rootcn */
struct capref pagecn = (struct capref){.cnode = si->rootcn, .slot = ROOTCN_SLOT_PAGECN};
- err = cap_mint(pagecn, si->pagecn_cap, 0, 0);
+ err = cap_copy(pagecn, si->pagecn_cap);
if (err_is_fail(err)) {
return err_push(err, SPAWN_ERR_MINT_PAGECN);
}
si->handle = handle;
return SYS_ERR_OK;
+
}
errval_t spawn_map_bootinfo(struct spawninfo *si, genvaddr_t *retvaddr)
return err_push(err, LIB_ERR_SLOT_ALLOC);
}
- err = cap_mint(inheritcn_cncap, inheritcn_cap, 0, 0);
+ err = cap_copy(inheritcn_cncap, inheritcn_cap);
if (err_is_fail(err)) {
return err_push(err, SPAWN_ERR_MINT_INHERITCN);
}
struct memobj_anon *m = (struct memobj_anon *)si->vregion[i]->memobj;
assert(m->m.type == ANONYMOUS);
for(struct memobj_frame_list *f = m->frame_list; f != NULL; f = f->next) {
- struct frame_identity id;
- err = invoke_frame_identify(f->frame, &id);
- assert(err_is_ok(err));
+ if (f->pa == 0) {
+ struct frame_identity id;
+ err = invoke_frame_identify(f->frame, &id);
+ assert(err_is_ok(err));
+ f->pa = id.base;
+ }
char str[128];
- snprintf(str, 128, "%" PRIxGENVADDR ":%" PRIxGENPADDR ":%zx ", si->base[i] + f->offset, id.base, f->size);
+ snprintf(str, 128, "%" PRIxGENVADDR ":%" PRIxGENPADDR ":%zx ",
+ si->base[i] + f->offset, f->pa + f->foffset, f->size);
strcat(envstr, str);
}
}
return err;
}
- /* Create pagecn */
+ /* Create pagecn: default L2 CNode size */
t1.cnode = si->rootcn;
t1.slot = ROOTCN_SLOT_PAGECN;
- err = cnode_create_raw(t1, &cnode, PAGE_CNODE_SLOTS, NULL);
+ err = cnode_create_raw(t1, &cnode, ObjType_L2CNode, L2_CNODE_SLOTS, NULL);
if (err_is_fail(err)) {
return err_push(err, SPAWN_ERR_CREATE_PAGECN);
}
DECL(CAP_ENDPOINT_EPBUFLEN, struct capability, u.endpoint.epbuflen);
DECL(CAP_ENDPOINT_LISTENER, struct capability, u.endpoint.listener);
+ DECL(CAP_L1CNODE_CNODE, struct capability, u.l1cnode.cnode);
+ DECL(CAP_L2CNODE_CNODE, struct capability, u.l2cnode.cnode);
+ DECL(CAP_L1CNODE_ALLOCATED_BYTES, struct capability, u.l1cnode.allocated_bytes);
+
DECL(DISP_DISABLED, struct dispatcher_shared_generic, disabled);
DECL(DISP_RUN, struct dispatcher_shared_generic, dispatcher_run);
DECL(DISP_LRPC, struct dispatcher_shared_generic, dispatcher_lrpc);
DECL(LMP_ENDPOINT_KERNPART, struct lmp_endpoint, k);
EMIT(OBJTYPE_ENDPOINT, ObjType_EndPoint);
+ EMIT(OBJTYPE_L1CNODE, ObjType_L1CNode);
+ EMIT(OBJTYPE_L2CNODE, ObjType_L2CNode);
// register offsets in save areas
#if defined (__x86_64__) || defined(__k1om__)
// error codes needed in LRPC path
EMIT(SYS_ERR_OK, SYS_ERR_OK);
+ EMIT(SYS_ERR_CAP_NOT_FOUND, SYS_ERR_CAP_NOT_FOUND);
EMIT(SYS_ERR_LMP_TARGET_DISABLED, SYS_ERR_LMP_TARGET_DISABLED);
EMIT(SYS_ERR_LMP_BUF_OVERFLOW, SYS_ERR_LMP_BUF_OVERFLOW);
EMIT(SYS_ERR_LRPC_SLOT_INVALID, SYS_ERR_LRPC_SLOT_INVALID);
EMIT(SYS_ERR_LRPC_NOT_ENDPOINT, SYS_ERR_LRPC_NOT_ENDPOINT);
+ EMIT(SYS_ERR_LRPC_NOT_L1, SYS_ERR_LRPC_NOT_L1);
+ EMIT(SYS_ERR_LRPC_NOT_L2, SYS_ERR_LRPC_NOT_L2);
/* sanity check size of various structures, so we break the build if they
* don't match */
MENUFILE=""
ARCH=""
DEBUG_SCRIPT=""
-SMP=2
+# Grab SMP from the environment; if unset, default to 2
+SMP=${SMP:-2}
usage () {
echo "Usage: $0 --menu <file> --arch <arch> [options]"
// Build the capref for the first physical address capability
struct capref phys_cap;
- phys_cap.cnode = build_cnoderef(pacn, PHYSADDRCN_BITS);
+ phys_cap.cnode = build_cnoderef(pacn, CNODE_TYPE_OTHER);
phys_cap.slot = 0;
/* copy connection into the new domain */
struct capref destep = {
- .cnode = si.rootcn,
- .slot = ROOTCN_SLOT_MONITOREP,
+ .cnode = si.taskcn,
+ .slot = TASKCN_SLOT_MONITOREP,
};
err = cap_copy(destep, monep);
if (err_is_fail(err)) {
{
errval_t err;
- // check that we have a slot in the root cnode
- if (chan->remote_cap.cnode.address != CPTR_ROOTCN) {
- printf("lrpc benchmark: not in root CN\n");
- abort();
- }
-
if (cache_benchmark) {
perfmon_setup(curdispatcher(), 0, event, umask, true);
struct capref region_for_init;
err = slot_alloc_basecn(&init_slot_alloc, 1, &region_for_init);
if (err_is_fail(err)) {
+ DEBUG_ERR(err, "slot_alloc_basecn in initialize_ram_alloc");
return err_push(err, MM_ERR_SLOT_NOSLOTS);
}
src.slot = ROOTCN_SLOT_BSPKCB;
err = cap_copy(dest, src);
if (err_is_fail(err)) {
- return err_push(err, INIT_ERR_COPY_KERNEL_CAP);
+ return err_push(err, INIT_ERR_COPY_BSP_KCB);
}
/* Give monitor the perfmon capability */
errval_t err;
/* Initialize slot allocator by passing a L2 cnode cap for it to start with */
- struct capref cnode_cap;
- err = slot_alloc_root(&cnode_cap);
- assert(err_is_ok(err));
- struct capref cnode_start_cap = { .slot = 0 };
-
- struct capref ram;
- err = ram_alloc_fixed(&ram, BASE_PAGE_BITS, 0, 0);
- assert(err_is_ok(err));
- err = cnode_create_from_mem(cnode_cap, ram, &cnode_start_cap.cnode,
- DEFAULT_CNODE_BITS);
- assert(err_is_ok(err));
+ // Use ROOTCN_SLOT_SLOT_ALLOC0 as initial cnode for mm slot allocator
+ struct capref cnode_start_cap = {
+ .cnode = {
+ .croot = CPTR_ROOTCN,
+ .cnode = ROOTCN_SLOT_ADDR(ROOTCN_SLOT_SLOT_ALLOC0),
+ .level = CNODE_TYPE_OTHER,
+ },
+ .slot = 0,
+ };
/* init slot allocator */
err = slot_prealloc_init_2(&ram_slot_alloc, MAXCHILDBITS,
/* walk bootinfo and add all unused RAM caps to allocator */
struct capref mem_cap = {
.cnode = cnode_super,
- .slot = 0,
+ .slot = 0,
};
for (int i = 0; i < bi->regions_length; i++) {
return err_push(err, LIB_ERR_RAM_ALLOC);
}
- err = cnode_create_from_mem(cnode_cap, ram, &cnode_start_cap.cnode,
- DEFAULT_CNODE_BITS);
+ err = cnode_create_from_mem(cnode_cap, ram, ObjType_CNode,
+ &cnode_start_cap.cnode, DEFAULT_CNODE_BITS);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_CNODE_CREATE_FROM_MEM);
}
void
caplock_unlock(struct domcapref cap)
{
- errval_t err = monitor_unlock_cap(cap.croot, cap.cptr, cap.bits);
+ errval_t err = monitor_unlock_cap(cap.croot, cap.cptr, cap.level);
if (err_no(err) == SYS_ERR_CAP_NOT_FOUND ||
err == err_push(SYS_ERR_CAP_NOT_FOUND, SYS_ERR_IDENTIFY_LOOKUP))
{
errval_t err;
struct capability cap;
- err = monitor_domains_cap_identify(src.croot, src.cptr, src.bits, &cap);
+ err = monitor_domains_cap_identify(src.croot, src.cptr, src.level, &cap);
if (err_is_fail(err)) {
return err;
}
{
errval_t err;
struct capability cap;
- err = monitor_domains_cap_identify(capref.croot, capref.cptr, capref.bits,
+ err = monitor_domains_cap_identify(capref.croot, capref.cptr, capref.level,
&cap);
if (err_is_fail(err)) {
return err;
err = monitor_copy_if_exists(&cap, capref);
if (err_is_ok(err)) {
err = monitor_set_cap_owner(cap_root, get_cap_addr(capref),
- get_cap_valid_bits(capref), from);
+ get_cap_level(capref), from);
}
if (err_no(err) == SYS_ERR_CAP_NOT_FOUND) {
err = SYS_ERR_OK;
if (rpc_st->delete_after && rpc_st->is_last) {
struct domcapref domcapref = get_cap_domref(capref);
err = monitor_lock_cap(domcapref.croot, domcapref.cptr,
- domcapref.bits);
+ domcapref.level);
// callers of owner_copy should already check cap lock state
PANIC_IF_ERR(err, "locking cap for true give_away failed");
assert(!(remote_relations & RRELS_COPY_BIT));
// a give_away was performed, need to unlock and set new owner
err = monitor_set_cap_owner(cap_root,
get_cap_addr(rpc_st->cap),
- get_cap_valid_bits(rpc_st->cap),
+ get_cap_level(rpc_st->cap),
rpc_st->to);
PANIC_IF_ERR(err, "updating owner after true"
" give_away failed");
send_result:
err2 = recv_copy_result__enq(from, err, get_cnode_addr(dest),
- get_cnode_valid_bits(dest), dest.slot, st);
+ get_cnode_level(dest), dest.slot, st);
if (err_is_fail(err2)) {
USER_PANIC_ERR(err2, "recv_copy_result enque failed, cap will leak");
}
if (dest == my_core_id) {
// tried to send copy to owning core, success!
err = recv_copy_result__enq(from, SYS_ERR_OK, get_cnode_addr(capref),
- get_cnode_valid_bits(capref), capref.slot,
+ get_cnode_level(capref), capref.slot,
st);
PANIC_IF_ERR(err, "sending result to request_copy sender");
}
}
result_handler(err, get_cnode_addr(res),
- get_cnode_valid_bits(res), res.slot, st);
+ get_cnode_level(res), res.slot, st);
} else if (distcap_state_is_foreign(state) && distcap_needs_locality(cap.type)) {
DEBUG_CAPOPS("capops_copy: sending copy from non-owner, forward request to owner\n");
bool locked = true;
err = monitor_delete_last(del_st->capref.croot, del_st->capref.cptr,
- del_st->capref.bits, del_st->newcap);
+ del_st->capref.level, del_st->newcap);
GOTO_IF_ERR(err, report_error);
if (err_no(err) == SYS_ERR_RAM_CAP_CREATED) {
DEBUG_CAPOPS("%s: sending reclaimed RAM to memserv.\n", __FUNCTION__);
// remote copies have been deleted, reset corresponding relations bit
err = monitor_domcap_remote_relations(del_st->capref.croot,
del_st->capref.cptr,
- del_st->capref.bits,
+ del_st->capref.level,
0, RRELS_COPY_BIT, NULL);
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "clearing remote descs bit after remote delete");
// no core with cap exists, delete local cap with cleanup
err = monitor_domcap_remote_relations(del_st->capref.croot,
del_st->capref.cptr,
- del_st->capref.bits,
+ del_st->capref.level,
0, RRELS_COPY_BIT, NULL);
if (err_is_fail(err)) {
if (err_no(err) == SYS_ERR_CAP_NOT_FOUND) {
}
err = monitor_lock_cap(del_st->capref.croot, del_st->capref.cptr,
- del_st->capref.bits);
+ del_st->capref.level);
if (err_no(err) == SYS_ERR_CAP_LOCKED) {
caplock_wait(del_st->capref, &del_st->lock_qn,
MKCLOSURE(delete_trylock_cont, del_st));
uint8_t relations;
err = monitor_domcap_remote_relations(del_st->capref.croot,
del_st->capref.cptr,
- del_st->capref.bits,
+ del_st->capref.level,
0, 0, &relations);
GOTO_IF_ERR(err, report_error);
err = calloce(1, sizeof(*del_st), &del_st);
GOTO_IF_ERR(err, err_cont);
- err = monitor_domains_cap_identify(cap.croot, cap.cptr, cap.bits,
+ err = monitor_domains_cap_identify(cap.croot, cap.cptr, cap.level,
&del_st->cap);
GOTO_IF_ERR(err, free_st);
goto free_slot;
}
- err = monitor_lock_cap(domcapref.croot, domcapref.cptr, domcapref.bits);
+ err = monitor_lock_cap(domcapref.croot, domcapref.cptr, domcapref.level);
if (err_is_fail(err)) {
goto destroy_cap;
}
}
err = monitor_domcap_remote_relations(domcapref.croot, domcapref.cptr,
- domcapref.bits, relations,
+ domcapref.level, relations,
~(uint8_t)0, NULL);
if (err_is_fail(err)) {
goto reset_owner;
unlock_cap:
send_err = monitor_unlock_cap(domcapref.croot, domcapref.cptr,
- domcapref.bits);
+ domcapref.level);
if (err_is_fail(send_err)) {
USER_PANIC_ERR(send_err, "failed to unlock cap while handling move failure");
}
}
struct capability cap;
- err = monitor_domains_cap_identify(capref.croot, capref.cptr, capref.bits, &cap);
+ err = monitor_domains_cap_identify(capref.croot, capref.cptr, capref.level, &cap);
if (err_is_fail(err)) {
return err;
}
return MON_ERR_CAP_MOVE;
}
- err = monitor_lock_cap(capref.croot, capref.cptr, capref.bits);
+ err = monitor_lock_cap(capref.croot, capref.cptr, capref.level);
if (err_is_fail(err)) {
return err;
}
uint8_t relations;
- err = monitor_domcap_remote_relations(capref.croot, capref.cptr, capref.bits,
+ err = monitor_domcap_remote_relations(capref.croot, capref.cptr, capref.level,
0, 0, &relations);
if (err_is_fail(err)) {
caplock_unlock(capref);
}
GOTO_IF_ERR(err, report_error);
- err = monitor_lock_cap(cap.croot, cap.cptr, cap.bits);
+ err = monitor_lock_cap(cap.croot, cap.cptr, cap.level);
GOTO_IF_ERR(err, report_error);
struct retrieve_rpc_st *rst = NULL;
rst->result_handler = result_handler;
rst->st = st;
- err = monitor_domains_cap_identify(cap.croot, cap.cptr, cap.bits, &rst->rawcap);
+ err = monitor_domains_cap_identify(cap.croot, cap.cptr, cap.level, &rst->rawcap);
GOTO_IF_ERR(err, free_st);
err = monitor_get_domcap_owner(cap, &rst->prev_owner);
rst->relations = relations | remote_relations | RRELS_COPY_BIT;
err = monitor_set_cap_owner(cap_root, get_cap_addr(cap),
- get_cap_valid_bits(cap),
+ get_cap_level(cap),
rst->from);
delete_cap:
}
err = monitor_domcap_remote_relations(rst->cap.croot, rst->cap.cptr,
- rst->cap.bits, relations, 0xFF,
+ rst->cap.level, relations, 0xFF,
NULL);
PANIC_IF_ERR(err, "setting rrels for retrieved cap");
struct capability cap;
err = monitor_domains_cap_identify(req_st->check.src.croot,
req_st->check.src.cptr,
- req_st->check.src.bits, &cap);
+ req_st->check.src.level, &cap);
GOTO_IF_ERR(err, err_cont);
req_st->queue_elem.cont = retype_request__send;
// of descendents on any core.
err = monitor_domcap_remote_relations(check_st->src.croot,
check_st->src.cptr,
- check_st->src.bits,
+ check_st->src.level,
RRELS_DESC_BIT,
RRELS_DESC_BIT, NULL);
goto cont_err;
}
err = monitor_lock_cap(check_st->src.croot, check_st->src.cptr,
- check_st->src.bits);
+ check_st->src.level);
GOTO_IF_ERR(err, cont_err);
err = capsend_find_descendants(check_st->src, find_descendants__rx,
struct domcapref *destcn = &output->destcn;
assert(capcmp(src->croot, destcn->croot));
err = monitor_create_caps(src->croot, check->type, check->objsize,
- check->count, src->cptr, src->bits,
- check->offset, destcn->cptr, destcn->bits,
+ check->count, src->cptr, src->level,
+ check->offset, destcn->cptr, destcn->level,
output->start_slot);
}
struct result_closure cont = output->cont;
void
capops_retype(enum objtype type, size_t objsize, size_t count, struct capref croot,
- capaddr_t dest_cn, uint8_t dest_bits, cslot_t dest_slot,
- capaddr_t src, uint8_t src_bits, gensize_t offset,
+ capaddr_t dest_cn, uint8_t dest_level, cslot_t dest_slot,
+ capaddr_t src, uint8_t src_level, gensize_t offset,
retype_result_handler_t result_handler, void *st)
{
errval_t err;
struct retype_request_st *rtp_req_st;
struct local_retype_st *rtp_loc_st;
- err = invoke_cnode_get_state(croot, src, src_bits, &src_state);
+ err = invoke_cnode_get_state(croot, src, src_level, &src_state);
GOTO_IF_ERR(err, err_cont);
if (distcap_state_is_busy(src_state)) {
goto err_cont;
}
- err = invoke_cnode_retype(croot, src, offset, type, objsize, count,
- dest_cn, dest_slot, dest_bits);
+ // TODO: propagate proper rootcn addrs through monitor retype
+ capaddr_t root_cptr = get_cap_addr(croot);
+
+ err = invoke_cnode_retype(croot, root_cptr, src, offset, type, objsize,
+ count, root_cptr, dest_cn, dest_level,
+ dest_slot);
if (err_no(err) != SYS_ERR_RETRY_THROUGH_MONITOR) {
goto err_cont;
}
rtp_req_st->check.src = (struct domcapref){
.croot = croot,
.cptr = src,
- .bits = src_bits,
+ .level = src_level,
};
rtp_req_st->output.destcn = (struct domcapref){
.croot = croot,
.cptr = dest_cn,
- .bits = dest_bits,
+ .level = dest_level,
};
rtp_req_st->output.start_slot = dest_slot;
rtp_req_st->output.cont = MKRESCONT(result_handler, st);
rtp_loc_st->check.src = (struct domcapref){
.croot = croot,
.cptr = src,
- .bits = src_bits,
+ .level = src_level,
};
rtp_loc_st->output.destcn = (struct domcapref){
.croot = croot,
.cptr = dest_cn,
- .bits = dest_bits,
+ .level = dest_level,
};
rtp_loc_st->output.start_slot = dest_slot;
rtp_loc_st->output.cont = MKRESCONT(result_handler, st);
err = calloce(1, sizeof(*rst), &rst);
GOTO_IF_ERR(err, report_error);
rst->cap = cap;
- err = monitor_domains_cap_identify(cap.croot, cap.cptr, cap.bits, &rst->rawcap);
+ err = monitor_domains_cap_identify(cap.croot, cap.cptr, cap.level, &rst->rawcap);
GOTO_IF_ERR(err, free_st);
rst->result_handler = result_handler;
rst->st = st;
if (err_is_ok(result)) {
// clear the remote copies bit
err = monitor_domcap_remote_relations(st->cap.croot, st->cap.cptr,
- st->cap.bits, 0, RRELS_COPY_BIT,
+ st->cap.level, 0, RRELS_COPY_BIT,
NULL);
if (err_is_fail(err) && err_no(err) != SYS_ERR_CAP_NOT_FOUND) {
DEBUG_ERR(err, "resetting remote copies bit after revoke");
err = monitor_revoke_mark_target(st->cap.croot,
st->cap.cptr,
- st->cap.bits);
+ st->cap.level);
PANIC_IF_ERR(err, "marking revoke");
DEBUG_CAPOPS("%s: mon_revoke_mark_tgt()\n", __FUNCTION__);
err = monitor_revoke_mark_target(st->cap.croot,
st->cap.cptr,
- st->cap.bits);
+ st->cap.level);
PANIC_IF_ERR(err, "marking revoke");
}
static inline errval_t
-invoke_monitor_identify_cap(capaddr_t cap, int bits, struct capability *out)
+invoke_monitor_identify_cap(capaddr_t cap, int level, struct capability *out)
{
- return cap_invoke4(cap_kernel, KernelCmd_Identify_cap, cap, bits,
+ return cap_invoke4(cap_kernel, KernelCmd_Identify_cap, cap, level,
(uintptr_t)out).error;
}
static inline errval_t dom_cnode_get_state(struct domcapref cap, distcap_state_t *ret)
{
- return invoke_cnode_get_state(cap.croot, cap.cptr, cap.bits, ret);
+ return invoke_cnode_get_state(cap.croot, cap.cptr, cap.level, ret);
}
static inline errval_t dom_cnode_delete(struct domcapref cap)
{
- return invoke_cnode_delete(cap.croot, cap.cptr, cap.bits);
+ return invoke_cnode_delete(cap.croot, cap.cptr, cap.level);
}
#endif
struct domcapref {
struct capref croot;
capaddr_t cptr;
- uint8_t bits;
+ uint8_t level;
};
static inline struct domcapref
get_cap_domref(struct capref cap)
{
- uint8_t bits = get_cap_valid_bits(cap);
return (struct domcapref) {
+ // XXX: should be get_croot_addr(cap)?
.croot = cap_root,
- .cptr = get_cap_addr(cap) >> (CPTR_BITS - bits),
- .bits = bits,
+ .cptr = get_cap_addr(cap),
+ .level = get_cap_level(cap),
};
}
monitor_get_domcap_owner(struct domcapref cap, coreid_t *ret_owner)
{
- return monitor_get_cap_owner(cap.croot, cap.cptr << (CPTR_BITS - cap.bits),
- cap.bits, ret_owner);
+ return monitor_get_cap_owner(cap.croot, cap.cptr, cap.level, ret_owner);
}
static inline errval_t
monitor_set_domcap_owner(struct domcapref cap, coreid_t owner)
{
- return monitor_set_cap_owner(cap.croot, cap.cptr << (CPTR_BITS - cap.bits),
- cap.bits, owner);
+ return monitor_set_cap_owner(cap.croot, cap.cptr, cap.level, owner);
}
/*
goto nullcap;
}
- uint8_t vbits = get_cap_valid_bits(cap);
- capaddr_t caddr = get_cap_addr(cap) >> (CPTR_BITS - vbits);
- errval_t err = invoke_monitor_identify_cap(caddr, vbits, out);
+ uint8_t level = get_cap_level(cap);
+ capaddr_t caddr = get_cap_addr(cap);
+ errval_t err = invoke_monitor_identify_cap(caddr, level, out);
if (err_no(err) == SYS_ERR_IDENTIFY_LOOKUP &&
err_no(err>>10) == SYS_ERR_CAP_NOT_FOUND)
{
* \param out Struct to return the metadata
*/
errval_t monitor_domains_cap_identify(struct capref croot, capaddr_t cap,
- int vbits, struct capability *out)
+ int level, struct capability *out)
{
assert (out != NULL);
- uint8_t rootcap_vbits = get_cap_valid_bits(croot);
+ uint8_t rootcap_level = get_cap_level(croot);
capaddr_t rootcap_addr = get_cap_addr(croot);
- rootcap_addr >>= (CPTR_BITS - rootcap_vbits);
- return invoke_monitor_identify_domains_cap(rootcap_addr, rootcap_vbits,
- cap, vbits, out);
+ return invoke_monitor_identify_domains_cap(rootcap_addr, rootcap_level,
+ cap, level, out);
}
/**
* resulting remote relation flags.
*/
errval_t monitor_domcap_remote_relations(struct capref croot, capaddr_t cptr,
- int bits, uint8_t relations,
+ int level, uint8_t relations,
uint8_t mask, uint8_t *ret_relations)
{
- uint8_t rootcap_vbits = get_cap_valid_bits(croot);
+ uint8_t rootcap_level = get_cap_level(croot);
capaddr_t rootcap_addr = get_cap_addr(croot);
- rootcap_addr >>= (CPTR_BITS - rootcap_vbits);
- return invoke_monitor_remote_relations(rootcap_addr, rootcap_vbits,
- cptr, bits, relations, mask,
+ return invoke_monitor_remote_relations(rootcap_addr, rootcap_level,
+ cptr, level, relations, mask,
ret_relations);
}
errval_t monitor_remote_relations(struct capref cap, uint8_t relations,
uint8_t mask, uint8_t *ret_relations)
{
- uint8_t bits = get_cap_valid_bits(cap);
+ uint8_t level = get_cap_level(cap);
capaddr_t cptr = get_cap_addr(cap);
- cptr >>= (CPTR_BITS - bits);
- return monitor_domcap_remote_relations(cap_root, cptr, bits, relations,
+ return monitor_domcap_remote_relations(cap_root, cptr, level, relations,
mask, ret_relations);
}
uint8_t *res)
{
capaddr_t caddr = get_cap_addr(cap);
- uint8_t bits = get_cap_valid_bits(cap);
- caddr >>= (CPTR_BITS - bits);
- return invoke_monitor_cap_has_relations(caddr, bits, mask, res);
+ uint8_t level = get_cap_level(cap);
+ return invoke_monitor_cap_has_relations(caddr, level, mask, res);
}
/**
errval_t monitor_nullify_cap(struct capref cap)
{
capaddr_t caddr = get_cap_addr(cap);
- uint8_t vbits = get_cap_valid_bits(cap);
- caddr >>= (CPTR_BITS - vbits);
- return invoke_monitor_nullify_cap(caddr, vbits);
+ uint8_t level = get_cap_level(cap);
+ return invoke_monitor_nullify_cap(caddr, level);
}
/**
coreid_t owner)
{
capaddr_t caddr = get_cnode_addr(dest);
- uint8_t vbits = get_cnode_valid_bits(dest);
+ uint8_t level = get_cnode_level(dest);
size_t slot = dest.slot;
- return invoke_monitor_create_cap((uint64_t*)cap, caddr, vbits, slot, owner);
+ return invoke_monitor_create_cap((uint64_t*)cap, caddr, level, slot, owner);
}
/**
*/
errval_t monitor_retype_remote_cap(struct capref croot, capaddr_t src, gensize_t offset,
enum objtype newtype, gensize_t objsize,
- gensize_t count, capaddr_t to, capaddr_t slot, int bits)
+ gensize_t count, capaddr_t to, capaddr_t slot, int level)
{
- uint8_t rootcap_vbits = get_cap_valid_bits(croot);
+ uint8_t rootcap_level = get_cap_level(croot);
capaddr_t rootcap_addr = get_cap_addr(croot);
- rootcap_addr >>= (CPTR_BITS - rootcap_vbits);
- return invoke_monitor_remote_cap_retype(rootcap_addr, rootcap_vbits, src, offset,
- newtype, objsize, count, to, slot, bits);
+ return invoke_monitor_remote_cap_retype(rootcap_addr, rootcap_level, src, offset,
+ newtype, objsize, count, to, slot, level);
}
errval_t monitor_create_caps(struct capref croot, enum objtype newtype,
gensize_t objsize, size_t count, capaddr_t src,
- int src_bits, size_t offset, capaddr_t dest_cn,
- int dest_bits, cslot_t dest_slot)
+ int src_level, size_t offset, capaddr_t dest_cn,
+ int dest_level, cslot_t dest_slot)
{
- uint8_t rootcap_vbits = get_cap_valid_bits(croot);
+ uint8_t rootcap_level = get_cap_level(croot);
capaddr_t rootcap_addr = get_cap_addr(croot);
- rootcap_addr >>= (CPTR_BITS - rootcap_vbits);
- return invoke_monitor_remote_cap_retype(rootcap_addr, rootcap_vbits, src, offset,
+ return invoke_monitor_remote_cap_retype(rootcap_addr, rootcap_level, src, offset,
newtype, objsize, count, dest_cn,
- dest_slot, dest_bits);
+ dest_slot, dest_level);
}
errval_t monitor_copy_if_exists(struct capability* cap, struct capref dest)
{
capaddr_t caddr = get_cnode_addr(dest);
- uint8_t vbits = get_cnode_valid_bits(dest);
+ uint8_t level = get_cnode_level(dest);
size_t slot = dest.slot;
- return invoke_monitor_copy_existing((uint64_t*)cap, caddr, vbits, slot);
+ return invoke_monitor_copy_existing((uint64_t*)cap, caddr, level, slot);
}
/**
* \brief Determine the current owner of a cap and its copies.
*/
-errval_t monitor_get_cap_owner(struct capref croot, capaddr_t cptr, int bits, coreid_t *ret_owner)
+errval_t monitor_get_cap_owner(struct capref croot, capaddr_t cptr, int level, coreid_t *ret_owner)
{
capaddr_t root_addr = get_cap_addr(croot);
- uint8_t root_bits = get_cap_valid_bits(croot);
- root_addr >>= (CPTR_BITS - root_bits);
- cptr >>= (CPTR_BITS - bits);
+ uint8_t root_level = get_cap_level(croot);
- return invoke_monitor_get_cap_owner(root_addr, root_bits, cptr, bits, ret_owner);
+ return invoke_monitor_get_cap_owner(root_addr, root_level, cptr, level, ret_owner);
}
/**
* \brief Change the owner of a cap and its copies.
*/
-errval_t monitor_set_cap_owner(struct capref croot, capaddr_t cptr, int bits, coreid_t owner)
+errval_t monitor_set_cap_owner(struct capref croot, capaddr_t cptr, int level, coreid_t owner)
{
capaddr_t root_addr = get_cap_addr(croot);
- uint8_t root_bits = get_cap_valid_bits(croot);
- root_addr >>= (CPTR_BITS - root_bits);
- cptr >>= (CPTR_BITS - bits);
+ uint8_t root_level = get_cap_level(croot);
- return invoke_monitor_set_cap_owner(root_addr, root_bits, cptr, bits, owner);
+ return invoke_monitor_set_cap_owner(root_addr, root_level, cptr, level, owner);
}
/**
* \brief Lock the cap and its copies
*/
-errval_t monitor_lock_cap(struct capref croot, capaddr_t cptr, int bits)
+errval_t monitor_lock_cap(struct capref croot, capaddr_t cptr, int level)
{
capaddr_t root_addr = get_cap_addr(croot);
- uint8_t root_bits = get_cap_valid_bits(croot);
- root_addr >>= (CPTR_BITS - root_bits);
+ uint8_t root_level = get_cap_level(croot);
- return invoke_monitor_lock_cap(root_addr, root_bits, cptr, bits);
+ return invoke_monitor_lock_cap(root_addr, root_level, cptr, level);
}
/**
* \brief Unlock the cap and its copies
*/
-errval_t monitor_unlock_cap(struct capref croot, capaddr_t cptr, int bits)
+errval_t monitor_unlock_cap(struct capref croot, capaddr_t cptr, int level)
{
capaddr_t root_addr = get_cap_addr(croot);
- uint8_t root_bits = get_cap_valid_bits(croot);
- root_addr >>= (CPTR_BITS - root_bits);
+ uint8_t root_level = get_cap_level(croot);
- return invoke_monitor_unlock_cap(root_addr, root_bits, cptr, bits);
+ return invoke_monitor_unlock_cap(root_addr, root_level, cptr, level);
}
errval_t monitor_has_descendants(struct capability *cap, bool *res)
return invoke_monitor_has_descendants((uint64_t*)cap, res);
}
-errval_t monitor_delete_last(struct capref croot, capaddr_t cptr, int bits, struct capref ret_cap)
+errval_t monitor_delete_last(struct capref croot, capaddr_t cptr, int level, struct capref ret_cap)
{
capaddr_t root_addr = get_cap_addr(croot);
- uint8_t root_bits = get_cap_valid_bits(croot);
- root_addr >>= (CPTR_BITS - root_bits);
+ uint8_t root_level = get_cap_level(croot);
capaddr_t ret_cn = get_cnode_addr(ret_cap);
- uint8_t ret_cn_bits = get_cnode_valid_bits(ret_cap);
+ uint8_t ret_cn_level = get_cnode_level(ret_cap);
cslot_t ret_slot = ret_cap.slot;
- return invoke_monitor_delete_last(root_addr, root_bits, cptr, bits,
- ret_cn, ret_cn_bits, ret_slot);
+ return invoke_monitor_delete_last(root_addr, root_level, cptr, level,
+ ret_cn, ret_cn_level, ret_slot);
}
errval_t monitor_delete_foreigns(struct capref cap)
{
capaddr_t cptr = get_cap_addr(cap);
- uint8_t bits = get_cap_valid_bits(cap);
- cptr >>= (CPTR_BITS - bits);
- return invoke_monitor_delete_foreigns(cptr, bits);
+ uint8_t level = get_cap_level(cap);
+ return invoke_monitor_delete_foreigns(cptr, level);
}
errval_t monitor_revoke_mark_target(struct capref croot, capaddr_t cptr,
- int bits)
+ int level)
{
capaddr_t root_addr = get_cap_addr(croot);
- uint8_t root_bits = get_cap_valid_bits(croot);
- root_addr >>= (CPTR_BITS - root_bits);
- return invoke_monitor_revoke_mark_target(root_addr, root_bits, cptr, bits);
+ uint8_t root_level = get_cap_level(croot);
+ return invoke_monitor_revoke_mark_target(root_addr, root_level, cptr, level);
}
errval_t monitor_revoke_mark_relations(struct capability *cap)
errval_t monitor_delete_step(struct capref ret_cap)
{
return invoke_monitor_delete_step(get_cnode_addr(ret_cap),
- get_cnode_valid_bits(ret_cap),
+ get_cnode_level(ret_cap),
ret_cap.slot);
}
errval_t monitor_clear_step(struct capref ret_cap)
{
return invoke_monitor_clear_step(get_cnode_addr(ret_cap),
- get_cnode_valid_bits(ret_cap),
+ get_cnode_level(ret_cap),
ret_cap.slot);
}
static void remote_cap_retype(struct monitor_blocking_binding *b,
struct capref croot, capaddr_t src, uint64_t offset,
uint64_t new_type, uint64_t objsize, uint64_t count,
- capaddr_t to, capaddr_t slot, int32_t to_vbits)
+ capaddr_t to, capaddr_t slot, int32_t to_level)
{
- capops_retype(new_type, objsize, count, croot, to, to_vbits, slot, src,
+ capops_retype(new_type, objsize, count, croot, to, to_level, slot, src,
CPTR_BITS, offset, retype_reply_status, (void*)b);
}
}
static void remote_cap_delete(struct monitor_blocking_binding *b,
- struct capref croot, capaddr_t src, uint8_t vbits)
+ struct capref croot, capaddr_t src, uint8_t level)
{
- struct domcapref cap = { .croot = croot, .cptr = src, .bits = vbits };
+ struct domcapref cap = { .croot = croot, .cptr = src, .level = level };
capops_delete(cap, delete_reply_status, (void*)b);
}
}
static void remote_cap_revoke(struct monitor_blocking_binding *b,
- struct capref croot, capaddr_t src, uint8_t vbits)
+ struct capref croot, capaddr_t src, uint8_t level)
{
- struct domcapref cap = { .croot = croot, .cptr = src, .bits = vbits };
+ struct domcapref cap = { .croot = croot, .cptr = src, .level = level };
capops_revoke(cap, revoke_reply_status, (void*)b);
}
* locked or in a delete already, furthermore if the function is called
* from the monitor through it's self-client binding we still create a
* copy of the capability, and need to cleanup our copy */
- uint8_t vbits = get_cap_valid_bits(cap);
- capaddr_t src = get_cap_addr(cap) >> (CPTR_BITS - vbits);
- struct domcapref dcap = { .croot = cap_root,
- .cptr = src,
- .bits = vbits };
+ struct domcapref dcap = get_cap_domref(cap);
capops_delete(dcap, cap_identify_delete_result_handler, st);
}
// copy the endpoint cap to the recipient
struct capref dest = {
- .cnode = si->rootcn,
- .slot = ROOTCN_SLOT_MONITOREP,
+ .cnode = si->taskcn,
+ .slot = TASKCN_SLOT_MONITOREP,
};
err = cap_copy(dest, b->chan.local_cap);
STATIC_ASSERT_SIZEOF(caprep, sizeof(caprep2));
memcpy(&caprep, &caprep2, sizeof(caprep));
- err = monitor_set_cap_owner(cap_root, get_cap_addr(*cap), get_cap_valid_bits(*cap), from);
+ err = monitor_set_cap_owner(cap_root, get_cap_addr(*cap), get_cap_level(*cap), from);
if (err_is_fail(err)) {
reterr = err;
memset(&caprep, 0, sizeof(caprep));
}
static errval_t
-captx_get_capref(capaddr_t cnaddr, uint8_t cnbits, cslot_t slot,
+captx_get_capref(capaddr_t cnaddr, uint8_t cnlevel, cslot_t slot,
struct capref *ret)
{
errval_t err;
- if (cnaddr == 0 && cnbits == 0 && slot == 0) {
+ if (cnaddr == 0 && cnlevel == 0 && slot == 0) {
// got a null cap, return null capref
*ret = NULL_CAP;
return SYS_ERR_OK;
}
struct capability cnode_cap;
- err = invoke_monitor_identify_cap(cnaddr, cnbits, &cnode_cap);
+ err = invoke_monitor_identify_cap(cnaddr, cnlevel, &cnode_cap);
if (err_is_fail(err)) {
return err;
}
- if (cnode_cap.type != ObjType_CNode) {
+ if (cnode_cap.type != ObjType_L1CNode &&
+ cnode_cap.type != ObjType_L2CNode) {
return SYS_ERR_CNODE_TYPE;
}
*ret = (struct capref) {
.cnode = {
- .address = cnaddr << (CPTR_BITS-cnbits),
- .address_bits = cnbits,
- .size_bits = cnode_cap.u.cnode.bits,
- .guard_size = cnode_cap.u.cnode.guard_size,
+ .cnode = cnaddr,
+ .level = cnlevel,
+ .croot = CPTR_ROOTCN,
},
.slot = slot,
};
.cnode = cnode_root,
.slot = ROOTCN_SLOT_MODULECN,
};
- err = cnode_create_raw(modulecn_cap, NULL,
- ((cslot_t)1 << MODULECN_SIZE_BITS), NULL);
+ err = cnode_create_raw(modulecn_cap, NULL, ObjType_L2CNode,
+ ((cslot_t)1 << L2_CNODE_BITS), NULL);
if (err_is_fail(err)) {
DEBUG_ERR(err, "cnode_create_raw failed");
abort();
/* copy connection into the new domain */
struct capref destep = {
- .cnode = si.rootcn,
- .slot = ROOTCN_SLOT_MONITOREP,
+ .cnode = si.taskcn,
+ .slot = TASKCN_SLOT_MONITOREP,
};
err = cap_copy(destep, monep);
if (err_is_fail(err)) {
int result = 0;
errval_t err;
struct frame_identity fi;
- struct capref cap, cap2, cnram, cncap, tmp;
+ struct capref cap, cap2, cnram, cncap;
/* get slots for results */
err = slot_alloc(&cap);
}
printf("...ok\n");
+#if 0 // this test does not make sense with the two-level cspace layout
/* split 16kB into 4kB CNode, and 3x4kB Frame */
printf(" split 16kB into 4kB CNode, and 3x4kB Frame: ");
err = cap_retype(cnram, cap2, 0, ObjType_RAM, BASE_PAGE_SIZE, 1);
}
printf("...ok\n");
+#endif
out:
// create old-style CNodeRef
printf(" setup: building old-style CNref for L1 CNode and setting L2 capref ");
- err = cnode_build_l1cnoderef(&l2_cnode.cnode, l1_cnode);
+ err = cnode_build_cnoderef(&l2_cnode.cnode, l1_cnode);
l2_cnode.slot = 0;
GOTO_IF_ERR(err, out);
printf("...ok\n");