/* Size of DCB: */
define dispatcher_size 10;
/* Size of (x86_64) VNode: */
-define vnode_size 12; /* BASE_PAGE_BITS */
+define vnode_size 4096; /* BASE_PAGE_SIZE */
/* Size of a kernel control block: */
define kcb_size 16; /* OBJBITS_KCB */
address genpaddr base; /* Base address of untyped region */
pasid pasid; /* Physical Address Space ID */
- size_bits uint8 bits; /* Address bits that untyped region bears */
-
+ size gensize bytes; /* Size of region in bytes */
};
/** The following caps are similar to the previous one **/
address genpaddr base; /* Base address of untyped region */
pasid pasid; /* Physical Address Space ID */
- size_bits uint8 bits; /* Address bits that untyped region bears */
-
+ size gensize bytes; /* Size of region in bytes */
};
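/* Editor's note (not part of the patch): under Hamlet's usual mapping this
 * changes the generated C capability representation roughly as follows; the
 * kernel code later in this patch relies on the new field (cap->u.ram.bytes):
 *
 *   // before: size stored as a power-of-two exponent
 *   struct RAM { genpaddr_t base; pasid_t pasid; uint8_t bits; };
 *   // after: size stored directly in bytes
 *   struct RAM { genpaddr_t base; pasid_t pasid; gensize_t bytes; };
 */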
cap CNode from RAM {
address genpaddr base; /* Physical base address of frame */
pasid pasid; /* Physical Address Space ID */
- size_bits uint8 bits; /* Address bits this frame bears */
+ size gensize bytes; /* Size of region in bytes */
};
cap Frame_Mapping from Frame {
address genpaddr base; /* Physical base address of frame */
pasid pasid; /* Physical Address Space ID */
- size_bits uint8 bits; /* Address bits this frame bears */
+ size gensize bytes; /* Size of region in bytes */
};
cap DevFrame_Mapping from DevFrame {
/* PML4 */
address genpaddr base; /* Base address of VNode */
- size_bits { vnode_size };
+ size { vnode_size };
};
cap VNode_x86_64_pml4_Mapping from VNode_x86_64_pml4 {
/* PDPT */
address genpaddr base; /* Base address of VNode */
- size_bits { vnode_size };
+ size { vnode_size };
};
cap VNode_x86_64_pdpt_Mapping from VNode_x86_64_pdpt {
/* Page directory */
address genpaddr base; /* Base address of VNode */
- size_bits { vnode_size };
+ size { vnode_size };
};
cap VNode_x86_64_pdir_Mapping from VNode_x86_64_pdir {
/* Page table */
address genpaddr base; /* Base address of VNode */
- size_bits { vnode_size };
+ size { vnode_size };
};
cap VNode_x86_64_ptable_Mapping from VNode_x86_64_ptable {
/* PDPT */
address genpaddr base; /* Base address of VNode */
- size_bits { vnode_size };
+ size { vnode_size };
};
cap VNode_x86_32_pdpt_Mapping from VNode_x86_32_pdpt {
/* Page directory */
address genpaddr base; /* Base address of VNode */
- size_bits { vnode_size };
+ size { vnode_size };
};
cap VNode_x86_32_pdir_Mapping from VNode_x86_32_pdir {
/* Page table */
address genpaddr base; /* Base address of VNode */
- size_bits { vnode_size };
+ size { vnode_size };
};
cap VNode_x86_32_ptable_Mapping from VNode_x86_32_ptable {
/* L1 Page Table */
address genpaddr base; /* Base address of VNode */
- size_bits { vnode_size };
+ size { vnode_size };
};
cap VNode_ARM_l1_Mapping from VNode_ARM_l1 {
cap VNode_ARM_l2 from RAM {
/* L2 Page Table */
address genpaddr base; /* Base address of VNode */
- size_bits { vnode_size };
+ size { vnode_size };
};
cap VNode_ARM_l2_Mapping from VNode_ARM_l2 {
cap VNode_AARCH64_l1 from RAM {
/* L1 Page Table */
address genpaddr base; /* Base address of VNode */
- size_bits { vnode_size };
+ size { vnode_size };
};
cap VNode_AARCH64_l1_Mapping from VNode_AARCH64_l1 {
cap VNode_AARCH64_l2 from RAM {
/* L2 Page Table */
address genpaddr base; /* Base address of VNode */
- size_bits { vnode_size };
+ size { vnode_size };
};
cap VNode_AARCH64_l2_Mapping from VNode_AARCH64_l2 {
cap VNode_AARCH64_l3 from RAM {
/* L3 Page Table */
address genpaddr base; /* Base address of VNode */
- size_bits { vnode_size };
+ size { vnode_size };
};
cap VNode_AARCH64_l3_Mapping from VNode_AARCH64_l3 {
failure DEST_TYPE_INVALID "Destination capability is of invalid type",
failure INVALID_RETYPE "Invalid source/destination type pair for retyping",
failure RETYPE_MAPPING_EXPLICIT "Invalid explicit retype to mapping type",
+ failure RETYPE_INVALID_COUNT "Invalid number of new objects requested",
failure REVOKE_FIRST "Capability already has descendants or siblings",
failure INVALID_SIZE_BITS "Invalid size for new objects",
+ failure INVALID_SIZE "Invalid size in bytes for new objects",
failure SLOTS_INVALID "Destination capability slots exceed capacity of CNode",
failure SLOTS_IN_USE "One or more destination capability slots occupied",
failure RETYPE_CREATE "Error while creating new capabilities in retype",
+ failure RETYPE_INVALID_OFFSET "Offset into source capability invalid for retype",
failure NO_LOCAL_COPIES "No copies of specified capability in local MDB",
failure RETRY_THROUGH_MONITOR "There is a remote copy of the capability, monitor must be involved to perform a cross core agreement protocol",
failure TYPE_NOT_CREATABLE "Specified capability type is not creatable at runtime. Consider retyping it from another capability.",
failure MULTI_SLOT_ALLOC_INIT "Failure in multi_slot_alloc_init()",
failure MULTI_SLOT_ALLOC_INIT_RAW "Failure in multi_slot_alloc_init_raw()",
failure SINGLE_SLOT_ALLOC "Failure in single_slot_alloc()",
+ failure RANGE_ALLOC_NOT_HEAD "Function called on non-head range allocator",
failure SLOT_ALLOC "Failure in slot_alloc()",
failure SLOT_FREE "Failure in slot_free()",
failure SLOT_UNALLOCATED "slot_free() was called on an unallocated slot",
+ failure SLOT_ALLOC_REFILL "Failure in slot_alloc_refill()",
// vspace
failure VSPACE_CURRENT_INIT "Failure in vspace_current_init()",
failure SLOT_ALLOC_INIT "Failure initialising slot allocator",
failure MM_INIT "Failure in mm_init()",
failure MM_ADD "Failure in mm_add()",
+ failure MM_ADD_MULTI "Failure in mm_add_multi()",
failure MM_FREE "Failure in mm_free()",
failure NEW_NODE "Failed allocating new node from slot allocator",
failure OUT_OF_BOUNDS "Given memory base address / size exceeds bounds of allocator",
module /armv7/sbin/memtest core=1
-# Phyiscal memory at 0MB up to 512MB
+# Physical memory at 0MB up to 256MB
# (This should match the physical memory size configured
# in the simulator, e.g. via Benchmarks.py)
-mmap map 0x0 0x20000000 1
+mmap map 0x0 0x10000000 1
message capops_revoke_done(capop_st st);
// XXX: uint32 for bits? -MN
- message capops_request_retype(caprep src, uint32 desttype, uint32 destbits, capop_st st);
+ message capops_request_retype(caprep src, uint64 offset,
+ uint32 desttype, uint64 destsize, uint64 count,
+ capop_st st);
message capops_retype_response(errval status, capop_st st);
// ownership / relation messages
rpc get_bootinfo(out errval err, out cap frame, out size frame_size);
/* Remote cap operation messages */
- rpc remote_cap_retype(in cap croot, in uint32 src,
- in uint64 objtype, in uint8 size_bits,
- in uint32 to, in uint32 slot,
+ rpc remote_cap_retype(in cap croot, in uint32 src, in uint64 offset,
+ in uint64 objtype, in uint64 objsize,
+ in uint64 count, in uint32 to, in uint32 slot,
in int dcn_vbits, out errval err);
rpc remote_cap_delete(in cap croot, in uint32 src, in uint8 vbits,
out errval err);
cap_invoke(to, 1, _a, _b, 0, 0, 0, 0, 0, 0, 0, 0, 0)
#define cap_invoke1(to, _a) \
cap_invoke(to, 0, _a, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+
/**
- * \brief Retype a capability.
+ * \brief Retype (part of) a capability.
*
- * Retypes CPtr 'cap' into 2^'objbits' caps of type 'newtype' and places them
+ * Retypes (part of) CPtr 'cap' into 'count' caps of 'objsize' bytes of type 'newtype' and places them
* into slots starting at slot 'slot' in the CNode, addressed by 'to', with
* 'bits' address bits of 'to' valid.
*
* See also cap_retype(), which wraps this.
*
- * \param root Capability of the CNode to invoke
+ * \param root Capability of the Root CNode to invoke
* \param cap Address of cap to retype.
+ * \param offset Offset into cap to retype
* \param newtype Kernel object type to retype to.
- * \param objbits Size of created objects, for variable-sized types
+ * \param objsize Size of created objects, for variable-sized types
+ * \param count Number of objects to create
* \param to Address of CNode cap to place retyped caps into.
* \param slot Slot in CNode cap to start placement.
* \param bits Number of valid address bits in 'to'.
*
* \return Error code
*/
+STATIC_ASSERT(ObjType_Num < 0xFFFF, "retype invocation argument packing does not truncate enum objtype");
static inline errval_t invoke_cnode_retype(struct capref root, capaddr_t cap,
- enum objtype newtype, int objbits,
+ gensize_t offset, enum objtype newtype,
+ gensize_t objsize, size_t count,
capaddr_t to, capaddr_t slot, int bits)
{
assert(cap != CPTR_NULL);
-
- uint8_t invoke_bits = get_cap_valid_bits(root);
- capaddr_t invoke_cptr = get_cap_addr(root) >> (CPTR_BITS - invoke_bits);
-
assert(newtype < ObjType_Num);
- assert(objbits <= 0xff);
- assert(bits <= 0xff);
- return syscall6((invoke_bits << 16) | (CNodeCmd_Retype << 8) | SYSCALL_INVOKE, invoke_cptr, cap,
- (newtype << 16) | (objbits << 8) | bits,
- to, slot).error;
+ assert(bits <= 0xFF);
+ assert(slot <= 0xFFFF);
+ return cap_invoke7(root, CNodeCmd_Retype, cap, offset,
+ ((uint64_t)slot << 32) | ((uint64_t)bits << 16) | newtype,
+ objsize, count, to).error;
}
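/* Editor's sketch (not part of the patch): the 64-bit word packed above must
 * agree with the unpacking in the AArch64 kernel handler later in this patch.
 * With word = ((uint64_t)slot << 32) | ((uint64_t)bits << 16) | newtype, the
 * kernel recovers:
 *
 *   enum objtype type   = word & 0xFFFF;          // bits 0-15
 *   uint8_t dest_vbits  = (word >> 16) & 0xFF;    // bits 16-23
 *   capaddr_t dest_slot = (word >> 32) & 0xFFFF;  // bits 32-47
 *
 * The STATIC_ASSERT above guarantees enum objtype fits in 16 bits, and
 * assert(slot <= 0xFFFF) keeps the slot within its field.
 */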
/**
#endif
invoke_frame_identify (struct capref frame, struct frame_identity *ret)
{
- uint8_t invoke_bits = get_cap_valid_bits(frame);
- capaddr_t invoke_cptr = get_cap_addr(frame) >> (CPTR_BITS - invoke_bits);
-
- uintptr_t arg1 = ((uintptr_t)invoke_bits) << 16;
- arg1 |= ((uintptr_t)FrameCmd_Identify<<8);
- arg1 |= (uintptr_t)SYSCALL_INVOKE;
- struct sysret sysret =
- syscall2(arg1, //(invoke_bits << 16) | (FrameCmd_Identify << 8) | SYSCALL_INVOKE,
- invoke_cptr);
-
assert(ret != NULL);
+
+ struct sysret sysret = cap_invoke2(frame, FrameCmd_Identify, (uint64_t)ret);
if (err_is_ok(sysret.error)) {
- ret->base = sysret.value & (~BASE_PAGE_MASK);
- ret->bits = sysret.value & BASE_PAGE_MASK;
return sysret.error;
}
- ret->base = 0;
- ret->bits = 0;
+ ret->base = 0;
+ ret->bytes = 0;
return sysret.error;
}
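/* Editor's sketch: a typical caller of the new buffer-filling identify
 * invocation ('frame' is a hypothetical capref):
 *
 *   struct frame_identity id;
 *   errval_t err = invoke_frame_identify(frame, &id);
 *   if (err_is_ok(err)) {
 *       // the frame covers [id.base, id.base + id.bytes)
 *   }
 */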
cap_invoke(to, 1, _a, _b, 0, 0, 0, 0, 0, 0, 0, 0, 0)
#define cap_invoke1(to, _a) \
cap_invoke(to, 0, _a, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+
/**
- * \brief Retype a capability.
+ * \brief Retype (part of) a capability.
*
- * Retypes CPtr 'cap' into 2^'objbits' caps of type 'newtype' and places them
- * Retypes (part of) CPtr 'cap' into 'count' caps of 'objsize' bytes of type 'newtype' and places them
* into slots starting at slot 'slot' in the CNode, addressed by 'to', with
* 'bits' address bits of 'to' valid.
*
* See also cap_retype(), which wraps this.
*
- * \param root Capability of the CNode to invoke
+ * \param root Capability of the Root CNode to invoke
* \param cap Address of cap to retype.
+ * \param offset Offset into cap to retype
* \param newtype Kernel object type to retype to.
- * \param objbits Size of created objects, for variable-sized types
+ * \param objsize Size of created objects, for variable-sized types
+ * \param count Number of objects to create
* \param to Address of CNode cap to place retyped caps into.
* \param slot Slot in CNode cap to start placement.
* \param bits Number of valid address bits in 'to'.
*
* \return Error code
*/
+STATIC_ASSERT(ObjType_Num < 0xFFFF, "retype invocation argument packing does not truncate enum objtype");
static inline errval_t invoke_cnode_retype(struct capref root, capaddr_t cap,
- enum objtype newtype, int objbits,
+ gensize_t offset, enum objtype newtype,
+ gensize_t objsize, size_t count,
capaddr_t to, capaddr_t slot, int bits)
{
assert(cap != CPTR_NULL);
- uint8_t invoke_bits = get_cap_valid_bits(root);
- capaddr_t invoke_cptr = get_cap_addr(root) >> (CPTR_BITS - invoke_bits);
-
assert(newtype < ObjType_Num);
- assert(objbits <= 0xff);
- assert(bits <= 0xff);
- return syscall6((invoke_bits << 16) | (CNodeCmd_Retype << 8) | SYSCALL_INVOKE, invoke_cptr, cap,
- (newtype << 16) | (objbits << 8) | bits,
- to, slot).error;
+ assert(bits <= 0xFF);
+ assert(slot <= 0xFFFF);
+
+ return cap_invoke8(root, CNodeCmd_Retype, cap, offset,
+ ((uint32_t)bits << 16) | newtype,
+ objsize, count, to, slot).error;
}
/**
#endif
invoke_frame_identify (struct capref frame, struct frame_identity *ret)
{
+ assert(ret != NULL);
+
uint8_t invoke_bits = get_cap_valid_bits(frame);
capaddr_t invoke_cptr = get_cap_addr(frame) >> (CPTR_BITS - invoke_bits);
arg1 |= ((uintptr_t)FrameCmd_Identify<<8);
arg1 |= (uintptr_t)SYSCALL_INVOKE;
struct sysret sysret =
- syscall2(arg1, //(invoke_bits << 16) | (FrameCmd_Identify << 8) | SYSCALL_INVOKE,
- invoke_cptr);
+ syscall3(arg1, //(invoke_bits << 16) | (FrameCmd_Identify << 8) | SYSCALL_INVOKE,
+ invoke_cptr, (uintptr_t)ret);
- assert(ret != NULL);
if (err_is_ok(sysret.error)) {
- ret->base = sysret.value & (~BASE_PAGE_MASK);
- ret->bits = sysret.value & BASE_PAGE_MASK;
return sysret.error;
}
ret->base = 0;
- ret->bits = 0;
+ ret->bytes = 0;
return sysret.error;
}
/**
- * \brief Retype a capability.
+ * \brief Retype (part of) a capability.
*
- * Retypes CPtr 'cap' into 2^'objbits' caps of type 'newtype' and places them
- * Retypes (part of) CPtr 'cap' into 'count' caps of 'objsize' bytes of type 'newtype' and places them
* into slots starting at slot 'slot' in the CNode, addressed by 'to', with
* 'bits' address bits of 'to' valid.
*
* See also cap_retype(), which wraps this.
*
- * \param root Capability of the CNode to invoke
+ * \param root Capability of the Root CNode to invoke
* \param cap Address of cap to retype.
+ * \param offset Offset into cap to retype
* \param newtype Kernel object type to retype to.
- * \param objbits Size of created objects, for variable-sized types
+ * \param objsize Size of created objects, for variable-sized types
+ * \param count Number of objects to create
* \param to Address of CNode cap to place retyped caps into.
* \param slot Slot in CNode cap to start placement.
* \param bits Number of valid address bits in 'to'.
*
* \return Error code
*/
static inline errval_t invoke_cnode_retype(struct capref root, capaddr_t cap,
- enum objtype newtype, int objbits,
+ gensize_t offset, enum objtype newtype,
+ gensize_t objsize, size_t count,
capaddr_t to, capaddr_t slot, int bits)
{
assert(cap != CPTR_NULL);
- return cap_invoke7(root, CNodeCmd_Retype, cap, newtype, objbits, to,
- slot, bits).error;
+ return cap_invoke9(root, CNodeCmd_Retype, cap, offset, newtype, objsize,
+ count, to, slot, bits).error;
}
/**
static inline errval_t invoke_frame_identify(struct capref frame,
struct frame_identity *ret)
{
- struct sysret sysret = cap_invoke1(frame, FrameCmd_Identify);
-
assert(ret != NULL);
+
+ struct sysret sysret = cap_invoke2(frame, FrameCmd_Identify, (uintptr_t)ret);
+
if (err_is_ok(sysret.error)) {
- ret->base = sysret.value & (~BASE_PAGE_MASK);
- ret->bits = sysret.value & BASE_PAGE_MASK;
return sysret.error;
}
ret->base = 0;
- ret->bits = 0;
+ ret->bytes = 0;
return sysret.error;
}
errval_t cnode_create_from_mem(struct capref dest, struct capref src,
struct cnoderef *cnoderef, uint8_t slot_bits);
-errval_t cap_retype(struct capref dest_start, struct capref src,
- enum objtype new_type, uint8_t size_bits);
+errval_t cap_retype(struct capref dest_start, struct capref src, gensize_t offset,
+ enum objtype new_type, gensize_t objsize, size_t count);
errval_t cap_create(struct capref dest, enum objtype type, uint8_t size_bits);
errval_t cap_delete(struct capref cap);
errval_t cap_revoke(struct capref cap);
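/* Editor's sketch: the new cap_retype takes an offset into the source cap, an
 * object size in bytes, and an object count, so a caller can retype part of a
 * region. Splitting a hypothetical 2 MiB RAM cap 'ram' into 512 4 KiB frames
 * starting at slot 'dest':
 *
 *   err = cap_retype(dest, ram, 0, ObjType_Frame, BASE_PAGE_SIZE, 512);
 *
 * The old bits-based call could only consume the whole source region in
 * power-of-two sized pieces.
 */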
struct cnoderef cnode; ///< cnoderef for the cnode to allocate from
struct cnode_meta *meta; ///< Linked list of meta data
struct slab_allocator slab; ///< Slab allocation
- struct thread_mutex mutex; ///< Mutex for thread safety
+ struct thread_mutex mutex; ///< Mutex for thread safety (used when is_head == true)
+ struct range_slot_allocator *next; ///< Next slot allocator
+ bool is_head; ///< Is this instance head of a chain
};
// single_slot_alloc_init_raw() requires a specific buflen
cslot_t nslots);
errval_t range_slot_alloc_init(struct range_slot_allocator *ret,
cslot_t nslots, cslot_t *retslots);
+size_t range_slot_alloc_freecount(struct range_slot_allocator *alloc);
+errval_t range_slot_alloc_refill(struct range_slot_allocator *alloc, cslot_t slots);
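/* Editor's sketch: the two new calls let clients keep an allocator topped up;
 * 'alloc' and the threshold/refill numbers are hypothetical:
 *
 *   if (range_slot_alloc_freecount(alloc) < 16) {
 *       err = range_slot_alloc_refill(alloc, 256);
 *   }
 */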
__END_DECLS
*
* @param type Object type.
*
- * @return Number of bits represented by a VNode.
+ * @return log2 of the size of a VNode object in bytes.
*/
static inline size_t vnode_objbits(enum objtype type)
{
type == ObjType_VNode_x86_32_pdir ||
type == ObjType_VNode_x86_32_ptable)
{
- return 12; // BASE_PAGE_BITS
+ return 12;
}
else if (type == ObjType_VNode_AARCH64_l1 ||
type == ObjType_VNode_AARCH64_l2 ||
}
else if (type == ObjType_VNode_ARM_l2)
{
+ // XXX: should be 1024, once we get around to untangling the ARMv7
+ // page table mess, cf. T243.
return 12;
}
}
/**
+ * Return size of vnode in bytes. This is the size of a page table page.
+ *
+ * @param type Object type.
+ *
+ * @return Size of a VNode in bytes.
+ */
+static inline size_t vnode_objsize(enum objtype type)
+{
+ // This function should be emitted by hamlet or somesuch.
+ STATIC_ASSERT(46 == ObjType_Num, "Check VNode definitions");
+
+ if (type == ObjType_VNode_x86_64_pml4 ||
+ type == ObjType_VNode_x86_64_pdpt ||
+ type == ObjType_VNode_x86_64_pdir ||
+ type == ObjType_VNode_x86_64_ptable ||
+ type == ObjType_VNode_x86_32_pdpt ||
+ type == ObjType_VNode_x86_32_pdir ||
+ type == ObjType_VNode_x86_32_ptable)
+ {
+ // XXX: cannot use BASE_PAGE_SIZE here because asmoffsets does not
+ // include the right files
+ return 4096; // BASE_PAGE_SIZE
+ }
+ else if (type == ObjType_VNode_AARCH64_l1 ||
+ type == ObjType_VNode_AARCH64_l2 ||
+ type == ObjType_VNode_AARCH64_l3)
+ {
+ return 4096;
+ }
+ else if (type == ObjType_VNode_ARM_l1)
+ {
+ // ARMv7 L1 page table is 16kB.
+ return 16384;
+ }
+ else if (type == ObjType_VNode_ARM_l2)
+ {
+ // XXX: should be 1024, once we get around to untangling the ARMv7
+ // page table mess, cf. T243.
+ return 4096;
+ }
+
+ assert(!"Page table size unknown.");
+ return 0;
+}
+
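/* Editor's note: call sites that used to pass vnode_objbits(type) to the
 * bits-based caps_create_new now pass vnode_objsize(type) as a byte count,
 * as done later in this patch ('pa', 'core_id' and 'cte' are placeholders):
 *
 *   caps_create_new(ObjType_VNode_ARM_l1, pa,
 *                   vnode_objsize(ObjType_VNode_ARM_l1), 0, core_id, cte);
 */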
+/**
* Return number of page table entries for vnode in bits.
* @param type Object type.
* @return Number of page table entries in bits
};
/**
+ * RAM capability commands
+ */
+enum ram_cmd {
+ RAMCmd_Identify, ///< Return physical address and size of RAM region
+};
+
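/* Editor's sketch: identifying a RAM cap mirrors frame identify and, per the
 * invocation table later in this patch, is served by the same kernel handler.
 * A hypothetical user-space wrapper:
 *
 *   static inline errval_t invoke_ram_identify(struct capref ram,
 *                                              struct frame_identity *ret)
 *   {
 *       return cap_invoke2(ram, RAMCmd_Identify, (uintptr_t)ret).error;
 *   }
 */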
+/**
* IRQ Table capability commands.
*/
enum irqtable_cmd {
*/
struct frame_identity {
genpaddr_t base; ///< Physical base address of frame
- uint8_t bits; ///< Size of frame, in bits
+ gensize_t bytes; ///< Size of frame, in bytes
};
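/* Editor's note: callers that decoded the old packed return value should now
 * read the fields directly; the end of the region is a plain addition:
 *
 *   // before: size packed into the low bits of the base address
 *   genpaddr_t end = id.base + ((genpaddr_t)1 << id.bits);
 *   // after:
 *   genpaddr_t end = id.base + id.bytes;
 */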
/**
struct mem_region {
genpaddr_t mr_base;///< Address of the start of the region
enum region_type mr_type;///< Type of region
- uint8_t mr_bits;///< Size as a power of two shift (not module type)
+ gensize_t mr_bytes;///< Size in bytes (not module type)
bool mr_consumed;///< Flag for user code to mark region consumed
size_t mrmod_size;///< Size in bytes (module type only)
ptrdiff_t mrmod_data;///< Offset of module string (module type only)
void mm_destroy(struct mm *mm);
errval_t mm_add(struct mm *mm, struct capref cap, uint8_t sizebits,
genpaddr_t base);
+errval_t mm_add_multi(struct mm *mm, struct capref cap, gensize_t size,
+ genpaddr_t base);
errval_t mm_alloc(struct mm *mm, uint8_t sizebits, struct capref *retcap,
genpaddr_t *retbase);
errval_t mm_alloc_range(struct mm *mm, uint8_t sizebits, genpaddr_t minbase,
uint8_t role; // Role of buffer (RX/TX)
lpaddr_t pa; // Physical address of buffer
- uint64_t bits; // Size of buffer (encoded in bits)
+ uint64_t bytes; // Size of buffer in bytes
void *va; // Virtual address of buffer
uint64_t queueid; // The queueid to which this buffer belongs
int argc
)
{
- assert(2 == argc);
+ assert(3 == argc);
+
+ struct registers_arm_syscall_args* sa = &context->syscall_args;
assert(to->type == ObjType_Frame || to->type == ObjType_DevFrame);
- assert((to->u.frame.base & BASE_PAGE_MASK) == 0);
- assert(to->u.frame.bits < BASE_PAGE_SIZE);
+ assert((get_address(to) & BASE_PAGE_MASK) == 0);
- return (struct sysret) {
- .error = SYS_ERR_OK,
- .value = to->u.frame.base | to->u.frame.bits,
- };
+ struct frame_identity *fi = (struct frame_identity *)sa->arg2;
+
+ if(!access_ok(ACCESS_WRITE, (lvaddr_t)fi, sizeof(struct frame_identity))) {
+ return SYSRET(SYS_ERR_INVALID_USER_BUFFER);
+ }
+
+ fi->base = get_address(to);
+ fi->bytes = get_size(to);
+
+ return SYSRET(SYS_ERR_OK);
}
static struct sysret
int argc
)
{
- assert(6 == argc);
+ assert(9 == argc);
struct registers_arm_syscall_args* sa = &context->syscall_args;
// Source capability cptr
capaddr_t source_cptr = sa->arg2;
- uintptr_t word = sa->arg3;
+ gensize_t offset = sa->arg3;
// Type to retype to
- enum objtype type = word >> 16;
+ uint64_t word = sa->arg4;
+ enum objtype type = word & 0xFFFF;
- // Object bits for variable-sized types
- uint8_t objbits = (word >> 8) & 0xff;
+ // Object size in bytes for variable-sized types
+ gensize_t objsize = sa->arg5;
+ // number of new objects
+ size_t count = sa->arg6;
// Destination cnode cptr
- capaddr_t dest_cnode_cptr = sa->arg4;
+ capaddr_t dest_cnode_cptr = sa->arg7;
// Destination slot number
- capaddr_t dest_slot = sa->arg5;
+ capaddr_t dest_slot = sa->arg8;
// Valid bits in destination cnode cptr
- uint8_t dest_vbits = (word & 0xff);
+ uint8_t dest_vbits = (word >> 16) & 0xFF;
- return sys_retype(root, source_cptr, type, objbits, dest_cnode_cptr,
- dest_slot, dest_vbits, from_monitor);
+ return sys_retype(root, source_cptr, offset, type, objsize, count,
+ dest_cnode_cptr, dest_slot, dest_vbits,
+ from_monitor);
}
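/* Editor's summary, derived from the unpacking above: the new ARMv7 retype
 * calling convention is
 *
 *   sa->arg2  source capability cptr
 *   sa->arg3  offset into source capability, in bytes
 *   sa->arg4  word: destination type (bits 0-15), dest cnode vbits (16-23)
 *   sa->arg5  object size in bytes
 *   sa->arg6  number of new objects
 *   sa->arg7  destination cnode cptr
 *   sa->arg8  destination slot
 */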
static struct sysret
/// Different handler for cap operations performed by the monitor
INVOCATION_HANDLER(monitor_handle_retype)
{
- INVOCATION_PRELUDE(8);
+ INVOCATION_PRELUDE(10);
errval_t err;
struct capability *root;
- err = caps_lookup_cap(&dcb_current->cspace.cap, sa->arg6,
- sa->arg7, &root, CAPRIGHTS_READ);
+ /* rootcap_addr is in sa->arg9, rootcap_vbits is in upper half of sa->arg8 */
+ uint8_t rootcap_vbits = (sa->arg8 >> 16) & 0xFF;
+ err = caps_lookup_cap(&dcb_current->cspace.cap, sa->arg9,
+ rootcap_vbits, &root, CAPRIGHTS_READ);
if (err_is_fail(err)) {
return SYSRET(err_push(err, SYS_ERR_ROOT_CAP_LOOKUP));
}
+ // mask out rootcap_vbits, so retype_common is not confused
+ sa->arg8 &= 0xFFFF;
/* XXX: this hides the first argument which retype_common doesn't know
* about */
- return handle_retype_common(root, true, context, 6);
+ return handle_retype_common(root, true, context, 9);
}
INVOCATION_HANDLER(monitor_handle_has_descendants)
arch_registers_state_t *context,
int argc)
{
- return sys_handle_kcb_identify(to);
+ assert(3 == argc);
+
+ struct registers_arm_syscall_args* sa = &context->syscall_args;
+
+ return sys_handle_kcb_identify(to, (struct frame_identity *)sa->arg2);
}
typedef struct sysret (*invocation_t)(struct capability*, arch_registers_state_t*, int);
// create cap for strings area in first slot of modulecn
assert(st->modulecn_slot == 0);
- err = caps_create_new(ObjType_Frame, mmstrings_phys, BASE_PAGE_BITS,
- BASE_PAGE_BITS, my_core_id,
+ err = caps_create_new(ObjType_Frame, mmstrings_phys, BASE_PAGE_SIZE,
+ BASE_PAGE_SIZE, my_core_id,
caps_locate_slot(CNODE(st->modulecn),
st->modulecn_slot++));
assert(err_is_ok(err));
// round up to page size for caps
remain = ROUND_UP(remain, BASE_PAGE_SIZE);
+ assert((base_addr & BASE_PAGE_MASK) == 0);
+ assert((remain & BASE_PAGE_MASK) == 0);
- // Create max-sized caps to multiboot module in module cnode
- while (remain > 0) {
- assert((base_addr & BASE_PAGE_MASK) == 0);
- assert((remain & BASE_PAGE_MASK) == 0);
-
- // determine size of next chunk
- uint8_t block_size = bitaddralign(remain, base_addr);
-
- assert(st->modulecn_slot < (1UL << st->modulecn->cap.u.cnode.bits));
- // create as DevFrame cap to avoid zeroing memory contents
- err = caps_create_new(ObjType_DevFrame, base_addr, block_size,
- block_size, my_core_id,
- caps_locate_slot(CNODE(st->modulecn),
- st->modulecn_slot++));
- assert(err_is_ok(err));
-
- // Advance by that chunk
- base_addr += ((genpaddr_t)1 << block_size);
- remain -= ((genpaddr_t)1 << block_size);
- }
+ assert(st->modulecn_slot < (1U << st->modulecn->cap.u.cnode.bits));
+ // create as DevFrame cap to avoid zeroing memory contents
+ err = caps_create_new(ObjType_DevFrame, base_addr, remain,
+ remain, my_core_id,
+ caps_locate_slot(CNODE(st->modulecn),
+ st->modulecn_slot++));
+ assert(err_is_ok(err));
// Copy multiboot module string to mmstrings area
strcpy((char *)mmstrings, MBADDR_ASSTRING(m->string));
caps_create_new(
ObjType_VNode_ARM_l1,
mem_to_local_phys((lvaddr_t)init_l1),
- vnode_objbits(ObjType_VNode_ARM_l1), 0,
+ vnode_objsize(ObjType_VNode_ARM_l1), 0,
my_core_id,
caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
);
// Map L2 into successive slots in pagecn
size_t i;
for (i = 0; i < INIT_L2_BYTES / BASE_PAGE_SIZE; i++) {
- size_t objbits_vnode = vnode_objbits(ObjType_VNode_ARM_l2);
- assert(objbits_vnode == BASE_PAGE_BITS);
+ size_t objsize_vnode = vnode_objsize(ObjType_VNode_ARM_l2);
+ assert(objsize_vnode == BASE_PAGE_SIZE);
caps_create_new(
ObjType_VNode_ARM_l2,
- mem_to_local_phys((lvaddr_t)init_l2) + (i << objbits_vnode),
- objbits_vnode, 0,
+ mem_to_local_phys((lvaddr_t)init_l2) + i * objsize_vnode,
+ objsize_vnode, 0,
my_core_id,
caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
);
TASKCN_SLOT_MON_URPC);
// XXX: Create as devframe so the memory is not zeroed out
err = caps_create_new(ObjType_DevFrame, core_data->urpc_frame_base,
- core_data->urpc_frame_bits, core_data->urpc_frame_bits,
+ 1UL << core_data->urpc_frame_bits, 1UL << core_data->urpc_frame_bits,
my_core_id, urpc_frame_cte);
assert(err_is_ok(err));
urpc_frame_cte->cap.type = ObjType_Frame;
// create cap for strings area in first slot of modulecn
assert(st->modulecn_slot == 0);
- err = caps_create_new(ObjType_Frame, mmstrings_phys, BASE_PAGE_BITS,
- BASE_PAGE_BITS, my_core_id,
+ err = caps_create_new(ObjType_Frame, mmstrings_phys, BASE_PAGE_SIZE,
+ BASE_PAGE_SIZE, my_core_id,
caps_locate_slot(CNODE(st->modulecn),
st->modulecn_slot++));
assert(err_is_ok(err));
// round up to page size for caps
remain = ROUND_UP(remain, BASE_PAGE_SIZE);
+ assert((base_addr & BASE_PAGE_MASK) == 0);
+ assert((remain & BASE_PAGE_MASK) == 0);
- // Create max-sized caps to multiboot module in module cnode
- while (remain > 0) {
- assert((base_addr & BASE_PAGE_MASK) == 0);
- assert((remain & BASE_PAGE_MASK) == 0);
-
- // determine size of next chunk
- uint8_t block_size = bitaddralign(remain, base_addr);
-
- assert(st->modulecn_slot < (1U << st->modulecn->cap.u.cnode.bits));
- // create as DevFrame cap to avoid zeroing memory contents
- err = caps_create_new(ObjType_DevFrame, base_addr, block_size,
- block_size, my_core_id,
- caps_locate_slot(CNODE(st->modulecn),
- st->modulecn_slot++));
- assert(err_is_ok(err));
-
- // Advance by that chunk
- base_addr += ((genpaddr_t)1 << block_size);
- remain -= ((genpaddr_t)1 << block_size);
- }
+ assert(st->modulecn_slot < (1U << st->modulecn->cap.u.cnode.bits));
+ // create as DevFrame cap to avoid zeroing memory contents
+ err = caps_create_new(ObjType_DevFrame, base_addr, remain,
+ remain, my_core_id,
+ caps_locate_slot(CNODE(st->modulecn),
+ st->modulecn_slot++));
+ assert(err_is_ok(err));
// Copy multiboot module string to mmstrings area
strcpy((char *)mmstrings, MBADDR_ASSTRING(m->string));
caps_create_new(
ObjType_VNode_AARCH64_l1,
mem_to_local_phys((lvaddr_t)init_l1),
- vnode_objbits(ObjType_VNode_AARCH64_l1), 0,
+ vnode_objsize(ObjType_VNode_AARCH64_l1), 0,
my_core_id,
caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
);
//STARTUP_PROGRESS();
for(size_t i = 0; i < INIT_L2_SIZE; i++) {
- size_t objbits_vnode = vnode_objbits(ObjType_VNode_AARCH64_l2);
- assert(objbits_vnode == BASE_PAGE_BITS);
+ size_t objsize_vnode = vnode_objsize(ObjType_VNode_AARCH64_l2);
+ assert(objsize_vnode == BASE_PAGE_SIZE);
caps_create_new(
ObjType_VNode_AARCH64_l2,
- mem_to_local_phys((lvaddr_t)init_l2) + (i << objbits_vnode),
- objbits_vnode, 0,
- my_core_id,
+ mem_to_local_phys((lvaddr_t)init_l2) + i * objsize_vnode,
+ objsize_vnode, 0, my_core_id,
caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
);
}
// Map L3 into successive slots in pagecn
for(size_t i = 0; i < INIT_L3_SIZE; i++) {
- size_t objbits_vnode = vnode_objbits(ObjType_VNode_AARCH64_l3);
- assert(objbits_vnode == BASE_PAGE_BITS);
- caps_create_new(
- ObjType_VNode_AARCH64_l3,
- mem_to_local_phys((lvaddr_t)init_l3) + (i << objbits_vnode),
- objbits_vnode, 0,
- my_core_id,
- caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
- );
- }
+ size_t objsize_vnode = vnode_objsize(ObjType_VNode_AARCH64_l3);
+ assert(objsize_vnode == BASE_PAGE_SIZE);
+ caps_create_new(
+ ObjType_VNode_AARCH64_l3,
+ mem_to_local_phys((lvaddr_t)init_l3) + i * objsize_vnode,
+ objsize_vnode, 0,
+ my_core_id,
+ caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
+ );
+ }
/*
* Initialize init page tables - this just wires the L1
assert(err_is_ok(err));*/
struct cte *iocap = caps_locate_slot(CNODE(spawn_state.taskcn), TASKCN_SLOT_IO);
- errval_t err = caps_create_new(ObjType_DevFrame, 0x10000000, 28, 28, my_core_id, iocap);
+ errval_t err = caps_create_new(ObjType_DevFrame, 0x10000000, 1UL << 28,
+ 1UL << 28, my_core_id, iocap);
assert(err_is_ok(err));
struct dispatcher_shared_generic *disp
TASKCN_SLOT_MON_URPC);
// XXX: Create as devframe so the memory is not zeroed out
err = caps_create_new(ObjType_DevFrame, core_data->urpc_frame_base,
- core_data->urpc_frame_bits, core_data->urpc_frame_bits,
+ 1UL << core_data->urpc_frame_bits, 1UL << core_data->urpc_frame_bits,
my_core_id, urpc_frame_cte);
assert(err_is_ok(err));
urpc_frame_cte->cap.type = ObjType_Frame;
int argc
)
{
- assert(2 == argc);
+ assert(3 == argc);
+
+ struct registers_aarch64_syscall_args* sa = &context->syscall_args;
assert(to->type == ObjType_Frame || to->type == ObjType_DevFrame);
- assert((to->u.frame.base & BASE_PAGE_MASK) == 0);
- assert(to->u.frame.bits < BASE_PAGE_SIZE);
+ assert((get_address(to) & BASE_PAGE_MASK) == 0);
- return (struct sysret) {
- .error = SYS_ERR_OK,
- .value = to->u.frame.base | to->u.frame.bits,
- };
+ struct frame_identity *fi = (struct frame_identity *)sa->arg2;
+
+ if (!access_ok(ACCESS_WRITE, (lvaddr_t)fi, sizeof(struct frame_identity))) {
+ return SYSRET(SYS_ERR_INVALID_USER_BUFFER);
+ }
+
+ fi->base = get_address(to);
+ fi->bytes = get_size(to);
+
+ return SYSRET(SYS_ERR_OK);
}
static struct sysret
int argc
)
{
- assert(6 == argc);
+ assert(8 == argc);
struct registers_aarch64_syscall_args* sa = &context->syscall_args;
// Source capability cptr
capaddr_t source_cptr = sa->arg2;
- uintptr_t word = sa->arg3;
+ gensize_t offset = sa->arg3;
// Type to retype to
- enum objtype type = word >> 16;
+ uint64_t word = sa->arg4;
+ enum objtype type = word & 0xFFFF;
- // Object bits for variable-sized types
- uint8_t objbits = (word >> 8) & 0xff;
+ // Object size in bytes for variable-sized types
+ gensize_t objsize = sa->arg5;
+ // number of new objects
+ size_t count = sa->arg6;
// Destination cnode cptr
- capaddr_t dest_cnode_cptr = sa->arg4;
+ capaddr_t dest_cnode_cptr = sa->arg7;
// Destination slot number
- capaddr_t dest_slot = sa->arg5;
+ capaddr_t dest_slot = (word >> 32) & 0xFFFF;
// Valid bits in destination cnode cptr
- uint8_t dest_vbits = (word & 0xff);
+ uint8_t dest_vbits = (word >> 16) & 0xFF;
- return sys_retype(root, source_cptr, type, objbits, dest_cnode_cptr,
- dest_slot, dest_vbits, from_monitor);
+ return sys_retype(root, source_cptr, offset, type, objsize, count,
+ dest_cnode_cptr, dest_slot, dest_vbits,
+ from_monitor);
}
static struct sysret
INVOCATION_PRELUDE(8);
errval_t err;
+ /* lookup root cap for retype:
+ * sa->arg7 is (rootcap_addr | (rootcap_vbits << 32)) */
+ capaddr_t rootcap_addr = sa->arg7 & 0xFFFFFFFF;
+ uint8_t rootcap_vbits = (sa->arg7 >> 32) & 0xFF;
struct capability *root;
- err = caps_lookup_cap(&dcb_current->cspace.cap, sa->arg6,
- sa->arg7, &root, CAPRIGHTS_READ);
+ err = caps_lookup_cap(&dcb_current->cspace.cap, rootcap_addr,
+ rootcap_vbits, &root, CAPRIGHTS_READ);
if (err_is_fail(err)) {
return SYSRET(err_push(err, SYS_ERR_ROOT_CAP_LOOKUP));
}
- /* XXX: this hides the first argument which retype_common doesn't know
+ /* XXX: this hides the last argument which retype_common doesn't know
* about */
- return handle_retype_common(root, true, context, 6);
+ return handle_retype_common(root, true, context, 8);
}
INVOCATION_HANDLER(monitor_handle_has_descendants)
arch_registers_state_t *context,
int argc)
{
- return sys_handle_kcb_identify(to);
+ assert(3 == argc);
+
+ struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+
+ return sys_handle_kcb_identify(to, (struct frame_identity *)sa->arg2);
}
typedef struct sysret (*invocation_t)(struct capability*,
int pagecn_pagemap = 0;
// Map PML4 (slot 0 in pagecn)
caps_create_new(ObjType_VNode_x86_64_pml4, mem_to_local_phys((lvaddr_t)init_pml4),
- BASE_PAGE_BITS, 0, my_core_id,
+ BASE_PAGE_SIZE, 0, my_core_id,
caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
// Map PDPT into successive slots in pagecn
for(size_t i = 0; i < INIT_PDPT_SIZE; i++) {
caps_create_new(ObjType_VNode_x86_64_pdpt,
mem_to_local_phys((lvaddr_t)init_pdpt) + i * BASE_PAGE_SIZE,
- BASE_PAGE_BITS, 0, my_core_id,
+ BASE_PAGE_SIZE, 0, my_core_id,
caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
}
// Map PDIR into successive slots in pagecn
for(size_t i = 0; i < INIT_PDIR_SIZE; i++) {
caps_create_new(ObjType_VNode_x86_64_pdir,
mem_to_local_phys((lvaddr_t)init_pdir) + i * BASE_PAGE_SIZE,
- BASE_PAGE_BITS, 0, my_core_id,
+ BASE_PAGE_SIZE, 0, my_core_id,
caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
}
// Map page tables into successive slots in pagecn
for(size_t i = 0; i < INIT_PTABLE_SIZE; i++) {
caps_create_new(ObjType_VNode_x86_64_ptable,
mem_to_local_phys((lvaddr_t)init_ptable) + i * BASE_PAGE_SIZE,
- BASE_PAGE_BITS, 0, my_core_id,
+ BASE_PAGE_SIZE, 0, my_core_id,
caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
}
// Connect all page tables to page directories.
// Map IO cap in task cnode
struct cte *iocap = caps_locate_slot(CNODE(st->taskcn), TASKCN_SLOT_SYSMEM);
err = caps_create_new(ObjType_DevFrame, XEON_PHI_SYSMEM_BASE,
- XEON_PHI_SYSMEM_SIZE_BITS, XEON_PHI_SYSMEM_SIZE_BITS,
+ 1UL << XEON_PHI_SYSMEM_SIZE_BITS,
+ 1UL << XEON_PHI_SYSMEM_SIZE_BITS,
my_core_id, iocap);
/*
* XXX: there is no IO on the xeon phi, we use this slot to put in the
*/
struct cte *mmiocap = caps_locate_slot(CNODE(st->taskcn), TASKCN_SLOT_IO);
err = caps_create_new(ObjType_DevFrame, XEON_PHI_SBOX_BASE,
- XEON_PHI_SBOX_SIZE_BITS, XEON_PHI_SBOX_SIZE_BITS,
+ 1UL << XEON_PHI_SBOX_SIZE_BITS,
+ 1UL << XEON_PHI_SBOX_SIZE_BITS,
my_core_id, mmiocap);
struct cte *coreboot = caps_locate_slot(CNODE(st->taskcn), TASKCN_SLOT_COREBOOT);
- err = caps_create_new(ObjType_DevFrame, 0, 16, 16, my_core_id, coreboot);
+ // XXX: make the 64k below a named constant
+ err = caps_create_new(ObjType_DevFrame, 0, 65536, 65536, my_core_id, coreboot);
assert(err_is_ok(err));
// use fact that cap is foreign to avoid zeroing it
assert(core_data->src_core_id != my_core_id);
err = caps_create_new(ObjType_Frame, core_data->urpc_frame_base,
- core_data->urpc_frame_bits,
- core_data->urpc_frame_bits, core_data->src_core_id,
+ 1UL << core_data->urpc_frame_bits,
+ 1UL << core_data->urpc_frame_bits, core_data->src_core_id,
urpc_frame_cte);
assert(err_is_ok(err));
lpaddr_t urpc_ptr = gen_phys_to_local_phys(urpc_frame_cte->cap.u.frame.base);
// create cap for strings area in first slot of modulecn
assert(st->modulecn_slot == 0);
- err = caps_create_new(ObjType_Frame, mmstrings_phys, BASE_PAGE_BITS,
- BASE_PAGE_BITS, my_core_id,
+ err = caps_create_new(ObjType_Frame, mmstrings_phys, BASE_PAGE_SIZE,
+ BASE_PAGE_SIZE, my_core_id,
caps_locate_slot(CNODE(st->modulecn),
st->modulecn_slot++));
assert(err_is_ok(err));
// round up to page size for caps
remain = ROUND_UP(remain, BASE_PAGE_SIZE);
-
- // Create max-sized caps to multiboot module in module cnode
- while (remain > 0) {
- assert((base_addr & BASE_PAGE_MASK) == 0);
- assert((remain & BASE_PAGE_MASK) == 0);
-
- // determine size of next chunk
- uint8_t block_size = bitaddralign(remain, base_addr);
-
- assert(st->modulecn_slot < (1UL << st->modulecn->cap.u.cnode.bits));
- // create as DevFrame cap to avoid zeroing memory contents
- err = caps_create_new(ObjType_DevFrame, base_addr, block_size,
- block_size, my_core_id,
- caps_locate_slot(CNODE(st->modulecn),
- st->modulecn_slot++));
- assert(err_is_ok(err));
-
- // Advance by that chunk
- base_addr += ((genpaddr_t)1 << block_size);
- remain -= ((genpaddr_t)1 << block_size);
- }
+ assert((base_addr & BASE_PAGE_MASK) == 0);
+ assert((remain & BASE_PAGE_MASK) == 0);
+
+ assert(st->modulecn_slot < (1U << st->modulecn->cap.u.cnode.bits));
+ // create as DevFrame cap to avoid zeroing memory contents
+ err = caps_create_new(ObjType_DevFrame, base_addr, remain,
+ remain, my_core_id,
+ caps_locate_slot(CNODE(st->modulecn),
+ st->modulecn_slot++));
+ assert(err_is_ok(err));
// Copy multiboot module string to mmstrings area
strcpy((char *)mmstrings, MBADDR_ASSTRING(m->string));
*/
caps_create_new(ObjType_VNode_ARM_l1,
mem_to_local_phys((lvaddr_t)init_l1),
- vnode_objbits(ObjType_VNode_ARM_l1), 0, my_core_id,
+ vnode_objsize(ObjType_VNode_ARM_l1), 0, my_core_id,
caps_locate_slot(CNODE(spawn_state.pagecn),
pagecn_pagemap++)
);
// Map L2 into successive slots in pagecn
size_t i;
for (i = 0; i < INIT_L2_BYTES / BASE_PAGE_SIZE; i++) {
- size_t objbits_vnode = vnode_objbits(ObjType_VNode_ARM_l2);
- assert(objbits_vnode == BASE_PAGE_BITS);
+ size_t objsize_vnode = vnode_objsize(ObjType_VNode_ARM_l2);
+ assert(objsize_vnode == BASE_PAGE_SIZE);
caps_create_new(
ObjType_VNode_ARM_l2,
- mem_to_local_phys((lvaddr_t)init_l2) + (i << objbits_vnode),
- objbits_vnode, 0, my_core_id,
+ mem_to_local_phys((lvaddr_t)init_l2) + i*objsize_vnode,
+ objsize_vnode, 0, my_core_id,
caps_locate_slot(CNODE(spawn_state.pagecn),
pagecn_pagemap++)
);
* should not be a problem.
*/
struct cte *iocap = caps_locate_slot(CNODE(spawn_state.taskcn), TASKCN_SLOT_IO);
- errval_t err = caps_create_new(ObjType_DevFrame, 0x40000000, 30, 30, my_core_id, iocap);
+ errval_t err = caps_create_new(ObjType_DevFrame, 0x40000000, 1UL << 30,
+ 1UL << 30, my_core_id, iocap);
assert(err_is_ok(err));
struct dispatcher_shared_generic *disp
// create cap for strings area in first slot of modulecn
assert(st->modulecn_slot == 0);
- err = caps_create_new(ObjType_Frame, mmstrings_phys, BASE_PAGE_BITS,
- BASE_PAGE_BITS, my_core_id,
+ err = caps_create_new(ObjType_Frame, mmstrings_phys, BASE_PAGE_SIZE,
+ BASE_PAGE_SIZE, my_core_id,
caps_locate_slot(CNODE(st->modulecn),
st->modulecn_slot++));
assert(err_is_ok(err));
remain = ROUND_UP(remain, BASE_PAGE_SIZE);
- // Create max-sized caps to multiboot module in module cnode
+ // Create a single cap covering the multiboot module in module cnode
- while (remain > 0) {
- assert((base_addr & BASE_PAGE_MASK) == 0);
- assert((remain & BASE_PAGE_MASK) == 0);
-
- // determine size of next chunk
- uint8_t block_size = bitaddralign(remain, base_addr);
-
- assert(st->modulecn_slot < (1UL << st->modulecn->cap.u.cnode.bits));
- // create as DevFrame cap to avoid zeroing memory contents
- err = caps_create_new(ObjType_DevFrame, base_addr, block_size,
- block_size, my_core_id,
- caps_locate_slot(CNODE(st->modulecn),
- st->modulecn_slot++));
- assert(err_is_ok(err));
-
- // Advance by that chunk
- base_addr += ((genpaddr_t)1 << block_size);
- remain -= ((genpaddr_t)1 << block_size);
- }
+ assert((base_addr & BASE_PAGE_MASK) == 0);
+ assert((remain & BASE_PAGE_MASK) == 0);
+
+ assert(st->modulecn_slot < (1UL << st->modulecn->cap.u.cnode.bits));
+ // create as DevFrame cap to avoid zeroing memory contents
+ err = caps_create_new(ObjType_DevFrame, base_addr, remain,
+ remain, my_core_id,
+ caps_locate_slot(CNODE(st->modulecn),
+ st->modulecn_slot++));
+ assert(err_is_ok(err));
// Copy multiboot module string to mmstrings area
strcpy((char *)mmstrings, MBADDR_ASSTRING(m->string));
genpaddr_t off = offset;
if (off + pte_count * X86_64_HUGE_PAGE_SIZE > get_size(src)) {
+ printk(LOG_NOTE, "frame offset invalid: %zx > 0x%"PRIxGENSIZE"\n",
+ off + pte_count * X86_64_HUGE_PAGE_SIZE, get_size(src));
return SYS_ERR_FRAME_OFFSET_INVALID;
}
// Calculate page access protection flags /
genpaddr_t off = offset;
if (off + pte_count * X86_64_LARGE_PAGE_SIZE > get_size(src)) {
+ printk(LOG_NOTE, "frame offset invalid: %zx > 0x%"PRIxGENSIZE"\n",
+ off + pte_count * X86_64_LARGE_PAGE_SIZE, get_size(src));
return SYS_ERR_FRAME_OFFSET_INVALID;
}
// Calculate page access protection flags /
// check offset within frame
genpaddr_t off = offset;
if (off + pte_count * X86_64_BASE_PAGE_SIZE > get_size(src)) {
- debug(SUBSYS_PAGING, "frame offset invalid\n");
+ debug(SUBSYS_PAGING, "frame offset invalid: %zx > 0x%"PRIxGENSIZE"\n",
+ off + pte_count * X86_64_BASE_PAGE_SIZE, get_size(src));
+ printk(LOG_NOTE, "frame offset invalid: %zx > 0x%"PRIxGENSIZE"\n",
+ off + pte_count * X86_64_BASE_PAGE_SIZE, get_size(src));
+ char buf[256];
+ sprint_cap(buf,256,src);
+ printk(LOG_NOTE, "src = %s\n", buf);
return SYS_ERR_FRAME_OFFSET_INVALID;
}
int pagecn_pagemap = 0;
// Map PML4 (slot 0 in pagecn)
caps_create_new(ObjType_VNode_x86_64_pml4, mem_to_local_phys((lvaddr_t)init_pml4),
- BASE_PAGE_BITS, 0, my_core_id,
+ BASE_PAGE_SIZE, 0, my_core_id,
caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
// Map PDPT into successive slots in pagecn
for(size_t i = 0; i < INIT_PDPT_SIZE; i++) {
caps_create_new(ObjType_VNode_x86_64_pdpt,
mem_to_local_phys((lvaddr_t)init_pdpt) + i * BASE_PAGE_SIZE,
- BASE_PAGE_BITS, 0, my_core_id,
+ BASE_PAGE_SIZE, 0, my_core_id,
caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
}
// Map PDIR into successive slots in pagecn
for(size_t i = 0; i < INIT_PDIR_SIZE; i++) {
caps_create_new(ObjType_VNode_x86_64_pdir,
mem_to_local_phys((lvaddr_t)init_pdir) + i * BASE_PAGE_SIZE,
- BASE_PAGE_BITS, 0, my_core_id,
+ BASE_PAGE_SIZE, 0, my_core_id,
caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
}
// Map page tables into successive slots in pagecn
for(size_t i = 0; i < INIT_PTABLE_SIZE; i++) {
caps_create_new(ObjType_VNode_x86_64_ptable,
mem_to_local_phys((lvaddr_t)init_ptable) + i * BASE_PAGE_SIZE,
- BASE_PAGE_BITS, 0, my_core_id,
+ BASE_PAGE_SIZE, 0, my_core_id,
caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
}
// Connect all page tables to page directories.
// use fact that cap is foreign to avoid zeroing it
assert(core_data->src_core_id != my_core_id);
err = caps_create_new(ObjType_Frame, core_data->urpc_frame_base,
- core_data->urpc_frame_bits,
- core_data->urpc_frame_bits, core_data->src_core_id,
+ 1UL << core_data->urpc_frame_bits,
+ 1UL << core_data->urpc_frame_bits, core_data->src_core_id,
urpc_frame_cte);
assert(err_is_ok(err));
lpaddr_t urpc_ptr = gen_phys_to_local_phys(urpc_frame_cte->cap.u.frame.base);
#include <barrelfish_kpi/lmp.h>
#include <barrelfish_kpi/dispatcher_shared_target.h>
#include <trace/trace.h>
+#include <useraccess.h>
#ifndef __k1om__
#include <vmkit.h>
#include <dev/amd_vmcb_dev.h>
bool from_monitor)
{
uint64_t source_cptr = args[0];
- uint64_t type = args[1];
- uint64_t objbits = args[2];
- uint64_t dest_cnode_cptr = args[3];
- uint64_t dest_slot = args[4];
- uint64_t dest_vbits = args[5];
+ uint64_t offset = args[1];
+ uint64_t type = args[2];
+ uint64_t objsize = args[3];
+ uint64_t objcount = args[4];
+ uint64_t dest_cnode_cptr = args[5];
+ uint64_t dest_slot = args[6];
+ uint64_t dest_vbits = args[7];
TRACE(KERNEL, SC_RETYPE, 0);
- struct sysret sr = sys_retype(root, source_cptr, type, objbits, dest_cnode_cptr,
- dest_slot, dest_vbits, from_monitor);
+ struct sysret sr = sys_retype(root, source_cptr, offset, type, objsize,
+ objcount, dest_cnode_cptr, dest_slot, dest_vbits,
+ from_monitor);
TRACE(KERNEL, SC_RETYPE, 1);
return sr;
}
static struct sysret handle_retype(struct capability *root,
int cmd, uintptr_t *args)
{
- return handle_retype_common(root, args, false);
+ return handle_retype_common(root, args, false);
}
static struct sysret handle_create(struct capability *root,
{
errval_t err;
- capaddr_t root_caddr = args[0];
- capaddr_t root_vbits = args[1];
+ capaddr_t root_caddr = args[0] & 0xFFFFFFFF;
+ capaddr_t root_vbits = (args[0] >> 32);
struct capability *root;
err = caps_lookup_cap(&dcb_current->cspace.cap, root_caddr, root_vbits,
return SYSRET(err_push(err, SYS_ERR_ROOT_CAP_LOOKUP));
}
- /* XXX: this hides the first two arguments */
- return handle_retype_common(root, &args[2], true);
+ /* This hides the first argument, which is resolved here and passed as 'root' */
+ return handle_retype_common(root, &args[1], true);
}
static struct sysret monitor_handle_has_descendants(struct capability *kernel_cap,
{
- // Return with physical base address of frame
- // XXX: pack size into bottom bits of base address
+ // Return the physical base address and size of the region in the
+ // caller-provided frame_identity buffer
- assert(to->type == ObjType_Frame || to->type == ObjType_DevFrame);
- assert((to->u.frame.base & BASE_PAGE_MASK) == 0);
- return (struct sysret) {
- .error = SYS_ERR_OK,
- .value = to->u.frame.base | to->u.frame.bits,
- };
+ assert(to->type == ObjType_Frame || to->type == ObjType_DevFrame ||
+ to->type == ObjType_RAM);
+ assert((get_address(to) & BASE_PAGE_MASK) == 0);
+
+ struct frame_identity *fi = (struct frame_identity *)args[0];
+
+ if (!access_ok(ACCESS_WRITE, (lvaddr_t)fi, sizeof(struct frame_identity))) {
+ return SYSRET(SYS_ERR_INVALID_USER_BUFFER);
+ }
+
+ fi->base = get_address(to);
+ fi->bytes = get_size(to);
+
+ return SYSRET(SYS_ERR_OK);
}
static struct sysret handle_vnode_identify(struct capability *to,
return SYSRET(err);
}
if (vmcb_cte->cap.type != ObjType_Frame ||
- vmcb_cte->cap.u.frame.bits < BASE_PAGE_BITS) {
+ vmcb_cte->cap.u.frame.bytes < BASE_PAGE_SIZE) {
return SYSRET(SYS_ERR_VMKIT_VMCB_INVALID);
}
err = caps_copy_to_cte(&dcb->guest_desc.vmcb, vmcb_cte, false, 0, 0);
return SYSRET(err);
}
if (ctrl_cte->cap.type != ObjType_Frame ||
- ctrl_cte->cap.u.frame.bits < BASE_PAGE_BITS) {
+ ctrl_cte->cap.u.frame.bytes < BASE_PAGE_SIZE) {
return SYSRET(SYS_ERR_VMKIT_CTRL_INVALID);
}
err = caps_copy_to_cte(&dcb->guest_desc.ctrl, ctrl_cte, false, 0, 0);
static struct sysret handle_kcb_identify(struct capability *to,
int cmd, uintptr_t *args)
{
- return sys_handle_kcb_identify(to);
+ return sys_handle_kcb_identify(to, (struct frame_identity *)args[0]);
}
[ObjType_KernelControlBlock] = {
[FrameCmd_Identify] = handle_kcb_identify,
},
+ [ObjType_RAM] = {
+ [RAMCmd_Identify] = handle_frame_identify,
+ },
[ObjType_Frame] = {
[FrameCmd_Identify] = handle_frame_identify,
},
return SYS_ERR_SLOT_IN_USE;
}
- struct RAM ram = { .bits = 0 };
+ struct RAM ram = { .bytes = 0 };
size_t len = sizeof(struct RAM) / sizeof(uintptr_t) + 1;
if (!has_descendants(cte) && !has_ancestors(cte)) {
switch(cap->type) {
case ObjType_RAM:
ram.base = cap->u.ram.base;
- ram.bits = cap->u.ram.bits;
+ ram.bytes = cap->u.ram.bytes;
break;
case ObjType_Frame:
ram.base = cap->u.frame.base;
- ram.bits = cap->u.frame.bits;
+ ram.bytes = cap->u.frame.bytes;
break;
case ObjType_CNode:
ram.base = cap->u.cnode.cnode;
- ram.bits = cap->u.cnode.bits + OBJBITS_CTE;
+ ram.bytes = 1UL << (cap->u.cnode.bits + OBJBITS_CTE);
break;
case ObjType_Dispatcher:
// Convert to genpaddr
ram.base = local_phys_to_gen_phys(mem_to_local_phys((lvaddr_t)cap->u.dispatcher.dcb));
- ram.bits = OBJBITS_DISPATCHER;
+ ram.bytes = 1UL << OBJBITS_DISPATCHER;
break;
default:
// Handle VNodes here
if(type_is_vnode(cap->type)) {
ram.base = get_address(cap);
- ram.bits = vnode_objbits(cap->type);
+ ram.bytes = vnode_objsize(cap->type);
}
break;
}
// have cap to return to monitor but no allocated slot and no room in
// monitor channel; have user retry over monitor rpc interface
- if (ram.bits > 0 &&
+ if (ram.bytes > 0 &&
!ret_ram_cap &&
monitor_ep.type == ObjType_EndPoint &&
err_is_fail(lmp_can_deliver_payload(&monitor_ep, len)))
return err;
}
- if(ram.bits > 0) {
+ if(ram.bytes > 0) {
// Send back as RAM cap to monitor
if (ret_ram_cap) {
if (dcb_current != monitor_ep.u.endpoint.listener) {
len, false);
}
else {
- printk(LOG_WARN, "dropping ram cap base %08"PRIxGENPADDR" bits %"PRIu8"\n", ram.base, ram.bits);
+ printk(LOG_WARN, "dropping ram cap base %08"PRIxGENPADDR" bytes 0x%"PRIxGENSIZE"\n", ram.base, ram.bytes);
}
if (err_no(err) == SYS_ERR_LMP_BUF_OVERFLOW) {
- printk(LOG_WARN, "dropped ram cap base %08"PRIxGENPADDR" bits %"PRIu8"\n", ram.base, ram.bits);
+ printk(LOG_WARN, "dropped ram cap base %08"PRIxGENPADDR" bytes 0x%"PRIxGENSIZE"\n", ram.base, ram.bytes);
err = SYS_ERR_OK;
} else {
switch (cap->type) {
case ObjType_PhysAddr:
return snprintf(buf, len,
- "physical address range cap (0x%" PRIxGENPADDR ":%u)",
- cap->u.physaddr.base, cap->u.physaddr.bits);
+ "physical address range cap (0x%" PRIxGENPADDR ":0x%zx)",
+ cap->u.physaddr.base, cap->u.physaddr.bytes);
case ObjType_RAM:
- return snprintf(buf, len, "RAM cap (0x%" PRIxGENPADDR ":%u)",
- cap->u.ram.base, cap->u.ram.bits);
+ return snprintf(buf, len, "RAM cap (0x%" PRIxGENPADDR ":0x%zx)",
+ cap->u.ram.base, cap->u.ram.bytes);
case ObjType_CNode: {
int ret = snprintf(buf, len, "CNode cap "
return snprintf(buf, len, "Dispatcher cap %p", cap->u.dispatcher.dcb);
case ObjType_Frame:
- return snprintf(buf, len, "Frame cap (0x%" PRIxGENPADDR ":%u)",
- cap->u.frame.base, cap->u.frame.bits);
+ return snprintf(buf, len, "Frame cap (0x%" PRIxGENPADDR ":0x%zx)",
+ cap->u.frame.base, cap->u.frame.bytes);
case ObjType_DevFrame:
- return snprintf(buf, len, "Device Frame cap (0x%" PRIxGENPADDR ":%u)",
- cap->u.frame.base, cap->u.devframe.bits);
+ return snprintf(buf, len, "Device Frame cap (0x%" PRIxGENPADDR ":0x%zx)",
+ cap->u.frame.base, cap->u.devframe.bytes);
case ObjType_VNode_ARM_l1:
return snprintf(buf, len, "ARM L1 table at 0x%" PRIxGENPADDR,
assert(src != NULL);
assert(dest != NULL);
+ debug(SUBSYS_CAPS, "Copying cap from %#"PRIxLPADDR" to %#"PRIxLPADDR"\n",
+ mem_to_local_phys((lvaddr_t)cte_for_cap(src)),
+ mem_to_local_phys((lvaddr_t)cte_for_cap(dest)));
+
// Reserved object bits must always be greater/equal to actual object size
assert((1UL << OBJBITS_CTE) >= sizeof(struct cte));
* to caps_create().
*
* \param type Type of objects to create.
- * \param bits Size of memory area as 2^bits.
- * \param objbits For variable-sized objects, size multiplier as 2^bits.
+ * \param srcsize Size of memory area in bytes
+ * \param objsize For variable-sized objects, size multiplier
*
* \return Number of objects to be created, or zero on error
*/
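/* Editor's example: for fixed-size objects the count is now a plain division,
 * e.g. caps_max_numobjs(ObjType_Frame, 1048576, 4096) == 256, where the old
 * bits-based version computed the same value as 1UL << (20 - 12).
 */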
// If you create more capability types you need to deal with them
// in the table below.
STATIC_ASSERT(46 == ObjType_Num, "Knowledge of all cap types");
-
-static size_t caps_numobjs(enum objtype type, uint8_t bits, uint8_t objbits)
+static size_t caps_max_numobjs(enum objtype type, gensize_t srcsize, gensize_t objsize)
{
switch(type) {
case ObjType_PhysAddr:
case ObjType_RAM:
case ObjType_Frame:
case ObjType_DevFrame:
- if (objbits > bits) {
+ if (objsize > srcsize) {
return 0;
} else {
- return 1UL << (bits - objbits);
+ return srcsize / objsize;
}
case ObjType_CNode:
- if (bits < OBJBITS_CTE || objbits > bits - OBJBITS_CTE) {
+ if (srcsize < (1UL << OBJBITS_CTE) || objsize > (srcsize / (1UL << OBJBITS_CTE))) {
return 0;
} else {
- return 1UL << (bits - OBJBITS_CTE - objbits);
+ return srcsize / objsize / (1UL << OBJBITS_CTE);
}
case ObjType_VNode_x86_64_pml4:
case ObjType_VNode_AARCH64_l2:
case ObjType_VNode_AARCH64_l3:
{
- size_t objbits_vnode = vnode_objbits(type);
- if (bits < objbits_vnode) {
+ if (srcsize < vnode_objsize(type)) {
return 0;
} else {
- return 1UL << (bits - objbits_vnode);
+ return srcsize / vnode_objsize(type);
}
}
case ObjType_Dispatcher:
- if (bits < OBJBITS_DISPATCHER) {
+ if (srcsize < 1UL << OBJBITS_DISPATCHER) {
return 0;
} else {
- return 1UL << (bits - OBJBITS_DISPATCHER);
+ return srcsize / (1UL << OBJBITS_DISPATCHER);
}
case ObjType_KernelControlBlock:
- if (bits < OBJBITS_KCB) {
+ if (srcsize < 1UL << OBJBITS_KCB) {
return 0;
} else {
- return 1UL << (bits - OBJBITS_KCB);
+ return srcsize / (1UL << OBJBITS_KCB);
}
case ObjType_Kernel:
*/
STATIC_ASSERT(46 == ObjType_Num, "Knowledge of all cap types");
-static errval_t caps_init_objects(enum objtype type, lpaddr_t lpaddr, uint8_t
- bits, uint8_t objbits, size_t numobjs)
+static errval_t caps_zero_objects(enum objtype type, lpaddr_t lpaddr,
+ gensize_t objsize, size_t count)
{
+ assert(type < ObjType_Num);
+
// Virtual address of the memory the kernel object resides in
// XXX: A better way of doing this is needed,
// as this is creating caps that the kernel cannot address.
switch (type) {
case ObjType_Frame:
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
- // XXX: SCC hack, while we don't have a devframe allocator
- if(lpaddr + ((lpaddr_t)1 << bits) < PADDR_SPACE_LIMIT) {
- memset((void*)lvaddr, 0, (lvaddr_t)1 << bits);
- } else {
- printk(LOG_WARN, "Allocating RAM at 0x%" PRIxLPADDR
- " uninitialized\n", lpaddr);
- }
- trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+ debug(SUBSYS_CAPS, "Frame: zeroing %zu bytes @%#"PRIxLPADDR"\n",
+ (size_t)objsize * count, lpaddr);
+ TRACE(KERNEL, BZERO, 1);
+ memset((void*)lvaddr, 0, objsize * count);
+ TRACE(KERNEL, BZERO, 0);
break;
case ObjType_CNode:
+ // scale objsize by size of slot for CNodes; objsize for CNodes given
+ // in slots.
+ objsize *= sizeof(struct cte);
+ debug(SUBSYS_CAPS, "CNode: zeroing %zu bytes @%#"PRIxLPADDR"\n",
+ (size_t)objsize * count, lpaddr);
+ TRACE(KERNEL, BZERO, 1);
+ memset((void*)lvaddr, 0, objsize * count);
+ TRACE(KERNEL, BZERO, 0);
+ break;
+
case ObjType_VNode_ARM_l1:
case ObjType_VNode_ARM_l2:
case ObjType_VNode_AARCH64_l1:
case ObjType_VNode_x86_64_pdir:
case ObjType_VNode_x86_64_pdpt:
case ObjType_VNode_x86_64_pml4:
+ // objsize is size of VNode; but not given as such
+ objsize = vnode_objsize(type);
+ debug(SUBSYS_CAPS, "VNode: zeroing %zu bytes @%#"PRIxLPADDR"\n",
+ (size_t)objsize * count, lpaddr);
+ TRACE(KERNEL, BZERO, 1);
+ memset((void*)lvaddr, 0, objsize * count);
+ TRACE(KERNEL, BZERO, 0);
+ break;
+
case ObjType_Dispatcher:
+ debug(SUBSYS_CAPS, "Dispatcher: zeroing %zu bytes @%#"PRIxLPADDR"\n",
+ ((size_t)1 << OBJBITS_DISPATCHER) * count, lpaddr);
+ TRACE(KERNEL, BZERO, 1);
+ memset((void*)lvaddr, 0, (1UL << OBJBITS_DISPATCHER) * count);
+ TRACE(KERNEL, BZERO, 0);
+ break;
+
case ObjType_KernelControlBlock:
+ debug(SUBSYS_CAPS, "KCB: zeroing %zu bytes @%#"PRIxLPADDR"\n",
+ ((size_t)1 << OBJBITS_KCB) * count, lpaddr);
TRACE(KERNEL, BZERO, 1);
- memset((void*)lvaddr, 0, 1UL << bits);
+ memset((void*)lvaddr, 0, (1UL << OBJBITS_KCB) * count);
TRACE(KERNEL, BZERO, 0);
break;
default:
+ debug(SUBSYS_CAPS, "Not zeroing %zu bytes @%#"PRIxLPADDR" for type %d\n",
+ (size_t)objsize * count, lpaddr, (int)type);
break;
}
/**
* \brief Create capabilities to kernel objects.
*
- * This function creates kernel objects of 'type' into the memory
- * area, based at 'addr' and of size 2^'bits', so they completely fill the
- * area. For each created kernel object, a capability is created to it and
- * put consecutively into the array of CTEs pointed to by 'caps'. The array
- * needs to have the appropriate size to hold all created caps. Some kernel
- * objects can have a variable size. In that case, 'objbits' should be non-zero
- * and give the a size multiplier as 2^'objbits'.
+ * This function creates 'count' kernel objects of 'type', each of size
+ * 'objsize' bytes, in the memory area based at 'addr'. For each created
+ * kernel object, a capability is created to it and put consecutively into
+ * the array of CTEs pointed to by 'caps'. The array needs to have the
+ * appropriate size to hold all created caps. Some kernel objects can have a
+ * variable size. In that case, 'objsize' should be non-zero and give the
+ * object size in bytes.
*
* \param type Type of objects to create.
- * \param addr Base address in the local address space.
- * \param bits Size of memory area as 2^bits.
- * \param objbits For variable-sized objects, size multiplier as 2^bits.
- * \param numobjs Number of objects to be created, from caps_numobjs()
+ * \param lpaddr Base address in the local address space.
+ * \param size Size of memory area in bytes.
+ * \param objsize For variable-sized objects, size in bytes.
+ * \param count Number of objects to be created
+ * (count <= caps_max_numobjs(type, size, objsize))
* \param dest_caps Pointer to array of CTEs to hold created caps.
*
* \return Error code
// in the table below.
STATIC_ASSERT(46 == ObjType_Num, "Knowledge of all cap types");
-static errval_t caps_create(enum objtype type, lpaddr_t lpaddr, uint8_t bits,
- uint8_t objbits, size_t numobjs, coreid_t owner,
+static errval_t caps_create(enum objtype type, lpaddr_t lpaddr, gensize_t size,
+ gensize_t objsize, size_t count, coreid_t owner,
struct cte *dest_caps)
{
errval_t err;
assert(dest_caps != NULL);
assert(type != ObjType_Null);
assert(type < ObjType_Num);
- assert(numobjs > 0);
+ assert(count > 0);
+ // objsize is 0 for non-sized types (e.g. VNodes)
+ // TODO cleanup semantics for type == CNode
+ //assert(objsize % BASE_PAGE_SIZE == 0);
assert(!type_is_mapping(type));
genpaddr_t genpaddr = local_phys_to_gen_phys(lpaddr);
+ debug(SUBSYS_CAPS, "creating caps for %#"PRIxGENPADDR
+ ", %zu bytes, objsize=%"PRIuGENSIZE
+ ", count=%zu, owner=%d, type=%d\n",
+ genpaddr, size, objsize, count, (int)owner, (int)type);
+
// Virtual address of the memory the kernel object resides in
    // XXX: A better way of doing this is needed,
    // as this creates caps that the kernel cannot address.
}
/* Initialize the created capability */
- struct capability src_cap;
- memset(&src_cap, 0, sizeof(struct capability));
- src_cap.type = type;
+ struct capability temp_cap;
+ memset(&temp_cap, 0, sizeof(struct capability));
+ temp_cap.type = type;
// XXX: Handle rights!
- src_cap.rights = CAPRIGHTS_ALLRIGHTS;
+ temp_cap.rights = CAPRIGHTS_ALLRIGHTS;
+ debug(SUBSYS_CAPS, "owner = %d, my_core_id = %d\n", owner, my_core_id);
if (owner == my_core_id) {
- // If we're creating new local objects, they need to be initialized
- err = caps_init_objects(type, lpaddr, bits, objbits, numobjs);
+ // If we're creating new local objects, they need to be cleared
+ err = caps_zero_objects(type, lpaddr, objsize, count);
if (err_is_fail(err)) {
return err;
}
/* Set the type specific fields and insert into #dest_caps */
switch(type) {
case ObjType_Frame:
- TRACE(KERNEL, BZERO, 1);
- // XXX: SCC hack, while we don't have a devframe allocator
- if(lpaddr + ((lpaddr_t)1 << bits) < PADDR_SPACE_LIMIT) {
- memset((void*)lvaddr, 0, (lvaddr_t)1 << bits);
- } else {
- printk(LOG_WARN, "Allocating RAM at 0x%" PRIxLPADDR
- " uninitialized\n", lpaddr);
- }
- TRACE(KERNEL, BZERO, 0);
- for(dest_i = 0; dest_i < numobjs; dest_i++) {
+ for(dest_i = 0; dest_i < count; dest_i++) {
// Initialize type specific fields
- src_cap.u.frame.base = genpaddr + dest_i * ((genpaddr_t)1 << objbits);
- src_cap.u.frame.bits = objbits;
- // Insert the capabilities
- err = set_cap(&dest_caps[dest_i].cap, &src_cap);
+ temp_cap.u.frame.base = genpaddr + dest_i * objsize;
+ temp_cap.u.frame.bytes = objsize;
+ assert((get_size(&temp_cap) & BASE_PAGE_MASK) == 0);
+ // Insert the capability
+ err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
if (err_is_fail(err)) {
break;
}
break;
case ObjType_PhysAddr:
- for(dest_i = 0; dest_i < numobjs; dest_i++) {
+ for(dest_i = 0; dest_i < count; dest_i++) {
// Initialize type specific fields
- src_cap.u.physaddr.base = genpaddr + dest_i * ((genpaddr_t)1 << objbits);
- src_cap.u.physaddr.bits = objbits;
- // Insert the capabilities
- err = set_cap(&dest_caps[dest_i].cap, &src_cap);
+ temp_cap.u.physaddr.base = genpaddr + dest_i * objsize;
+ temp_cap.u.physaddr.bytes = objsize;
+ // Insert the capability
+ err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
if (err_is_fail(err)) {
break;
}
break;
case ObjType_RAM:
- for(dest_i = 0; dest_i < numobjs; dest_i++) {
+ for(dest_i = 0; dest_i < count; dest_i++) {
// Initialize type specific fields
- src_cap.u.ram.base = genpaddr + dest_i * ((genpaddr_t)1 << objbits);
- src_cap.u.ram.bits = objbits;
+ temp_cap.u.ram.base = genpaddr + dest_i * objsize;
+ temp_cap.u.ram.bytes = objsize;
// Insert the capabilities
- err = set_cap(&dest_caps[dest_i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
if (err_is_fail(err)) {
break;
}
break;
case ObjType_DevFrame:
- for(dest_i = 0; dest_i < numobjs; dest_i++) {
+ for(dest_i = 0; dest_i < count; dest_i++) {
// Initialize type specific fields
- src_cap.u.devframe.base = genpaddr + dest_i * ((genpaddr_t)1 << objbits);
- src_cap.u.devframe.bits = objbits;
+ temp_cap.u.devframe.base = genpaddr + dest_i * objsize;
+ temp_cap.u.devframe.bytes = objsize;
// Insert the capabilities
- err = set_cap(&dest_caps[dest_i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
if (err_is_fail(err)) {
break;
}
case ObjType_CNode:
assert((1UL << OBJBITS_CTE) >= sizeof(struct cte));
- TRACE(KERNEL, BZERO, 1);
- memset((void*)lvaddr, 0, 1UL << bits);
- TRACE(KERNEL, BZERO, 0);
+ // TODO: make CNodes not be power-of-two sized
+ // (deferred to new CSpace layout)
+ assert((1UL << log2cl(objsize)) == objsize);
- for(dest_i = 0; dest_i < numobjs; dest_i++) {
+ for(dest_i = 0; dest_i < count; dest_i++) {
// Initialize type specific fields
- src_cap.u.cnode.cnode =
- lpaddr + dest_i * ((lpaddr_t)1 << (objbits + OBJBITS_CTE));
- src_cap.u.cnode.bits = objbits;
- src_cap.u.cnode.guard = 0;
- src_cap.u.cnode.guard_size = 0;
+ temp_cap.u.cnode.cnode =
+ lpaddr + dest_i * sizeof(struct cte) * objsize;
+ temp_cap.u.cnode.bits = log2cl(objsize);
+ temp_cap.u.cnode.guard = 0;
+ temp_cap.u.cnode.guard_size = 0;
// XXX: Handle rights!
- src_cap.u.cnode.rightsmask = CAPRIGHTS_ALLRIGHTS;
+ temp_cap.u.cnode.rightsmask = CAPRIGHTS_ALLRIGHTS;
// Insert the capability
- err = set_cap(&dest_caps[dest_i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
if (err_is_fail(err)) {
break;
}
case ObjType_VNode_ARM_l1:
{
- size_t objbits_vnode = vnode_objbits(type);
+ size_t objsize_vnode = vnode_objsize(type);
- TRACE(KERNEL, BZERO, 1);
- memset((void*)lvaddr, 0, 1UL << bits);
- TRACE(KERNEL, BZERO, 0);
-
- for(dest_i = 0; dest_i < numobjs; dest_i++) {
+ for(dest_i = 0; dest_i < count; dest_i++) {
// Initialize type specific fields
- src_cap.u.vnode_arm_l1.base =
- genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
+ temp_cap.u.vnode_arm_l1.base =
+ genpaddr + dest_i * objsize_vnode;
#ifdef __arm__
// Insert kernel/mem mappings into new table.
paging_make_good(
gen_phys_to_local_phys(
- local_phys_to_mem(src_cap.u.vnode_arm_l1.base)
+ local_phys_to_mem(temp_cap.u.vnode_arm_l1.base)
),
- 1u << objbits_vnode
+ objsize_vnode
);
#endif
// Insert the capability
- err = set_cap(&dest_caps[dest_i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
if (err_is_fail(err)) {
break;
}
case ObjType_VNode_ARM_l2:
{
- size_t objbits_vnode = vnode_objbits(type);
+ size_t objsize_vnode = vnode_objsize(type);
- TRACE(KERNEL, BZERO, 1);
- memset((void*)lvaddr, 0, 1UL << bits);
- TRACE(KERNEL, BZERO, 0);
-
- for(dest_i = 0; dest_i < numobjs; dest_i++) {
+ for(dest_i = 0; dest_i < count; dest_i++) {
// Initialize type specific fields
- src_cap.u.vnode_arm_l2.base =
- genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
+ temp_cap.u.vnode_arm_l2.base =
+ genpaddr + dest_i * objsize_vnode;
// Insert the capability
- err = set_cap(&dest_caps[dest_i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
if (err_is_fail(err)) {
break;
}
case ObjType_VNode_AARCH64_l1:
{
- size_t objbits_vnode = vnode_objbits(type);
+ size_t objsize_vnode = vnode_objsize(type);
- TRACE(KERNEL, BZERO, 1);
- memset((void*)lvaddr, 0, 1UL << bits);
- TRACE(KERNEL, BZERO, 0);
-
- for(dest_i = 0; dest_i < numobjs; dest_i++) {
+ for(dest_i = 0; dest_i < count; dest_i++) {
// Initialize type specific fields
- src_cap.u.vnode_aarch64_l1.base =
- genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
+ temp_cap.u.vnode_aarch64_l1.base =
+ genpaddr + dest_i * objsize_vnode;
#ifdef __aarch64__
// Insert kernel/mem mappings into new table.
- lpaddr_t var = gen_phys_to_local_phys(src_cap.u.vnode_aarch64_l1.base);
+ lpaddr_t var = gen_phys_to_local_phys(temp_cap.u.vnode_aarch64_l1.base);
paging_make_good(var);
#endif
// Insert the capability
- err = set_cap(&dest_caps[dest_i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
if (err_is_fail(err)) {
break;
}
case ObjType_VNode_AARCH64_l2:
{
- size_t objbits_vnode = vnode_objbits(type);
+ size_t objsize_vnode = vnode_objsize(type);
- TRACE(KERNEL, BZERO, 1);
- memset((void*)lvaddr, 0, 1UL << bits);
- TRACE(KERNEL, BZERO, 0);
-
- for(dest_i = 0; dest_i < numobjs; dest_i++) {
+ for(dest_i = 0; dest_i < count; dest_i++) {
// Initialize type specific fields
- src_cap.u.vnode_aarch64_l2.base =
- genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
+ temp_cap.u.vnode_aarch64_l2.base =
+ genpaddr + dest_i * objsize_vnode;
// Insert the capability
- err = set_cap(&dest_caps[dest_i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
if (err_is_fail(err)) {
break;
case ObjType_VNode_AARCH64_l3:
{
- size_t objbits_vnode = vnode_objbits(type);
-
- TRACE(KERNEL, BZERO, 1);
- memset((void*)lvaddr, 0, 1UL << bits);
- TRACE(KERNEL, BZERO, 0);
+ size_t objsize_vnode = vnode_objsize(type);
- for(dest_i = 0; dest_i < numobjs; dest_i++) {
+ for(dest_i = 0; dest_i < count; dest_i++) {
// Initialize type specific fields
- src_cap.u.vnode_aarch64_l3.base =
- genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
+ temp_cap.u.vnode_aarch64_l3.base =
+ genpaddr + dest_i * objsize_vnode;
// Insert the capability
- err = set_cap(&dest_caps[dest_i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
if (err_is_fail(err)) {
break;
}
case ObjType_VNode_x86_32_ptable:
{
- size_t objbits_vnode = vnode_objbits(type);
+ size_t objsize_vnode = vnode_objsize(type);
- TRACE(KERNEL, BZERO, 1);
- memset((void*)lvaddr, 0, 1UL << bits);
- TRACE(KERNEL, BZERO, 0);
-
- for(dest_i = 0; dest_i < numobjs; dest_i++) {
+ for(dest_i = 0; dest_i < count; dest_i++) {
// Initialize type specific fields
- src_cap.u.vnode_x86_32_ptable.base =
- genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
+ temp_cap.u.vnode_x86_32_ptable.base =
+ genpaddr + dest_i * objsize_vnode;
// Insert the capability
- err = set_cap(&dest_caps[dest_i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
if (err_is_fail(err)) {
break;
}
case ObjType_VNode_x86_32_pdir:
{
- size_t objbits_vnode = vnode_objbits(type);
+ size_t objsize_vnode = vnode_objsize(type);
- TRACE(KERNEL, BZERO, 1);
- memset((void*)lvaddr, 0, 1UL << bits);
- TRACE(KERNEL, BZERO, 0);
-
- for(dest_i = 0; dest_i < numobjs; dest_i++) {
+ for(dest_i = 0; dest_i < count; dest_i++) {
// Initialize type specific fields
- src_cap.u.vnode_x86_32_pdir.base =
- genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
+ temp_cap.u.vnode_x86_32_pdir.base =
+ genpaddr + dest_i * objsize_vnode;
#if defined(__i386__) && !defined(CONFIG_PAE)
// Make it a good PDE by inserting kernel/mem VSpaces
- lpaddr = gen_phys_to_local_phys(src_cap.u.vnode_x86_32_pdir.base);
+ lpaddr = gen_phys_to_local_phys(temp_cap.u.vnode_x86_32_pdir.base);
paging_x86_32_make_good_pdir(lpaddr);
#endif
// Insert the capability
- err = set_cap(&dest_caps[dest_i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
if (err_is_fail(err)) {
break;
}
case ObjType_VNode_x86_32_pdpt:
{
- size_t objbits_vnode = vnode_objbits(type);
-
- TRACE(KERNEL, BZERO, 1);
- memset((void*)lvaddr, 0, 1UL << bits);
- TRACE(KERNEL, BZERO, 0);
+ size_t objsize_vnode = vnode_objsize(type);
- for(dest_i = 0; dest_i < numobjs; dest_i++) {
+ for(dest_i = 0; dest_i < count; dest_i++) {
// Initialize type specific fields
- src_cap.u.vnode_x86_32_pdir.base =
- genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
+ temp_cap.u.vnode_x86_32_pdir.base =
+ genpaddr + dest_i * objsize_vnode;
#if defined(__i386__) && defined(CONFIG_PAE)
// Make it a good PDPTE by inserting kernel/mem VSpaces
lpaddr_t var =
- gen_phys_to_local_phys(src_cap.u.vnode_x86_32_pdpt.base);
+ gen_phys_to_local_phys(temp_cap.u.vnode_x86_32_pdpt.base);
paging_x86_32_make_good_pdpte(var);
#endif
// Insert the capability
- err = set_cap(&dest_caps[dest_i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
if (err_is_fail(err)) {
break;
}
case ObjType_VNode_x86_64_ptable:
{
- size_t objbits_vnode = vnode_objbits(type);
-
- TRACE(KERNEL, BZERO, 1);
- memset((void*)lvaddr, 0, 1UL << bits);
- TRACE(KERNEL, BZERO, 0);
+ size_t objsize_vnode = vnode_objsize(type);
- for(dest_i = 0; dest_i < numobjs; dest_i++) {
+ for(dest_i = 0; dest_i < count; dest_i++) {
// Initialize type specific fields
- src_cap.u.vnode_x86_64_ptable.base =
- genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
+ temp_cap.u.vnode_x86_64_ptable.base =
+ genpaddr + dest_i * objsize_vnode;
// Insert the capability
- err = set_cap(&dest_caps[dest_i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
if (err_is_fail(err)) {
break;
}
case ObjType_VNode_x86_64_pdir:
{
- size_t objbits_vnode = vnode_objbits(type);
+ size_t objsize_vnode = vnode_objsize(type);
- TRACE(KERNEL, BZERO, 1);
- memset((void*)lvaddr, 0, 1UL << bits);
- TRACE(KERNEL, BZERO, 0);
-
- for(dest_i = 0; dest_i < numobjs; dest_i++) {
+ for(dest_i = 0; dest_i < count; dest_i++) {
// Initialize type specific fields
- src_cap.u.vnode_x86_64_pdir.base =
- genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
+ temp_cap.u.vnode_x86_64_pdir.base =
+ genpaddr + dest_i * objsize_vnode;
// Insert the capability
- err = set_cap(&dest_caps[dest_i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
if (err_is_fail(err)) {
break;
}
case ObjType_VNode_x86_64_pdpt:
{
- size_t objbits_vnode = vnode_objbits(type);
-
- TRACE(KERNEL, BZERO, 1);
- memset((void*)lvaddr, 0, 1UL << bits);
- TRACE(KERNEL, BZERO, 0);
+ size_t objsize_vnode = vnode_objsize(type);
- for(dest_i = 0; dest_i < numobjs; dest_i++) {
+ for(dest_i = 0; dest_i < count; dest_i++) {
// Initialize type specific fields
- src_cap.u.vnode_x86_64_pdpt.base =
- genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
+ temp_cap.u.vnode_x86_64_pdpt.base =
+ genpaddr + dest_i * objsize_vnode;
// Insert the capability
- err = set_cap(&dest_caps[dest_i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
if (err_is_fail(err)) {
break;
}
case ObjType_VNode_x86_64_pml4:
{
- size_t objbits_vnode = vnode_objbits(type);
-
- TRACE(KERNEL, BZERO, 1);
- memset((void*)lvaddr, 0, 1UL << bits);
- TRACE(KERNEL, BZERO, 0);
+ size_t objsize_vnode = vnode_objsize(type);
- for(dest_i = 0; dest_i < numobjs; dest_i++) {
+ for(dest_i = 0; dest_i < count; dest_i++) {
// Initialize type specific fields
- src_cap.u.vnode_x86_64_pml4.base =
- genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
+ temp_cap.u.vnode_x86_64_pml4.base =
+ genpaddr + dest_i * objsize_vnode;
#if defined(__x86_64__) || defined(__k1om__)
// Make it a good PML4 by inserting kernel/mem VSpaces
- lpaddr_t var = gen_phys_to_local_phys(src_cap.u.vnode_x86_64_pml4.base);
+ lpaddr_t var = gen_phys_to_local_phys(temp_cap.u.vnode_x86_64_pml4.base);
paging_x86_64_make_good_pml4(var);
#endif
// Insert the capability
- err = set_cap(&dest_caps[dest_i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
if (err_is_fail(err)) {
break;
}
case ObjType_Dispatcher:
assert((1UL << OBJBITS_DISPATCHER) >= sizeof(struct dcb));
- TRACE(KERNEL, BZERO, 1);
- memset((void*)lvaddr, 0, 1UL << bits);
- TRACE(KERNEL, BZERO, 0);
- for(dest_i = 0; dest_i < numobjs; dest_i++) {
+ for(dest_i = 0; dest_i < count; dest_i++) {
// Initialize type specific fields
- src_cap.u.dispatcher.dcb = (struct dcb *)
+ temp_cap.u.dispatcher.dcb = (struct dcb *)
(lvaddr + dest_i * (1UL << OBJBITS_DISPATCHER));
// Insert the capability
- err = set_cap(&dest_caps[dest_i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &temp_cap);
if (err_is_fail(err)) {
break;
}
case ObjType_ID:
// ID type does not refer to a kernel object
assert(lpaddr == 0);
- assert(bits == 0);
- assert(objbits == 0);
- assert(numobjs == 1);
+ assert(size == 0);
+ assert(objsize == 0);
+ assert(count == 1);
// Prevent wrap around
if (id_cap_counter >= UINT32_MAX) {
}
// Generate a new ID, core_local_id monotonically increases
- src_cap.u.id.coreid = my_core_id;
- src_cap.u.id.core_local_id = id_cap_counter++;
+ temp_cap.u.id.coreid = my_core_id;
+ temp_cap.u.id.core_local_id = id_cap_counter++;
// Insert the capability
- err = set_cap(&dest_caps->cap, &src_cap);
+ err = set_cap(&dest_caps->cap, &temp_cap);
break;
case ObjType_IO:
- src_cap.u.io.start = 0;
- src_cap.u.io.end = 65535;
+ temp_cap.u.io.start = 0;
+ temp_cap.u.io.end = 65535;
/* fall through */
case ObjType_Kernel:
case ObjType_PerfMon:
// These types do not refer to a kernel object
assert(lpaddr == 0);
- assert(bits == 0);
- assert(objbits == 0);
- assert(numobjs == 1);
+ assert(size == 0);
+ assert(objsize == 0);
+ assert(count == 1);
// Insert the capability
- err = set_cap(&dest_caps->cap, &src_cap);
+ err = set_cap(&dest_caps->cap, &temp_cap);
if (err_is_ok(err)) {
dest_i = 1;
}
case ObjType_KernelControlBlock:
assert((1UL << OBJBITS_KCB) >= sizeof(struct dcb));
- for(size_t i = 0; i < numobjs; i++) {
+ for(size_t i = 0; i < count; i++) {
// Initialize type specific fields
- src_cap.u.kernelcontrolblock.kcb = (struct kcb *)
+ temp_cap.u.kernelcontrolblock.kcb = (struct kcb *)
(lvaddr + i * (1UL << OBJBITS_KCB));
// Insert the capability
- err = set_cap(&dest_caps[i].cap, &src_cap);
+ err = set_cap(&dest_caps[i].cap, &temp_cap);
if (err_is_fail(err)) {
return err;
}
return SYS_ERR_OK;
}
-/// Create caps to new kernel objects.
-errval_t caps_create_new(enum objtype type, lpaddr_t addr, size_t bits,
- size_t objbits, coreid_t owner, struct cte *caps)
+//{{{1 Capability creation
+
+/// check arguments, return true iff ok
+static bool check_arguments(enum objtype type, size_t bytes, size_t objsize, bool exact)
+{
+ /* special case Dispatcher which is 1kB right now */
+ if (type == ObjType_Dispatcher) {
+ if (bytes & 0x3FF) {
+ return false;
+ }
+ if (objsize > 0 && objsize != 1UL << OBJBITS_DISPATCHER) {
+ return false;
+ }
+
+ if (exact && bytes % (1UL << OBJBITS_DISPATCHER)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ /* Adjust objsize to be in bytes for CNodes */
+ if (type == ObjType_CNode) {
+ objsize *= sizeof(struct cte);
+ }
+
+ /* source size not multiple of BASE_PAGE_SIZE */
+ if (bytes & BASE_PAGE_MASK) {
+ debug(SUBSYS_CAPS, "source size not multiple of BASE_PAGE_SIZE\n");
+ return false;
+ }
+ /* objsize > 0 and not multiple of BASE_PAGE_SIZE */
+ if (objsize > 0 && objsize & BASE_PAGE_MASK) {
+ debug(SUBSYS_CAPS, "object size not multiple of BASE_PAGE_SIZE\n");
+ return false;
+ }
+
+ /* check that bytes can be evenly divided into objsize sized chunks */
+ if (exact && bytes > 0 && objsize > 0) {
+ if (bytes % objsize) {
+ debug(SUBSYS_CAPS, "source size cannot be evenly divided into object size-sized chunks\n");
+ }
+ return bytes % objsize == 0;
+ }
+
+ return true;
+}
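+
+/* Examples (editor's sketch): check_arguments(ObjType_RAM,
+ * 4 * BASE_PAGE_SIZE, BASE_PAGE_SIZE, true) holds, as the region splits
+ * into four page-sized chunks; check_arguments(ObjType_RAM,
+ * 4 * BASE_PAGE_SIZE, 3 * BASE_PAGE_SIZE, true) fails the exact
+ * divisibility check. */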
+
+/** Create caps to new kernel objects.
+ * This takes the size of the memory region in bytes, and the size of
+ * individual objects in bytes. The following needs to hold:
+ *   bytes % objsize == 0
+ */
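+/* Example call (editor's sketch): create four base-page RAM caps backing a
+ * freshly allocated 16 kB region:
+ *   caps_create_new(ObjType_RAM, addr, 4 * BASE_PAGE_SIZE, BASE_PAGE_SIZE,
+ *                   my_core_id, caps);
+ */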
+errval_t caps_create_new(enum objtype type, lpaddr_t addr, size_t bytes,
+ size_t objsize, coreid_t owner, struct cte *caps)
{
TRACE(KERNEL, CAP_CREATE_NEW, 0);
/* Parameter checking */
assert(type != ObjType_EndPoint); // Cap of this type cannot be created
+ debug(SUBSYS_CAPS, "caps_create_new: type = %d, addr = %#"PRIxLPADDR
+ ", bytes=%zu, objsize=%zu\n", type, addr, bytes, objsize);
+ assert(check_arguments(type, bytes, objsize, false));
+ assert(addr == 0 || check_arguments(type, bytes, objsize, true));
- size_t numobjs = caps_numobjs(type, bits, objbits);
+ size_t numobjs = caps_max_numobjs(type, bytes, objsize);
assert(numobjs > 0);
+ // XXX: Dispatcher creation is kind of hacky right now :(
+ // Consider allowing non-mappable types to be < BASE_PAGE_SIZE
+ if (type == ObjType_Dispatcher) {
+ numobjs = 1;
+ }
/* Create the new capabilities */
- errval_t err = caps_create(type, addr, bits, objbits, numobjs, owner, caps);
+ errval_t err = caps_create(type, addr, bytes, objsize, numobjs, owner, caps);
if (err_is_fail(err)) {
return err;
}
return SYS_ERR_OK;
}
-
STATIC_ASSERT(46 == ObjType_Num, "Knowledge of all cap types");
/// Retype caps
-errval_t caps_retype(enum objtype type, size_t objbits,
+/// Create `count` new caps of `type` from `offset` in src, and put them in
+/// `dest_cnode` starting at `dest_slot`.
+/// Note: currently objsize is in slots for type == ObjType_CNode
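+/// Example (editor's sketch): caps_retype(ObjType_Frame, BASE_PAGE_SIZE, 2,
+/// dest_cnode, dest_slot, src_cte, BASE_PAGE_SIZE, false) carves two 4 kB
+/// Frames out of the source region, starting one page into it.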
+errval_t caps_retype(enum objtype type, gensize_t objsize, size_t count,
struct capability *dest_cnode, cslot_t dest_slot,
- struct cte *src_cte, bool from_monitor)
+ struct cte *src_cte, gensize_t offset,
+ bool from_monitor)
{
+ // TODO List for this:
+ // * do not complain if there's non-overlapping descendants,
+ // only complain about overlapping descendants
TRACE(KERNEL, CAP_RETYPE, 0);
- size_t numobjs;
- uint8_t bits = 0;
+ size_t maxobjs;
genpaddr_t base = 0;
+ gensize_t size = 0;
errval_t err;
+ bool do_range_check = false;
/* Parameter checking */
assert(type != ObjType_Null);
assert(type < ObjType_Num);
+ if (type == ObjType_Null || type >= ObjType_Num) {
+ return SYS_ERR_INVALID_RETYPE;
+ }
- struct capability *src_cap = &src_cte->cap;
-
- TRACE_CAP_MSG("retyping", src_cte);
+ /* check that offset into source cap is multiple of BASE_PAGE_SIZE */
+ if (offset % BASE_PAGE_SIZE != 0) {
+ return SYS_ERR_RETYPE_INVALID_OFFSET;
+ }
+ assert(offset % BASE_PAGE_SIZE == 0);
+
+ // check that size is multiple of BASE_PAGE_SIZE
+ // (or zero, for fixed-size types)
+    if (type != ObjType_CNode && objsize % BASE_PAGE_SIZE != 0) {
+        printk(LOG_WARN, "%s: objsize = %"PRIuGENSIZE"\n", __FUNCTION__, objsize);
+        return SYS_ERR_INVALID_SIZE;
+    } else if (type == ObjType_CNode &&
+               (objsize * sizeof(struct cte)) % BASE_PAGE_SIZE != 0) {
+        printk(LOG_WARN, "%s: CNode: objsize = %"PRIuGENSIZE"\n", __FUNCTION__, objsize);
+        return SYS_ERR_INVALID_SIZE;
+    }
+ // TODO: clean up semantics for type == ObjType_CNode
+ assert((type == ObjType_CNode
+ && ((objsize * sizeof(struct cte)) % BASE_PAGE_SIZE == 0)) ||
+ (type != ObjType_CNode && objsize % BASE_PAGE_SIZE == 0));
/* No explicit retypes to Mapping allowed */
if (type_is_mapping(type)) {
return SYS_ERR_RETYPE_MAPPING_EXPLICIT;
}
+ struct capability *src_cap = &src_cte->cap;
+
+ TRACE_CAP_MSG("retyping", src_cte);
+
/* Check retypability */
err = is_retypeable(src_cte, src_cap->type, type, from_monitor);
if (err_is_fail(err)) {
- debug(SUBSYS_CAPS, "caps_retype: is_retypeable failed\n");
- return err;
+ if (err_no(err) != SYS_ERR_REVOKE_FIRST) {
+ printk(LOG_NOTE, "caps_retype: is_retypeable failed: %"PRIuERRV"\n", err);
+ debug(SUBSYS_CAPS, "caps_retype: is_retypeable failed\n");
+ return err;
+ } else {
+ debug(SUBSYS_CAPS,
+ "caps_retype: is_retypeable() returned SYS_ERR_REVOKE_FIRST, doing range check\n");
+ // We handle err_revoke_first fine-grained checking below, as it
+ // might happen for non-overlapping regions.
+
+ // TODO: move the range checking into is_retypeable() or even
+ // is_revoked_first(), -SG 2016-04-18
+ do_range_check = true;
+ }
+ }
+ // from here: src cap type is one of these.
+ assert(src_cap->type == ObjType_PhysAddr ||
+ src_cap->type == ObjType_RAM ||
+ src_cap->type == ObjType_Dispatcher ||
+ src_cap->type == ObjType_Frame ||
+ src_cap->type == ObjType_DevFrame);
+
+ if (src_cap->type != ObjType_Dispatcher) {
+ base = get_address(src_cap);
+ size = get_size(src_cap);
}
- /* Create Destination caps as per source and destination type */
- switch(src_cap->type) {
- case ObjType_PhysAddr:
- bits = src_cap->u.physaddr.bits;
- base = src_cap->u.physaddr.base;
- break;
-
- case ObjType_RAM:
- bits = src_cap->u.ram.bits;
- base = src_cap->u.ram.base;
- break;
-
- case ObjType_Dispatcher:
- bits = base = 0;
- break;
-
- case ObjType_Frame:
- bits = src_cap->u.frame.bits;
- base = src_cap->u.frame.base;
- break;
-
- case ObjType_DevFrame:
- bits = src_cap->u.devframe.bits;
- base = src_cap->u.devframe.base;
- break;
+ maxobjs = caps_max_numobjs(type, get_size(src_cap), objsize);
+ debug(SUBSYS_CAPS, "maximum possible new object count: %zu\n", maxobjs);
- default:
- panic("Unreachable case");
+ if (maxobjs == 0) {
+ debug(SUBSYS_CAPS, "caps_retype: maxobjs == 0\n");
+ return SYS_ERR_INVALID_SIZE;
}
- /* determine number of objects to be created */
- numobjs = caps_numobjs(type, bits, objbits);
-
- if (numobjs == 0) {
- debug(SUBSYS_CAPS, "caps_retype: numobjs == 0\n");
- return SYS_ERR_INVALID_SIZE_BITS;
+ if (count > maxobjs) {
+ debug(SUBSYS_CAPS, "caps_retype: maxobjs = %zu, count = %zu\n", maxobjs, count);
+ return SYS_ERR_RETYPE_INVALID_COUNT;
+ }
+ // from here: count <= maxobjs
+ assert(count <= maxobjs);
+ // make sure nobody calls with the old behaviour
+ if (count == 0) {
+ return SYS_ERR_RETYPE_INVALID_COUNT;
+ }
+ assert(count > 0);
+
+ /* check that we can create `count` objs from `offset` in source, and
+ * update base accordingly */
+ if (src_cap->type != ObjType_Dispatcher) {
+ // TODO: convince ourselves that this is the only condition on offset
+ if (offset + count * objsize > get_size(src_cap)) {
+ debug(SUBSYS_CAPS, "caps_retype: cannot create all %zu objects"
+ " of size 0x%zx from offset 0x%zx\n", count, objsize, offset);
+ return SYS_ERR_RETYPE_INVALID_OFFSET;
+ }
+ // adjust base address for new objects
+ base += offset;
+
+ // Check whether we got SYS_ERR_REVOKE_FIRST because of
+ // non-overlapping child
+ if (do_range_check) {
+ int find_range_result = 0;
+ struct cte *found_cte = NULL;
+ err = mdb_find_range(get_type_root(src_cap->type), base, objsize * count,
+ MDB_RANGE_FOUND_SURROUNDING, &found_cte, &find_range_result);
+ // this should never return an error unless we mess up the
+ // non-user supplied arguments
+ if (err_is_fail(err)) {
+ printk(LOG_WARN, "mdb_find_range returned: %"PRIuERRV"\n", err);
+ }
+ assert(err_is_ok(err));
+ // return REVOKE_FIRST, if we found a cap inside the region
+ // (FOUND_INNER == 2) or overlapping the region (FOUND_PARTIAL == 3)
+ if (find_range_result >= MDB_RANGE_FOUND_INNER) {
+ printf("found existing region inside, or overlapping requested region\n");
+ return SYS_ERR_REVOKE_FIRST;
+ }
+ // return REVOKE_FIRST, if we found a cap that isn't our source
+ // (or a copy of our source) covering the whole requested region.
+ else if (find_range_result == MDB_RANGE_FOUND_SURROUNDING &&
+ !is_copy(&found_cte->cap, src_cap))
+ {
+ printf("found non source region fully covering requested region");
+ return SYS_ERR_REVOKE_FIRST;
+ }
+ }
}
- // debug(SUBSYS_CAPS, "caps_retype: numobjs == %d\n", (int)numobjs);
/* check that destination slots all fit within target cnode */
- if (dest_slot + numobjs > (1UL << dest_cnode->u.cnode.bits)) {
+ // TODO: fix this with new cspace layout (should be easier)
+ if (dest_slot + count > (1UL << dest_cnode->u.cnode.bits)) {
debug(SUBSYS_CAPS, "caps_retype: dest slots don't fit in cnode\n");
return SYS_ERR_SLOTS_INVALID;
}
debug(SUBSYS_CAPS, "caps_retype: dest cnode is %#" PRIxLPADDR
" dest_slot %d\n",
dest_cnode->u.cnode.cnode, (int)dest_slot);
- for (cslot_t i = 0; i < numobjs; i++) {
+ for (cslot_t i = 0; i < count; i++) {
if (caps_locate_slot(dest_cnode->u.cnode.cnode, dest_slot + i)->cap.type
!= ObjType_Null) {
debug(SUBSYS_CAPS, "caps_retype: dest slot %d in use\n",
/* create new caps */
struct cte *dest_cte =
caps_locate_slot(dest_cnode->u.cnode.cnode, dest_slot);
- err = caps_create(type, base, bits, objbits, numobjs, my_core_id, dest_cte);
+ err = caps_create(type, base, size, objsize, count, my_core_id, dest_cte);
if (err_is_fail(err)) {
debug(SUBSYS_CAPS, "caps_retype: failed to create a dest cap\n");
return err_push(err, SYS_ERR_RETYPE_CREATE);
/* special initialisation for endpoint caps */
if (type == ObjType_EndPoint) {
assert(src_cap->type == ObjType_Dispatcher);
- assert(numobjs == 1);
+ assert(count == 1);
struct capability *dest_cap = &dest_cte->cap;
dest_cap->u.endpoint.listener = src_cap->u.dispatcher.dcb;
}
/* Handle mapping */
- for (size_t i = 0; i < numobjs; i++) {
+ for (size_t i = 0; i < count; i++) {
mdb_insert(&dest_cte[i]);
}
#ifdef TRACE_PMEM_CAPS
- for (size_t i = 0; i < numobjs; i++) {
+ for (size_t i = 0; i < count; i++) {
TRACE_CAP_MSG("created", &dest_cte[i]);
}
#endif
return SYS_ERR_OK;
}
+
/// Check the validity of a retype operation
errval_t is_retypeable(struct cte *src_cte, enum objtype src_type,
enum objtype dest_type, bool from_monitor)
if (!is_well_founded(src_type, dest_type)) {
return SYS_ERR_INVALID_RETYPE;
} else if (!is_revoked_first(src_cte, src_type)){
- printf("err_revoke_first: (%p, %d, %d)\n", src_cte, src_type, dest_type);
+ //printf("err_revoke_first: (%p, %d, %d)\n", src_cte, src_type, dest_type);
return SYS_ERR_REVOKE_FIRST;
} else if (dest_type == ObjType_EndPoint && src_cte->mdbnode.owner == my_core_id) {
// XXX: because of the current "multi-retype" hack for endpoints, a
// dispatcher->endpoint retype can happen irrespective of the existence
- // of descendents on any core.
- // Howevery, we only do this for locally owned caps as the owner should
+ // of descendants on any core.
+ // However, we only do this for locally owned caps as the owner should
// be notified that the cap has remote descendants
return SYS_ERR_OK;
} else if (!from_monitor && (src_cte->mdbnode.owner != my_core_id
uintptr_t pages, uintptr_t kpi_paging_flags);
void paging_dump_tables(struct dcb *dispatcher);
-errval_t caps_retype(enum objtype type, size_t objbits,
- struct capability *dest_cnode,
- cslot_t dest_slot, struct cte *src_cte,
+errval_t caps_retype(enum objtype type, gensize_t objsize, size_t count,
+ struct capability *dest_cnode, cslot_t dest_slot,
+ struct cte *src_cte, gensize_t offset,
bool from_monitor);
errval_t is_retypeable(struct cte *src_cte,
enum objtype src_type,
* Cap tracing
*/
#ifdef TRACE_PMEM_CAPS
-STATIC_ASSERT(44 == ObjType_Num, "knowledge of all cap types");
+STATIC_ASSERT(46 == ObjType_Num, "knowledge of all cap types");
STATIC_ASSERT(64 >= ObjType_Num, "cap types fit in uint64_t bitfield");
#define MAPPING_TYPES \
((1ull<<ObjType_VNode_x86_64_pml4_Mapping) | \
return l;
}
+/// Computes the ceiling of log_2 of the given number
+static inline uint8_t log2cl(uintptr_t num)
+{
+ uint8_t l = log2flr(num);
+ if (num == ((uintptr_t)1) << l) { /* fencepost case */
+ return l;
+ } else {
+ return l + 1;
+ }
+}
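+
+/* Editor's note: log2cl(1) == 0, log2cl(4096) == 12, log2cl(4097) == 13;
+ * caps_create() uses this to insist on power-of-two CNode sizes. */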
+
static inline int bitaddralign(size_t n, lpaddr_t base_addr)
{
int exponent = sizeof(size_t) * NBBY - 1;
unsigned long wcet, unsigned long period,
unsigned long release, unsigned short weight);
struct sysret
-sys_retype(struct capability *root, capaddr_t source_cptr, enum objtype type,
- uint8_t objbits, capaddr_t dest_cnode_cptr, cslot_t dest_slot,
+sys_retype(struct capability *root, capaddr_t source_cptr, gensize_t offset,
+ enum objtype type, gensize_t objsize, size_t count,
+ capaddr_t dest_cnode_cptr, cslot_t dest_slot,
uint8_t dest_vbits, bool from_monitor);
struct sysret sys_create(struct capability *root, enum objtype type,
uint8_t objbits, capaddr_t dest_cnode_cptr,
struct sysret sys_kernel_add_kcb(struct kcb* new_kcb);
struct sysret sys_kernel_remove_kcb(struct kcb* kcb_addr);
struct sysret sys_kernel_suspend_kcb_sched(bool toggle);
-struct sysret sys_handle_kcb_identify(struct capability* to);
+struct sysret sys_handle_kcb_identify(struct capability* to, struct frame_identity *fi);
struct sysret sys_get_absolute_time(void);
/*
return err;
}
if (!type_is_vnode((*next)->cap.type)) {
+ struct cte *tmp = mdb_predecessor(*next);
+ // check if there's a copy of *next that is a vnode, and return that
+ // copy, if found.
+ while(is_copy(&tmp->cap, &(*next)->cap)) {
+ if (type_is_vnode(tmp->cap.type)) {
+ *next = tmp;
+ return SYS_ERR_OK;
+ }
+ tmp = mdb_predecessor(tmp);
+ }
+ tmp = mdb_successor(*next);
+ while(is_copy(&tmp->cap, &(*next)->cap)) {
+ if (type_is_vnode(tmp->cap.type)) {
+ *next = tmp;
+ return SYS_ERR_OK;
+ }
+ tmp = mdb_successor(tmp);
+ }
+
+ debug(SUBSYS_CAPS, "found cap not a VNode\n");
+ // no copy was vnode
return SYS_ERR_VNODE_LOOKUP_NEXT;
}
return SYS_ERR_OK;
return SYS_ERR_VNODE_NOT_INSTALLED;
}
err = find_next_ptable(mapping, &next);
- if (err == SYS_ERR_VNODE_NOT_INSTALLED) { // no next page table
+ // no next page table
+ if (err == SYS_ERR_VNODE_NOT_INSTALLED ||
+ err == SYS_ERR_VNODE_LOOKUP_NEXT)
+ {
*retvaddr = 0;
return SYS_ERR_VNODE_NOT_INSTALLED;
}
#include <mdb/mdb_tree.h>
#include <trace/trace.h>
-struct kcb *kcb_current;
+struct kcb *kcb_current = NULL;
coreid_t my_core_id;
enum region_type type,
struct spawn_state *st, struct bootinfo *bootinfo)
{
- size_t remain = size;
struct mem_region *regions = bootinfo->regions;
size_t *regions_index = &bootinfo->regions_length;
struct capability *cnode;
panic("Cannot handle bootinfo region type!");
}
- while (remain > 0) {
- /* Cannot insert anymore into this cnode */
- if (*slot >= 1UL << cnode->u.cnode.bits) {
- printk(LOG_WARN, "create_caps_to_cnode: Cannot create more caps "
- "in CNode\n");
- return SYS_ERR_SLOTS_IN_USE;
- }
- /* Cannot insert anymore into the mem_region */
- if (*regions_index >= MAX_MEM_REGIONS) {
- printk(LOG_WARN, "create_caps_to_cnode: mem_region out of space\n");
- return -1;
- }
-
- uint8_t block_size = bitaddralign(remain, base_addr);
-
- /* Create the capability */
- err = caps_create_new(cap_type, base_addr, block_size, block_size, my_core_id,
- caps_locate_slot(cnode->u.cnode.cnode, (*slot)++));
- if (err_is_fail(err)) {
- return err;
- }
-
- assert(regions != NULL);
- regions[*regions_index].mr_base = base_addr;
- regions[*regions_index].mr_type = type;
- regions[*regions_index].mr_bits = block_size;
- regions[*regions_index].mr_consumed = false;
- regions[*regions_index].mrmod_size = 0;
- regions[*regions_index].mrmod_data = 0;
- (*regions_index)++;
-
- // Advance physical memory pointer
- base_addr += (1UL << block_size);
- remain -= (1UL << block_size);
+ if (*slot >= 1UL << cnode->u.cnode.bits) {
+ printk(LOG_WARN, "create_caps_to_cnode: Cannot create more caps "
+ "in CNode\n");
+ return SYS_ERR_SLOTS_IN_USE;
}
+ /* Cannot insert anymore into the mem_region */
+ if (*regions_index >= MAX_MEM_REGIONS) {
+ printk(LOG_WARN, "create_caps_to_cnode: mem_region out of space\n");
+ return -1;
+ }
+
+ /* create the capability */
+ err = caps_create_new(cap_type, base_addr, size, size, my_core_id,
+ caps_locate_slot(cnode->u.cnode.cnode, (*slot)++));
+ if (err_is_fail(err)) {
+ return err;
+ }
+
+ /* record region */
+ assert(regions != NULL);
+ regions[*regions_index].mr_base = base_addr;
+ regions[*regions_index].mr_type = type;
+ regions[*regions_index].mr_bytes = size;
+ regions[*regions_index].mr_consumed = false;
+ regions[*regions_index].mrmod_size = 0;
+ regions[*regions_index].mrmod_data = 0;
+ (*regions_index)++;
return SYS_ERR_OK;
}
#error invalid scheduler
#endif
+ /* create root cnode */
err = caps_create_new(ObjType_CNode, alloc_phys(BASE_PAGE_SIZE),
- BASE_PAGE_BITS, DEFAULT_CNODE_BITS, my_core_id,
+ BASE_PAGE_SIZE, DEFAULT_CNODE_SLOTS, my_core_id,
rootcn);
assert(err_is_ok(err));
// Task cnode in root cnode
st->taskcn = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_TASKCN);
err = caps_create_new(ObjType_CNode, alloc_phys(BASE_PAGE_SIZE),
- BASE_PAGE_BITS, DEFAULT_CNODE_BITS, my_core_id,
+ BASE_PAGE_SIZE, DEFAULT_CNODE_SLOTS, my_core_id,
st->taskcn);
assert(err_is_ok(err));
st->taskcn->cap.u.cnode.guard_size = GUARD_REMAINDER(2 * DEFAULT_CNODE_BITS);
st->pagecn = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_PAGECN);
err = caps_create_new(ObjType_CNode,
alloc_phys(1UL << (OBJBITS_CTE + PAGE_CNODE_BITS)),
- PAGE_CNODE_BITS + OBJBITS_CTE, PAGE_CNODE_BITS,
+ PAGE_CNODE_SLOTS * sizeof(struct cte), PAGE_CNODE_SLOTS,
my_core_id, st->pagecn);
assert(err_is_ok(err));
// Base page cnode in root cnode
st->basepagecn = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_BASE_PAGE_CN);
err = caps_create_new(ObjType_CNode, alloc_phys(BASE_PAGE_SIZE),
- BASE_PAGE_BITS, DEFAULT_CNODE_BITS, my_core_id,
+ BASE_PAGE_SIZE, DEFAULT_CNODE_SLOTS, my_core_id,
st->basepagecn);
assert(err_is_ok(err));
st->supercn = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_SUPERCN);
err = caps_create_new(ObjType_CNode,
alloc_phys(1UL << (OBJBITS_CTE + SUPER_CNODE_BITS)),
- SUPER_CNODE_BITS + OBJBITS_CTE,
- SUPER_CNODE_BITS, my_core_id, st->supercn);
+ SUPER_CNODE_SLOTS * sizeof(struct cte),
+ SUPER_CNODE_SLOTS, my_core_id, st->supercn);
assert(err_is_ok(err));
// slot_alloc cnodes in root cnode
st->slot_alloc_cn0 = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_SLOT_ALLOC0);
err = caps_create_new(ObjType_CNode,
alloc_phys(1UL << (OBJBITS_CTE + SLOT_ALLOC_CNODE_BITS)),
- SLOT_ALLOC_CNODE_BITS + OBJBITS_CTE,
- SLOT_ALLOC_CNODE_BITS, my_core_id,
+ SLOT_ALLOC_CNODE_SLOTS * sizeof(struct cte),
+ SLOT_ALLOC_CNODE_SLOTS, my_core_id,
st->slot_alloc_cn0);
assert(err_is_ok(err));
st->slot_alloc_cn1 = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_SLOT_ALLOC1);
err = caps_create_new(ObjType_CNode,
alloc_phys(1UL << (OBJBITS_CTE + SLOT_ALLOC_CNODE_BITS)),
- SLOT_ALLOC_CNODE_BITS + OBJBITS_CTE,
- SLOT_ALLOC_CNODE_BITS, my_core_id,
+ SLOT_ALLOC_CNODE_SLOTS * sizeof(struct cte),
+ SLOT_ALLOC_CNODE_SLOTS, my_core_id,
st->slot_alloc_cn1);
assert(err_is_ok(err));
st->slot_alloc_cn2 = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_SLOT_ALLOC2);
err = caps_create_new(ObjType_CNode,
alloc_phys(1UL << (OBJBITS_CTE + SLOT_ALLOC_CNODE_BITS)),
- SLOT_ALLOC_CNODE_BITS + OBJBITS_CTE,
- SLOT_ALLOC_CNODE_BITS, my_core_id,
+ SLOT_ALLOC_CNODE_SLOTS * sizeof(struct cte),
+ SLOT_ALLOC_CNODE_SLOTS, my_core_id,
st->slot_alloc_cn2);
assert(err_is_ok(err));
// Seg cnode in root cnode
st->segcn = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_SEGCN);
err = caps_create_new(ObjType_CNode, alloc_phys(BASE_PAGE_SIZE),
- BASE_PAGE_BITS, DEFAULT_CNODE_BITS, my_core_id,
+ BASE_PAGE_SIZE, DEFAULT_CNODE_SLOTS, my_core_id,
st->segcn);
assert(err_is_ok(err));
st->physaddrcn = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_PACN);
err = caps_create_new(ObjType_CNode,
alloc_phys(1UL << (OBJBITS_CTE + PHYSADDRCN_BITS)),
- OBJBITS_CTE + PHYSADDRCN_BITS, PHYSADDRCN_BITS,
+ 1UL << (OBJBITS_CTE + PHYSADDRCN_BITS), PHYSADDRCN_SLOTS,
my_core_id, st->physaddrcn);
assert(err_is_ok(err));
st->modulecn = caps_locate_slot(CNODE(rootcn), ROOTCN_SLOT_MODULECN);
err = caps_create_new(ObjType_CNode,
alloc_phys(1UL << (OBJBITS_CTE + MODULECN_SIZE_BITS)),
- MODULECN_SIZE_BITS + OBJBITS_CTE,
- MODULECN_SIZE_BITS, my_core_id, st->modulecn);
+ 1UL << (MODULECN_SIZE_BITS + OBJBITS_CTE),
+ 1UL << MODULECN_SIZE_BITS, my_core_id, st->modulecn);
assert(err_is_ok(err));
}
TASKCN_SLOT_DISPATCHER);
err = caps_create_new(ObjType_Dispatcher,
alloc_phys(1UL << OBJBITS_DISPATCHER),
- OBJBITS_DISPATCHER, 0, my_core_id, init_dcb_cte);
+ 1UL << OBJBITS_DISPATCHER, 0, my_core_id, init_dcb_cte);
assert(err_is_ok(err));
struct dcb *init_dcb = init_dcb_cte->cap.u.dispatcher.dcb;
struct cte *init_dispframe_cte = caps_locate_slot(CNODE(st->taskcn),
TASKCN_SLOT_DISPFRAME);
err = caps_create_new(ObjType_Frame, alloc_phys(1 << DISPATCHER_FRAME_BITS),
- DISPATCHER_FRAME_BITS, DISPATCHER_FRAME_BITS,
+ 1UL << DISPATCHER_FRAME_BITS,
+ 1UL << DISPATCHER_FRAME_BITS,
my_core_id, init_dispframe_cte);
assert(err_is_ok(err));
struct cte *init_args_cte = caps_locate_slot(CNODE(st->taskcn),
TASKCN_SLOT_ARGSPAGE);
err = caps_create_new(ObjType_Frame, alloc_phys(ARGS_SIZE),
- ARGS_FRAME_BITS, ARGS_FRAME_BITS, my_core_id,
+ 1UL << ARGS_FRAME_BITS, 1UL << ARGS_FRAME_BITS, my_core_id,
init_args_cte);
st->args_page = gen_phys_to_local_phys(init_args_cte->cap.u.frame.base);
/* DevFrame to prevent zeroing! */
/* Note: Since this is only done in the bsp, we can safely assume we
* own the bootinfo memory */
- err = caps_create_new(ObjType_DevFrame, bootinfo, BOOTINFO_SIZEBITS,
- BOOTINFO_SIZEBITS, my_core_id, bootinfo_cte);
+ err = caps_create_new(ObjType_DevFrame, bootinfo, 1UL << BOOTINFO_SIZEBITS,
+ 1UL << BOOTINFO_SIZEBITS, my_core_id, bootinfo_cte);
assert(err_is_ok(err));
}
/* Fill up base page CN (pre-allocated 4K pages) */
for(size_t i = 0; i < (1UL << (BASE_PAGE_BITS - OBJBITS_CTE)); i++) {
err = caps_create_new(ObjType_RAM, alloc_phys(BASE_PAGE_SIZE),
- BASE_PAGE_BITS, BASE_PAGE_BITS, my_core_id,
+ BASE_PAGE_SIZE, BASE_PAGE_SIZE, my_core_id,
caps_locate_slot(CNODE(st->basepagecn), i));
assert(err_is_ok(err));
}
#include <trace/trace.h>
#include <trace_definitions/trace_defs.h>
#include <kcb.h>
+#include <useraccess.h>
errval_t sys_print(const char *str, size_t length)
{
/**
* \param root Root CNode to invoke
* \param source_cptr Source capability cptr
+ * \param offset Offset into source capability from which to retype
* \param type Type to retype to
- * \param objbits Object bits for variable-sized types
+ * \param objsize Object size for variable-sized types
+ * \param count Number of objects to create
* \param dest_cnode_cptr Destination cnode cptr
* \param dest_slot Destination slot number
* \param dest_vbits Valid bits in destination cnode cptr
*/
struct sysret
-sys_retype(struct capability *root, capaddr_t source_cptr, enum objtype type,
- uint8_t objbits, capaddr_t dest_cnode_cptr, cslot_t dest_slot,
+sys_retype(struct capability *root, capaddr_t source_cptr, gensize_t offset,
+ enum objtype type, gensize_t objsize, size_t count,
+ capaddr_t dest_cnode_cptr, cslot_t dest_slot,
uint8_t dest_vbits, bool from_monitor)
{
errval_t err;
}
/* Source capability */
- struct cte *source_cap;
- err = caps_lookup_slot(root, source_cptr, CPTR_BITS, &source_cap,
+ struct cte *source_cte;
+ err = caps_lookup_slot(root, source_cptr, CPTR_BITS, &source_cte,
CAPRIGHTS_READ);
if (err_is_fail(err)) {
return SYSRET(err_push(err, SYS_ERR_SOURCE_CAP_LOOKUP));
}
- assert(source_cap != NULL);
+ assert(source_cte != NULL);
/* Destination cnode */
struct capability *dest_cnode_cap;
return SYSRET(SYS_ERR_DEST_CNODE_INVALID);
}
- return SYSRET(caps_retype(type, objbits, dest_cnode_cap, dest_slot,
- source_cap, from_monitor));
+ return SYSRET(caps_retype(type, objsize, count, dest_cnode_cap, dest_slot,
+ source_cte, offset, from_monitor));
}
struct sysret sys_create(struct capability *root, enum objtype type,
return SYSRET(SYS_ERR_OK);
}
-struct sysret sys_handle_kcb_identify(struct capability* to)
+struct sysret sys_handle_kcb_identify(struct capability* to, struct frame_identity *fi)
{
-    // Return with physical base address of frame
-    // XXX: pack size into bottom bits of base address
+    // Return the physical base address and size of the KCB frame via 'fi'
lvaddr_t vkcb = (lvaddr_t) to->u.kernelcontrolblock.kcb;
assert((vkcb & BASE_PAGE_MASK) == 0);
- return (struct sysret) {
- .error = SYS_ERR_OK,
- .value = mem_to_local_phys(vkcb) | OBJBITS_KCB,
- };
+ if (!access_ok(ACCESS_WRITE, (lvaddr_t)fi, sizeof(struct frame_identity))) {
+ return SYSRET(SYS_ERR_INVALID_USER_BUFFER);
+ }
+
+ fi->base = get_address(to);
+ fi->bytes = get_size(to);
+
+ return SYSRET(SYS_ERR_OK);
}
struct sysret sys_get_absolute_time(void)
if (flags & VREGION_FLAGS_LARGE &&
(vaddr & LARGE_PAGE_MASK) == 0 &&
- fi.bits >= LARGE_PAGE_BITS &&
+ fi.bytes >= LARGE_PAGE_SIZE &&
(fi.base & LARGE_PAGE_MASK) == 0) {
//section mapping (1MB)
//mapped in the L1 table at root
// determine mapping specific parts
if (flags & VREGION_FLAGS_LARGE &&
(vaddr & LARGE_PAGE_MASK) == 0 &&
- fi.bits >= LARGE_PAGE_BITS &&
+ fi.bytes >= LARGE_PAGE_SIZE &&
(fi.base & LARGE_PAGE_MASK) == 0) {
//section mapping (1MB)
page_size = LARGE_PAGE_SIZE;
}
genvaddr_t vend = vaddr + size;
- if ((1UL << fi.bits) < size) {
+ if (fi.bytes < size) {
return LIB_ERR_PMAP_FRAME_SIZE;
}
// adjust the mapping to be on page boundaries
if (flags & VREGION_FLAGS_LARGE &&
(vaddr & LARGE_PAGE_MASK) == 0 &&
- fi.bits >= LARGE_PAGE_BITS &&
+ fi.bytes >= LARGE_PAGE_SIZE &&
(fi.base & LARGE_PAGE_MASK) == 0) {
//section mapping (1MB)
base = LARGE_PAGE_OFFSET(offset);
page_size = LARGE_PAGE_SIZE;
slabs_required = max_slabs_required_large(size);
#ifdef LIBBARRELFISH_DEBUG_PMAP
- size_t frame_sz = 1ULL<<fi.bits;
printf("map: large path, page_size: %i, base: %i, slabs: %i, size: %i,"
- "frame size: %zu\n", page_size, base, slabs_required, size, frame_sz);
+ "frame size: %zu\n", page_size, base, slabs_required, size, fi.bytes);
#endif
} else {
//4k mapping
 * the monitor to ensure consistency with other cores. Only necessary for
* caps that have been sent remotely.
*/
-static errval_t cap_retype_remote(capaddr_t src, enum objtype new_type,
- uint8_t size_bits, capaddr_t to, capaddr_t slot,
- int to_vbits)
+static errval_t cap_retype_remote(capaddr_t src, gensize_t offset, enum objtype new_type,
+ gensize_t objsize, size_t count, capaddr_t to,
+ capaddr_t slot, int to_vbits)
{
struct monitor_blocking_rpc_client *mrc = get_monitor_blocking_rpc_client();
if (!mrc) {
}
errval_t err, remote_cap_err;
- int count = 0;
+ int send_count = 0;
do {
- err = mrc->vtbl.remote_cap_retype(mrc, cap_root, src,
- (uint64_t)new_type,
- size_bits, to, slot,
+ err = mrc->vtbl.remote_cap_retype(mrc, cap_root, src, offset,
+ (uint64_t)new_type, objsize,
+ count, to, slot,
to_vbits, &remote_cap_err);
if (err_is_fail(err)){
DEBUG_ERR(err, "remote cap retype\n");
}
- } while (err_no(remote_cap_err) == MON_ERR_REMOTE_CAP_RETRY && backoff(++count));
+ } while (err_no(remote_cap_err) == MON_ERR_REMOTE_CAP_RETRY && backoff(++send_count));
return remote_cap_err;
}
/**
- * \brief Retype a capability into one or more new capabilities
+ * \brief Retype (part of) a capability into one or more new capabilities
*
* \param dest_start Location of first destination slot, which must be empty
* \param src Source capability to retype
+ * \param offset Offset into source capability
* \param new_type Kernel object type to retype to.
- * \param size_bits Size of created objects as a power of two
+ * \param objsize Size of created objects in bytes
* (ignored for fixed-size objects)
+ * \param count The number of new objects to create
*
- * Retypes the given source capability into a number of new capabilities, which
- * may be of the same or of different type. The new capabilities are created
- * in the slots starting from dest_start, which must all be empty and lie in the
- * same CNode. The number of objects created is determined by the size of the
- * source object divided by the size of the destination objects.
+ * Retypes (part of) the given source capability into a number of new
+ * capabilities, which may be of the same or of different type. The new
+ * capabilities are created in the slots starting from dest_start, which must
+ * all be empty and lie in the same CNode. The number of objects created is
+ * determined by the argument `count`.
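+ *
+ * Example (editor's sketch, assuming `src` names a 1 MiB RAM cap and 256
+ * consecutive free slots at `dest_start`):
+ *   err = cap_retype(dest_start, src, 0, ObjType_Frame, BASE_PAGE_SIZE, 256);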
*/
-errval_t cap_retype(struct capref dest_start, struct capref src,
- enum objtype new_type, uint8_t size_bits)
+errval_t cap_retype(struct capref dest_start, struct capref src, gensize_t offset,
+ enum objtype new_type, gensize_t objsize, size_t count)
{
errval_t err;
// Address of source capability
capaddr_t scp_addr = get_cap_addr(src);
- err = invoke_cnode_retype(cap_root, scp_addr, new_type, size_bits,
+ err = invoke_cnode_retype(cap_root, scp_addr, offset, new_type, objsize, count,
dcn_addr, dest_start.slot, dcn_vbits);
if (err_no(err) == SYS_ERR_RETRY_THROUGH_MONITOR) {
- return cap_retype_remote(scp_addr, new_type, size_bits,
+ return cap_retype_remote(scp_addr, offset, new_type, objsize, count,
dcn_addr, dest_start.slot, dcn_vbits);
} else {
return err;
errval_t err;
// Retype it to the destination
- err = cap_retype(dest, src, ObjType_CNode, slot_bits);
+ err = cap_retype(dest, src, 0, ObjType_CNode, 1UL << slot_bits, 1);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_CAP_RETYPE);
}
}
assert(type_is_vnode(type));
- err = cap_retype(dest, ram, type, 0);
+ err = cap_retype(dest, ram, 0, type, 0, 1);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_CAP_RETYPE);
}
return err_push(err, LIB_ERR_RAM_ALLOC);
}
- err = cap_retype(dest, ram, ObjType_Frame, bits);
+ err = cap_retype(dest, ram, 0, ObjType_Frame, (1UL << bits), 1);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_CAP_RETYPE);
}
errval_t err;
struct capref ram;
- err = ram_alloc(&ram, OBJBITS_DISPATCHER);
+ err = ram_alloc(&ram, BASE_PAGE_BITS);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_RAM_ALLOC);
}
- err = cap_retype(dest, ram, ObjType_Dispatcher, 0);
+ err = cap_retype(dest, ram, 0, ObjType_Dispatcher, 0, 1);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_CAP_RETYPE);
}
return err_push(err, LIB_ERR_SLOT_ALLOC);
}
- return cap_retype(*dest, src, ObjType_DevFrame, bits);
+ return cap_retype(*dest, src, 0, ObjType_DevFrame, 1UL << bits, 1);
}
/**
switch (cap->type) {
case ObjType_PhysAddr:
return snprintf(buf, len,
- "physical address range cap (0x%" PRIxGENPADDR ":%u)",
- cap->u.physaddr.base, cap->u.physaddr.bits);
+ "physical address range cap (0x%" PRIxGENPADDR ":0x%zx)",
+ cap->u.physaddr.base, cap->u.physaddr.bytes);
case ObjType_RAM:
- return snprintf(buf, len, "RAM cap (0x%" PRIxGENPADDR ":%u)",
- cap->u.ram.base, cap->u.ram.bits);
+ return snprintf(buf, len, "RAM cap (0x%" PRIxGENPADDR ":0x%zx)",
+ cap->u.ram.base, cap->u.ram.bytes);
case ObjType_CNode: {
int ret = snprintf(buf, len, "CNode cap "
return snprintf(buf, len, "Dispatcher cap %p", cap->u.dispatcher.dcb);
case ObjType_Frame:
- return snprintf(buf, len, "Frame cap (0x%" PRIxGENPADDR ":%u)",
- cap->u.frame.base, cap->u.frame.bits);
+ return snprintf(buf, len, "Frame cap (0x%" PRIxGENPADDR ":0x%zx)",
+ cap->u.frame.base, cap->u.frame.bytes);
case ObjType_DevFrame:
- return snprintf(buf, len, "Device Frame cap (0x%" PRIxGENPADDR ":%u)",
- cap->u.frame.base, cap->u.devframe.bits);
+ return snprintf(buf, len, "Device Frame cap (0x%" PRIxGENPADDR ":%zx)",
+ cap->u.frame.base, cap->u.devframe.bytes);
case ObjType_VNode_ARM_l1:
return snprintf(buf, len, "ARM L1 table at 0x%" PRIxGENPADDR,
errval_t range_slot_alloc(struct range_slot_allocator *alloc, cslot_t nslots,
struct capref *ret)
{
- thread_mutex_lock(&alloc->mutex);
+ assert(alloc);
+ if (!alloc->is_head) {
+ return LIB_ERR_RANGE_ALLOC_NOT_HEAD;
+ }
+ struct range_slot_allocator *head = alloc;
+ thread_mutex_lock(&head->mutex);
- struct cnode_meta *prev = NULL;
- struct cnode_meta *walk = alloc->meta;
+ struct cnode_meta *prev = NULL, *walk = NULL;
- /* Look for large enough space */
- while(walk != NULL) {
- if (walk->space >= nslots) {
+ /* Look for large enough space in whole chain */
+ while (alloc) {
+ walk = alloc->meta;
+ prev = NULL;
+ while(walk != NULL) {
+ if (walk->space >= nslots) {
+ break;
+ }
+ prev = walk;
+ walk = walk->next;
+ }
+
+        /* Space found in this chain element */
+ if (walk != NULL) {
break;
}
- prev = walk;
- walk = walk->next;
+
+ alloc = alloc->next;
}
- /* Space not found */
- if (walk == NULL) {
- thread_mutex_unlock(&alloc->mutex);
+ if (alloc == NULL) {
+ thread_mutex_unlock(&head->mutex);
return LIB_ERR_SLOT_ALLOC_NO_SPACE;
}
slab_free(&alloc->slab, walk);
}
- thread_mutex_unlock(&alloc->mutex);
+ thread_mutex_unlock(&head->mutex);
return SYS_ERR_OK;
}
errval_t range_slot_free(struct range_slot_allocator *alloc, struct capref cap,
cslot_t nslots)
{
+ if (!alloc->is_head) {
+ return LIB_ERR_RANGE_ALLOC_NOT_HEAD;
+ }
+
errval_t err;
- thread_mutex_lock(&alloc->mutex);
+ struct range_slot_allocator *head = alloc;
+ thread_mutex_lock(&head->mutex);
+
+ // find right allocator
+    while (alloc && !cnodecmp(cap.cnode, alloc->cnode)) {
+ alloc = alloc->next;
+ }
+ if (!alloc) {
+ thread_mutex_unlock(&head->mutex);
+ return LIB_ERR_SLOT_ALLOC_WRONG_CNODE;
+ }
+ // alloc now the right chain element
struct cnode_meta *prev = NULL;
struct cnode_meta *walk = alloc->meta;
while(walk != NULL) {
if ((cap.slot > walk->slot) && (walk->next == NULL)) {
err = insert_after(alloc, nslots, cap.slot, walk);
- thread_mutex_unlock(&alloc->mutex);
+ thread_mutex_unlock(&head->mutex);
return err;
}
if (cap.slot < walk->slot) {
err = insert_before(alloc, nslots, cap.slot, prev, walk);
- thread_mutex_unlock(&alloc->mutex);
+ thread_mutex_unlock(&head->mutex);
return err;
}
prev = walk;
assert(alloc->meta == NULL);
alloc->meta = slab_alloc(&alloc->slab);
if (alloc->meta == NULL) {
- thread_mutex_unlock(&alloc->mutex);
+ thread_mutex_unlock(&head->mutex);
return LIB_ERR_SLAB_ALLOC_FAIL;
}
alloc->meta->slot = cap.slot;
alloc->meta->space = nslots;
alloc->meta->next = NULL;
- thread_mutex_unlock(&alloc->mutex);
+ thread_mutex_unlock(&head->mutex);
return SYS_ERR_OK;
}
ret->meta->space = nslots;
ret->meta->next = NULL;
+ // setting is_head true here, internal code can reset by hand
+ ret->is_head = true;
+
+ return SYS_ERR_OK;
+}
+
+size_t range_slot_alloc_freecount(struct range_slot_allocator *alloc)
+{
+ size_t count = 0;
+ if (!alloc->is_head) {
+ return LIB_ERR_RANGE_ALLOC_NOT_HEAD;
+ }
+ struct range_slot_allocator *head = alloc;
+ thread_mutex_lock(&head->mutex);
+
+ struct range_slot_allocator *alloc_w = alloc;
+
+ while (alloc_w) {
+        struct cnode_meta *walk = alloc_w->meta;
+ while(walk != NULL) {
+ count += walk->space;
+ walk = walk->next;
+ }
+ alloc_w = alloc_w->next;
+ }
+
+ thread_mutex_unlock(&head->mutex);
+ return count;
+}
+
+errval_t range_slot_alloc_refill(struct range_slot_allocator *alloc, cslot_t slots)
+{
+ if (!alloc->is_head) {
+ return LIB_ERR_RANGE_ALLOC_NOT_HEAD;
+ }
+
+ struct range_slot_allocator *head = alloc;
+ thread_mutex_lock(&head->mutex);
+ // find last allocator in chain
+ while(alloc->next) {
+ alloc = alloc->next;
+ }
+ // allocate new instance
+ alloc->next = malloc(sizeof(struct range_slot_allocator));
+ assert(alloc->next);
+
+ // initialize new instance
+ struct range_slot_allocator *n = alloc->next;
+ n->next = NULL;
+ cslot_t retslots;
+ errval_t err = range_slot_alloc_init(n, slots, &retslots);
+ assert(err_is_ok(err));
+ assert(retslots > slots);
+
+ n->is_head = false;
+
+ thread_mutex_unlock(&head->mutex);
return SYS_ERR_OK;
}
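+
+/* Usage sketch (editor's note; `needed` is a hypothetical slot count):
+ *   if (range_slot_alloc_freecount(alloc) < needed) {
+ *       err = range_slot_alloc_refill(alloc, needed);
+ *   }
+ * keeps a chained allocator topped up before a burst of allocations.
+ */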
if ((flags & VREGION_FLAGS_HUGE) &&
(vaddr & X86_64_HUGE_PAGE_MASK) == 0 &&
- fi.bits >= X86_64_HUGE_PAGE_BITS &&
+ fi.bytes >= X86_64_HUGE_PAGE_SIZE &&
((fi.base & X86_64_HUGE_PAGE_MASK) == 0))
{
// huge page branch (1GB)
flags &= ~VREGION_FLAGS_LARGE;
} else if ((flags & VREGION_FLAGS_LARGE) &&
(vaddr & X86_64_LARGE_PAGE_MASK) == 0 &&
- fi.bits >= X86_64_LARGE_PAGE_BITS &&
+ fi.bytes >= X86_64_LARGE_PAGE_SIZE &&
((fi.base & X86_64_LARGE_PAGE_MASK) == 0))
{
// large page branch (2MB)
size_t pte_count = DIVIDE_ROUND_UP(size, page_size);
genvaddr_t vend = vaddr + size;
- if (offset+size > (1ULL<<fi.bits)) {
+ if (offset+size > fi.bytes) {
debug_printf("do_map: offset=%zu; size=%zu; frame size=%zu\n",
- offset, size, ((size_t)1<<fi.bits));
+ offset, size, fi.bytes);
return LIB_ERR_PMAP_FRAME_SIZE;
}
debug_printf("do_map: 0x%"
PRIxGENVADDR"--0x%"PRIxGENVADDR" -> 0x%"PRIxGENPADDR
- "; pte_count = %zd; frame bits = %zd; page size = 0x%zx\n",
- vaddr, vend, paddr, pte_count, (size_t)fi.bits, page_size);
+ "; pte_count = %zd; frame bytes = 0x%zx; page size = 0x%zx\n",
+ vaddr, vend, paddr, pte_count, fi.bytes, page_size);
}
#endif
if ((flags & VREGION_FLAGS_LARGE) &&
(vaddr & X86_64_LARGE_PAGE_MASK) == 0 &&
(fi.base & X86_64_LARGE_PAGE_MASK) == 0 &&
- (1UL<<fi.bits) >= offset+size) {
+ fi.bytes >= offset+size) {
//case large pages (2MB)
size += LARGE_PAGE_OFFSET(offset);
size = ROUND_UP(size, LARGE_PAGE_SIZE);
} else if ((flags & VREGION_FLAGS_HUGE) &&
(vaddr & X86_64_HUGE_PAGE_MASK) == 0 &&
(fi.base & X86_64_HUGE_PAGE_MASK) == 0 &&
- (1UL<<fi.bits) >= offset+size) {
+ fi.bytes >= offset+size) {
// case huge pages (1GB)
size += HUGE_PAGE_OFFSET(offset);
size = ROUND_UP(size, HUGE_PAGE_SIZE);
uc->recvid = (uintptr_t)(frameid.base + outchanlen);
uc->sendid = (uintptr_t)frameid.base;
- size_t framesize = ((uintptr_t)1) << frameid.bits;
- if (framesize < inchanlen + outchanlen) {
+ if (frameid.bytes < inchanlen + outchanlen) {
return LIB_ERR_UMP_FRAME_OVERFLOW;
}
// map it in
void *buf;
- err = vspace_map_one_frame_attr(&buf, framesize, frame, UMP_MAP_ATTR,
+ err = vspace_map_one_frame_attr(&buf, frameid.bytes, frame, UMP_MAP_ATTR,
NULL, &uc->vregion);
if (err_is_fail(err)) {
cap_destroy(uc->frame);
if (err_is_fail(err)) {
return err;
}
- offset += (1UL<<fi.bits);
+ offset += fi.bytes;
err = cap_destroy(oldframe);
if (err_is_fail(err)) {
return err;
err = frame_alloc(&frame, BASE_PAGE_SIZE, NULL);
if (err_is_fail(err)) {
thread_mutex_unlock(&state->mutex);
+ DEBUG_ERR(err, "frame_alloc in vspace_pinned_alloc");
return err_push(err, LIB_ERR_FRAME_ALLOC);
}
err = state->memobj.m.f.fill((struct memobj*)&state->memobj,
BASE_PAGE_SIZE);
if (err_is_fail(err)) {
thread_mutex_unlock(&state->mutex);
+ DEBUG_ERR(err, "memobj_fill in vspace_pinned_alloc");
return err_push(err, LIB_ERR_MEMOBJ_FILL);
}
worker->msgbase = id.base;
worker->state = XOMP_WORKER_ST_SPAWNING;
- err = vspace_map_one_frame(&worker->msgbuf, (1UL << id.bits),
+ err = vspace_map_one_frame(&worker->msgbuf, id.bytes,
worker->msgframe, NULL, NULL);
if (err_is_fail(err)) {
XWR_DEBUG("Replicating frame: [%016lx]\n", id.base);
struct capref replicate;
- err = frame_alloc(&replicate, (1UL << id.bits), NULL);
+ err = frame_alloc(&replicate, id.bytes, NULL);
if (err_is_fail(err)) {
return err;
}
.args = {
.memcpy = {
.src = id.base,
- .bytes = (1UL << id.bits)
+ .bytes = id.bytes
}
}
};
err = invoke_frame_identify(frame, &id);
#else
struct capref replicate;
- err = frame_alloc(&replicate, (1UL << id.bits), NULL);
+ err = frame_alloc(&replicate, id.bytes, NULL);
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "failed to allocate replicate frame\n");
return err;
}
- err = vspace_map_one_frame_fixed_attr((lvaddr_t) usrdata, (1UL << id.bits),
+ err = vspace_map_one_frame_fixed_attr((lvaddr_t) usrdata, id.bytes,
replicate, map_flags, NULL, NULL);
if (err_is_fail(err)) {
return err;
return err;
}
}
- err = vspace_map_one_frame_fixed_attr(addr, (1UL << id.bits), frame,
+ err = vspace_map_one_frame_fixed_attr(addr, id.bytes, frame,
map_flags, NULL, NULL);
} else {
- err = vspace_map_one_frame_attr((void **) &addr, (1UL << id.bits), frame,
+ err = vspace_map_one_frame_attr((void **) &addr, id.bytes, frame,
map_flags, NULL, NULL);
}
if (err_is_fail(err)) {
#if !XOMP_WORKER_ENABLE_DMA
if ((xomp_frame_type_t) type == XOMP_FRAME_TYPE_REPL_RW) {
- memcpy((void *)usrdata, (void *)addr, (1UL << id.bits));
+ memcpy((void *)usrdata, (void *)addr, id.bytes);
}
#endif
XWI_DEBUG("msg_open_cb: frame [%016lx] mapped @ [%016lx, %016lx]\n", id.base,
- addr, addr + (1UL << id.bits));
+ addr, addr + id.bytes);
if ((xomp_frame_type_t) type == XOMP_FRAME_TYPE_MSG) {
USER_PANIC("NYI: initializing messaging");
}
if (addr) {
- msg_st->err = vspace_map_one_frame_fixed_attr(addr, (1UL << id.bits),
+ msg_st->err = vspace_map_one_frame_fixed_attr(addr, id.bytes,
frame, map_flags, NULL, NULL);
} else {
void *map_addr;
- msg_st->err = vspace_map_one_frame_attr(&map_addr, (1UL << id.bits),
+ msg_st->err = vspace_map_one_frame_attr(&map_addr, id.bytes,
frame, map_flags, NULL, NULL);
}
cycles_t map_start = bench_tsc();
#endif
if (addr) {
- msg_st->err = vspace_map_one_frame_fixed_attr(addr, (1UL << id.bits),
+ msg_st->err = vspace_map_one_frame_fixed_attr(addr, id.bytes,
frame, map_flags, NULL, NULL);
} else {
void *map_addr;
- msg_st->err = vspace_map_one_frame_attr(&map_addr, (1UL << id.bits),
+ msg_st->err = vspace_map_one_frame_attr(&map_addr, id.bytes,
frame, map_flags, NULL, NULL);
}
#if XOMP_BENCH_WORKER_EN
}
}
- if ((1UL << id.bits) < XOMP_TLS_SIZE) {
+ if (id.bytes < XOMP_TLS_SIZE) {
return XOMP_ERR_INVALID_MSG_FRAME;
}
errval_t err;
struct bulk_e10k *bu = b->st;
- struct frame_identity fid = { .base = 0, .bits = 0 };
+ struct frame_identity fid = { .base = 0, .bytes = 0 };
void *virt, *rx, *tx, *txhwb;
uint8_t core;
struct e10k_queue_ops ops = {
// Map registers
invoke_frame_identify(registers, &fid);
- err = vspace_map_one_frame_attr(&virt, 1 << fid.bits, registers,
+ err = vspace_map_one_frame_attr(&virt, fid.bytes, registers,
VREGION_FLAGS_READ_WRITE_NOCACHE, NULL, NULL);
assert(err_is_ok(err));
while (buf_size >>= 1) {
++size_bits;
}
+ // XXX: trying to understand this; is size_bits == log2(pool->buffer_size)?
+ // -SG, 2016-04-20
+ assert(1UL << size_bits == pool->buffer_size);
//split pool cap into smaller caps for each buffer
- err = cap_retype(buf_cap, pool->pool_cap, ObjType_Frame, size_bits);
+ /* one Frame cap per buffer, placed in consecutive slots */
+ err = cap_retype(buf_cap, pool->pool_cap, 0, ObjType_Frame, pool->buffer_size, pool->num_buffers);
assert(err_is_ok(err));//TODO: handle error instead
/* set the capref for each buffer into the new cnode and set
}
DMAMEM_DEBUG("registering DMA memory range [0x%016lx, 0x%016lx]\n",
- frame_id.base, frame_id.base + (1UL << frame_id.bits));
+ frame_id.base, frame_id.base + frame_id.bytes);
struct dma_mem_node *entry = calloc(1, sizeof(*entry));
if (entry == NULL) {
entry->cap = cap;
entry->paddr = frame_id.base;
- entry->size = (1UL << frame_id.bits);
+ entry->size = frame_id.bytes;
if (mem_mgr->convert) {
entry->paddr = mem_mgr->convert(mem_mgr->convert_arg, frame_id.base,
}
DMAMEM_DEBUG("deregister DMA memory range [0x%016lx, 0x%016lx]\n",
- frame_id.base, frame_id.base + (1UL << frame_id.bits));
+ frame_id.base, frame_id.base + frame_id.bytes);
lpaddr_t addr = frame_id.base;
if (mem_mgr->convert) {
addr = mem_mgr->convert(mem_mgr->convert_arg, frame_id.base,
- (1UL << frame_id.bits));
+ frame_id.bytes);
DMAMEM_DEBUG("converted base address [0x%016lx] -> [0x%016lx]\n",
frame_id.base, addr);
}
dma_dev->id = device_id++;
dma_dev->mmio.paddr = mmio_id.base;
- dma_dev->mmio.bytes = (1UL << mmio_id.bits);
+ dma_dev->mmio.bytes = mmio_id.bytes;
dma_dev->mmio.frame = mmio;
ioat_device->pci_addr = *pci_addr;
IOATDEV_DEBUG("init device with mmio range: {paddr=0x%016lx, size=%u kB}\n",
- dma_dev->id, mmio_id.base, 1 << mmio_id.bits);
+ dma_dev->id, mmio_id.base, mmio_id.bytes / 1024);
err = vspace_map_one_frame_attr((void**) &dma_dev->mmio.vaddr,
dma_dev->mmio.bytes, dma_dev->mmio.frame,
// XXX: should be address+size <= ...
// Need to add proper register size
if (address_base >= fid.base &&
- (address_base + size) <= (fid.base + UNBITS_GENPA(fid.bits))) {
+ (address_base + size) <= (fid.base + fid.bytes)) {
void* frame_base;
err = vspace_map_one_frame_attr(&frame_base, size,
device_cap_iter, VREGION_FLAGS_READ_WRITE_NOCACHE,
#define UNBITS_GENPA(bits) (((genpaddr_t)1) << (bits))
#define FLAGBITS ((uint8_t)-1)
+/// Compute log2 of the largest power-of-two block that fits into a region
+/// of size n starting at base_addr without violating the alignment of base_addr.
+static inline int bitaddralign(size_t n, lpaddr_t base_addr)
+{
+ int exponent = sizeof(size_t) * NBBY - 1;
+
+ if(n == 0) {
+ return 0;
+ }
+
+ while ((exponent > 0) && ((base_addr % (1UL << exponent)) != 0)){
+ exponent--;
+ }
+ return((1UL << exponent) > n ? log2floor(n) : exponent);
+}
+
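A quick trace of the helper above, with illustrative values:

    /*
     * bitaddralign(0x3000, 0x1000):
     *   the alignment scan stops at exponent 12 (0x1000 is 4 KiB-aligned);
     *   1UL << 12 == 0x1000 is not greater than n, so the result is 12
     *   (a 4 KiB block).
     * bitaddralign(0x800, 0x10000):
     *   the base is 64 KiB-aligned (exponent 16), but 1UL << 16 > n,
     *   so the result is log2floor(0x800) == 11 (a 2 KiB block).
     */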
/// Allocate a new node of given type/size. Does NOT initialise children pointers.
static struct mmnode *new_node(struct mm *mm, enum nodetype type,
uint8_t childbits)
return err_push(err, MM_ERR_CHUNK_SLOT_ALLOC);
}
- err = cap_retype(cap, node->cap, mm->objtype, *nodesizebits - childbits);
+ // retype node into 2^(maxchildbits) smaller nodes
+ DEBUG("retype: current size: %zu, child size: %zu, count: %u\n",
+ 1UL << *nodesizebits, 1UL << (*nodesizebits - childbits), UNBITS_CA(childbits));
+ err = cap_retype(cap, node->cap, 0, mm->objtype,
+ 1UL << (*nodesizebits - childbits),
+ UNBITS_CA(childbits));
if (err_is_fail(err)) {
// This is only a failure if the node was free. Otherwise,
// the caller could've deleted the cap already.
/* init fields */
assert(mm != NULL);
mm->objtype = objtype;
- assert((base & (UNBITS_GENPA(sizebits) - 1)) == 0);
+ // regions added via mm_add_multi need not be size-aligned; check dropped
+ //assert((base & (UNBITS_GENPA(sizebits) - 1)) == 0);
mm->base = base;
mm->sizebits = sizebits;
assert(maxchildbits > 0 && maxchildbits != FLAGBITS);
}
/* check that base is properly aligned to size */
- assert((base & (UNBITS_GENPA(sizebits) - 1)) == 0);
+ // regions added via mm_add_multi need not be size-aligned; check dropped
+ //assert((base & (UNBITS_GENPA(sizebits) - 1)) == 0);
/* construct root node if we need one */
if (mm->root == NULL) {
}
/**
+ * \brief Add a new region to the memory manager. The region does not need to
+ * be power-of-two sized or aligned.
+ *
+ * It is an error if any part of the region has already been added, or the
+ * region doesn't fit within the base and size specified for the allocator.
+ *
+ * \param mm Memory manager instance
+ * \param cap Capability to newly-added region
+ * \param size Size of region
+ * \param base Physical base address of region
+ */
+errval_t mm_add_multi(struct mm *mm, struct capref cap, gensize_t size, genpaddr_t base)
+{
+ DEBUG("%s: mm=%p, base=%#"PRIxGENPADDR", bytes=%zu\n", __FUNCTION__, mm, base, size);
+ gensize_t offset = 0;
+ errval_t err;
+ size_t rcount = 0;
+ // if we got aligned block; skip retype
+ if (1UL << bitaddralign(size, base) == size) {
+ DEBUG("%s: aligned region: adding original cap\n", __FUNCTION__);
+ return mm_add(mm, cap, log2ceil(size), base);
+ }
+
+ while (size > 0) {
+ uint8_t blockbits = bitaddralign(size, base);
+ gensize_t blockbytes = 1UL << blockbits;
+
+ /* get dest slot for retype */
+ struct capref temp;
+ err = mm->slot_alloc(mm->slot_alloc_inst, 1, &temp);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "Allocating slot");
+ return err_push(err, MM_ERR_SLOT_NOSLOTS);
+ }
+
+ err = cap_retype(temp, cap, offset, mm->objtype, 1UL << blockbits, 1);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "Retyping region");
+ return err_push(err, MM_ERR_MM_ADD_MULTI);
+ }
+
+ err = mm_add(mm, temp, blockbits, base);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "Adding region to allocator");
+ return err_push(err, MM_ERR_MM_ADD_MULTI);
+ }
+ DEBUG("Added block %#"PRIxGENPADDR"--%#"PRIxGENPADDR", %u bits\n",
+ base, base+blockbytes, blockbits);
+
+ // advance block pointers
+ base += blockbytes;
+ offset += blockbytes;
+ size -= blockbytes;
+ rcount ++;
+ }
+
+ DEBUG("%s: done. cap was split into %zu blocks\n", __FUNCTION__, rcount);
+
+ return SYS_ERR_OK;
+}
+
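A minimal usage sketch for mm_add_multi; the allocator instance and cap
names are placeholders and the addresses are chosen for illustration:

    /* Add a 12 KiB region at 0x101000: neither power-of-two sized nor
     * size-aligned. mm_add_multi splits it into the aligned blocks
     * 0x101000--0x102000 (4 KiB) and 0x102000--0x104000 (8 KiB),
     * retypes each block off the original cap, and mm_add()s it. */
    errval_t err = mm_add_multi(&mymm, ramcap, 0x3000, 0x101000);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "mm_add_multi failed");
    }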
+/**
* \brief Allocate an arbitrary memory region of a given size
*
* \param mm Memory manager instance
genpaddr_t maxlimit, struct capref *retcap,
genpaddr_t *retbase)
{
+ if (sizebits < BASE_PAGE_BITS) {
+ debug_printf("%s called with sizebits=%u from %p\n",
+ __FUNCTION__, sizebits, __builtin_return_address(0));
+ }
/* check bounds */
if(minbase + UNBITS_GENPA(sizebits) > maxlimit) {
printf("mm_alloc_range: mb %"PRIxGENPADDR" sizebits %x , <= max %"PRIxGENPADDR" \n",
}
/* check that base is properly aligned to size */
- assert((base & (UNBITS_GENPA(sizebits) - 1)) == 0);
+ // regions added via mm_add_multi need not be size-aligned; check dropped
+ //assert((base & (UNBITS_GENPA(sizebits) - 1)) == 0);
if (mm->root == NULL) {
return MM_ERR_NOT_FOUND; // nothing added
ETHERSRV_DEBUG("register_netd_memory: attempt to register memory\n");
// 2 is rx + tx
- if ((1L << pa.bits) < BASE_PAGE_SIZE * 2) {
+ if (pa.bytes < BASE_PAGE_SIZE * 2) {
ETHERSRV_DEBUG("netd did not provided enough for filter transfer\n");
err = FILTER_ERR_NOT_ENOUGH_MEMORY; /* ps: FIXME: enable this error */
{
struct capref frame;
errval_t r;
- struct frame_identity frameid = { .base = 0, .bits = 0 };
+ struct frame_identity frameid = { .base = 0, .bytes = 0 };
size_t capacity = rx_get_free_slots_fn_ptr();
size_t size;
size_t i;
abort();
}
buffer->pa = pa.base;
- buffer->bits = pa.bits;
+ buffer->bytes = pa.bytes;
- err = vspace_map_one_frame(&buffer->va, (1L << buffer->bits), cap,
+ err = vspace_map_one_frame(&buffer->va, buffer->bytes, cap,
NULL, NULL);
/*
uint64_t offset = bsm->offset;
--buffer->rxq.buffer_state_used;
- assert(offset < (1L << buffer->bits));
+ assert(offset < buffer->bytes);
void *dst = (void *) (uintptr_t) buffer->va + offset;
ETHERSRV_DEBUG("Copy packet pos %p %p %p\n", buffer->va, dst,
- (buffer->va + (1L << buffer->bits)));
+ (buffer->va + buffer->bytes));
uint64_t ts = rdtsc();
if (type == 0) { // Frame cap BAR
bar->frame_cap[nc] = cap;
if (nc == 0) {
- struct frame_identity id = { .base = 0, .bits = 0 };
+ struct frame_identity id = { .base = 0, .bytes = 0 };
invoke_frame_identify(cap, &id);
bar->paddr = id.base;
- bar->bits = id.bits;
- bar->bytes = (1ul << id.bits) * ncaps;
+ bar->bits = log2ceil(id.bytes);
+ bar->bytes = id.bytes * ncaps;
}
} else { // IO BAR
bar->io_cap = cap;
}
if(shmaddr != NULL) {
- err = vspace_map_one_frame_fixed_attr((lvaddr_t)shmaddr, 1 << id.bits,
+ err = vspace_map_one_frame_fixed_attr((lvaddr_t)shmaddr, id.bytes,
s->frame, attr, NULL, NULL);
m->mem = (void *)shmaddr;
} else {
- err = vspace_map_one_frame_attr(&m->mem, 1 << id.bits, s->frame,
+ err = vspace_map_one_frame_attr(&m->mem, id.bytes, s->frame,
attr, NULL, NULL);
}
return NULL;
}
spp->pa = f.base;
- spp->mem_size = (1 << f.bits);
+ spp->mem_size = f.bytes;
spp->alloted_slots = slot_no;
spp->is_creator = true;
spp->role = role;
return err;
}
spp->pa = f.base;
- spp->mem_size = (1 << f.bits);
+ spp->mem_size = f.bytes;
size_t mem_size = calculate_shared_pool_size(slot_no);
assert(spp->mem_size >= mem_size);
- err = vspace_map_one_frame_attr(&spp->va, (1L << f.bits), cap,
+ err = vspace_map_one_frame_attr(&spp->va, f.bytes, cap,
VREGION_FLAGS_READ_WRITE_NOCACHE, NULL, NULL);
if (err_is_fail(err)) {
err = invoke_frame_identify(frame, &id);
assert(err_is_ok(err));
- err = memobj->f.fill(memobj, offset, frame, 1UL << id.bits);
+ err = memobj->f.fill(memobj, offset, frame, id.bytes);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_MEMOBJ_FILL);
}
}
frame.slot ++;
- size -= (1UL << id.bits);
- offset += (1UL << id.bits);
+ size -= id.bytes;
+ offset += id.bytes;
}
return SYS_ERR_OK;
.cnode = si->taskcn,
.slot = TASKCN_SLOT_SELFEP,
};
- err = cap_retype(selfep, si->dcb, ObjType_EndPoint, 0);
+ // XXX: with retype-at-offset we could now give EP retypes a proper offset and size
+ err = cap_retype(selfep, si->dcb, 0, ObjType_EndPoint, 0, 1);
if (err_is_fail(err)) {
return err_push(err, SPAWN_ERR_CREATE_SELFEP);
}
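For reference, the mechanical change recurring throughout this patch is the
cap_retype signature: the old cap_retype(dest, src, type, size_bits) becomes
cap_retype(dest, src, offset, type, objsize, count), which retypes count
objects of objsize bytes each, starting at the given byte offset into the
source cap. A sketch with placeholder cap names:

    /* old API: split all of `ram` into 2^12-byte Frames */
    // err = cap_retype(frame, ram, ObjType_Frame, 12);
    /* new API: carve two 4 KiB Frames out of `ram`, starting at offset 0;
     * they are placed in consecutive slots beginning at `frame` */
    err = cap_retype(frame, ram, 0, ObjType_Frame, 4096, 2);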
return err;
}
- assert((1UL<<id.bits) > VIRTIO_MMIO_DEVICE_SIZE);
+ assert(id.bytes > VIRTIO_MMIO_DEVICE_SIZE);
VIRTIO_DEBUG_DEV("Using frame [0x%016lx, 0x%lx] as device frame.\n",
id.base,
- (1UL << id.bits));
+ id.bytes);
if (setup->backend.args.mmio.dev_base == NULL) {
VIRTIO_DEBUG_DEV("mapping device frame.\n");
VIRTIO_DEBUG_DEV("ERROR: could not identify the device frame.\n");
return err;
}
- size_t dev_size = (1UL << id.bits);
+ size_t dev_size = id.bytes;
void *dev_base;
err = vspace_map_one_frame_attr(&dev_base, dev_size, dev_cap,
VIRTIO_VREGION_FLAGS_DEVICE,
return err;
}
- assert((1UL<<id.bits) >= (offset + (bufsize * bufcount)));
+ assert(id.bytes >= (offset + (bufsize * bufcount)));
struct virtio_buffer_allocator *vbuf_alloc;
vring_mem_size += setup->vring_ndesc * (1UL << setup->header_bits);
}
- if (vring_mem_size > (1UL << id.bits)) {
+ if (vring_mem_size > id.bytes) {
VIRTIO_DEBUG_VQ("ERROR: supplied cap was too small %lx, needed %lx\n",
- ((1UL << id.bits)),
+ id.bytes,
(uint64_t )vring_mem_size);
return VIRTIO_ERR_CAP_SIZE;
}
}
/* check if we have enough space in the given cap */
- if ((1UL << id.bits) < size) {
+ if (id.bytes < size) {
return SYS_ERR_INVALID_SIZE_BITS;
}
void *addr;
- err = vspace_map_one_frame(&addr, (1UL << id.bits), cap, NULL, NULL);
+ err = vspace_map_one_frame(&addr, id.bytes, cap, NULL, NULL);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_VSPACE_MAP);
}
void *vring_base;
err = vspace_map_one_frame_attr(&vring_base,
- (1UL << id.bits),
+ id.bytes,
vring_cap,
VIRTIO_VREGION_FLAGS_RING,
NULL,
mi->cap_offset =offset;
mi->guest_paddr = virtio_host_translate_host_addr(id.base) + offset;
mi->vaddr = (lvaddr_t)(vring_base) + offset;
- mi->size = (1UL<<id.bits);
+ mi->size = id.bytes;
virtio_vq_host_add_mem_range(vqh, mi);
}
return SYS_ERR_OK;
size_t vring_mem_size = vring_size(setup->vring_ndesc, setup->vring_align);
vring_mem_size = ROUND_UP(vring_mem_size, BASE_PAGE_SIZE);
- if (vring_mem_size > (1UL << id.bits)) {
+ if (vring_mem_size > id.bytes) {
VIRTIO_DEBUG_VQ("ERROR: supplied cap was too small %lx, needed %lx\n",
- ((1UL << id.bits)),
+ id.bytes,
(uint64_t )vring_mem_size);
return VIRTIO_ERR_CAP_SIZE;
}
}
/* check if we have enough space in the given cap */
- if ((1UL << id.bits) < size) {
+ if (id.bytes < size) {
return SYS_ERR_INVALID_SIZE_BITS;
}
void *addr;
- err = vspace_map_one_frame(&addr, (1UL << id.bits), cap, NULL, NULL);
+ err = vspace_map_one_frame(&addr, id.bytes, cap, NULL, NULL);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_VSPACE_MAP);
}
"nkmtest_map_unmap",
"nkmtest_modify_flags",
"schedtest",
+ "test_retype",
"testerror",
"yield_test" ] ]
--- /dev/null
+import re
+
+RANGE_START=0x3ffe0000
+RANGE_END=0x40000000
+
+kaddr_cap_regex = re.compile(r"^(0x[0-9a-f]+){(.*)}$")
+cap_regex = \
+re.compile(r"^left=(?P<leftval>0x[0-9a-f]+),right=(?P<rightval>0x[0-9a-f]+),end=(?P<end>0x[0-9a-f]+),end_root=(?P<end_root>\d+),level=(?P<level>\d+),address=(?P<address>0x[0-9a-f]+),size=(?P<size>0x[0-9a-f]+),type=(?P<type>\d+),remote_rels=(?P<rcopies>\d)(?P<rancs>\d)(?P<rdescs>\d),extra=(?P<extra>.*)$")
+typemap = {
+ 45:"ObjType_IPI",
+ 44:"ObjType_KernelControlBlock",
+ 43:"ObjType_PerfMon",
+ 42:"ObjType_ID",
+ 41:"ObjType_Notify_IPI",
+ 40:"ObjType_Notify_RCK",
+ 39:"ObjType_IO",
+ 38:"ObjType_IRQSrc",
+ 37:"ObjType_IRQDest",
+ 36:"ObjType_IRQTable",
+ 35:"ObjType_VNode_AARCH64_l3_Mapping",
+ 34:"ObjType_VNode_AARCH64_l3",
+ 33:"ObjType_VNode_AARCH64_l2_Mapping",
+ 32:"ObjType_VNode_AARCH64_l2",
+ 31:"ObjType_VNode_AARCH64_l1_Mapping",
+ 30:"ObjType_VNode_AARCH64_l1",
+ 29:"ObjType_VNode_ARM_l2_Mapping",
+ 28:"ObjType_VNode_ARM_l2",
+ 27:"ObjType_VNode_ARM_l1_Mapping",
+ 26:"ObjType_VNode_ARM_l1",
+ 25:"ObjType_VNode_x86_32_ptable_Mapping",
+ 24:"ObjType_VNode_x86_32_ptable",
+ 23:"ObjType_VNode_x86_32_pdir_Mapping",
+ 22:"ObjType_VNode_x86_32_pdir",
+ 21:"ObjType_VNode_x86_32_pdpt_Mapping",
+ 20:"ObjType_VNode_x86_32_pdpt",
+ 19:"ObjType_VNode_x86_64_ptable_Mapping",
+ 18:"ObjType_VNode_x86_64_ptable",
+ 17:"ObjType_VNode_x86_64_pdir_Mapping",
+ 16:"ObjType_VNode_x86_64_pdir",
+ 15:"ObjType_VNode_x86_64_pdpt_Mapping",
+ 14:"ObjType_VNode_x86_64_pdpt",
+ 13:"ObjType_VNode_x86_64_pml4_Mapping",
+ 12:"ObjType_VNode_x86_64_pml4",
+ 11:"ObjType_Kernel",
+ 10:"ObjType_DevFrame_Mapping",
+ 9 :"ObjType_DevFrame",
+ 8 :"ObjType_Frame_Mapping",
+ 7 :"ObjType_Frame",
+ 6 :"ObjType_EndPoint",
+ 5 :"ObjType_Dispatcher",
+ 4 :"ObjType_FCNode",
+ 3 :"ObjType_CNode",
+ 2 :"ObjType_RAM",
+ 1 :"ObjType_PhysAddr",
+ 0 :"ObjType_Null",
+ }
+
+class Capability(object):
+ """Representation of a MDB node"""
+ def __init__(self, capstring):
+ capmatch = cap_regex.match(capstring)
+ for key, value in capmatch.groupdict().items():
+ val = value
+ if key != "extra":
+ val = int(value, 0)
+ setattr(self, key, val)
+
+ self.parent = None
+ self.leftcap = None
+ self.rightcap = None
+ self.nodeid = -1
+
+ def __str__(self):
+ return "{address=0x%x, size=0x%x, type=%s, left=0x%x, right=0x%x}" % \
+ (self.address, self.size, typemap[self.type], self.leftval, self.rightval)
+
+ def set_parent(self, parentcap):
+ self.parent = parentcap
+
+ def set_left(self, leftcap):
+ self.leftcap = leftcap
+
+ def set_right(self, rightcap):
+ self.rightcap = rightcap
+
+ def set_nodeid(self, nodeid):
+ self.nodeid = nodeid
+
+def parse_file(fname):
+ # nodes is map of kernel addr to cap
+ nodes = {}
+ with open(fname, 'r') as f:
+ for l in f:
+ l = l.strip()
+ match = kaddr_cap_regex.match(l)
+ if match is None:
+ continue
+ kaddr, cap = match.groups()
+ nodes[int(kaddr, 0)] = Capability(cap)
+ return nodes
+
+def build_tree(nodedict):
+ for kaddr,cap in nodedict.items():
+ left = cap.leftval
+ right = cap.rightval
+ if left != 0:
+ leftcap = nodedict[left]
+ leftcap.set_parent(cap)
+ else:
+ leftcap = None
+ if right != 0:
+ rightcap = nodedict[right]
+ rightcap.set_parent(cap)
+ else:
+ rightcap = None
+
+ cap.set_left(leftcap)
+ cap.set_right(rightcap)
+
+ root = None
+ for kaddr,cap in nodedict.items():
+ if cap.parent is None:
+ root = cap
+
+ return root
+
+def write_tree(root, outfh):
+ mynodeid = root.nodeid
+ if root.leftcap is not None:
+ leftid = root.leftcap.nodeid
+ outfh.write(" n%d -- n%d\n" % (mynodeid, leftid))
+ write_tree(root.leftcap, outfh)
+ if root.rightcap is not None:
+ rightid= root.rightcap.nodeid
+ outfh.write(" n%d -- n%d\n" % (mynodeid, rightid))
+ write_tree(root.rightcap, outfh)
+
+def write_dot_file(nodedict, treeroot, outfname):
+ nodeid = 0
+ with open(outfname, "w") as f:
+ f.write("graph mdb {\n")
+ # generate nodes
+ f.write(" // list of all nodes\n")
+ for kaddr,cap in nodedict.items():
+ color = "black"
+ cstart = cap.address
+ cend = cap.address + cap.size
+ if cstart >= RANGE_START and cend <= RANGE_END:
+ # cap completely in target range
+ print "cap inside target range"
+ color = "red"
+ elif cend >= RANGE_START and cend <= RANGE_END:
+ # cap ends in target range
+ print "cap ends inside target range"
+ color = "blue"
+ elif cstart >= RANGE_START and cstart <= RANGE_END:
+ # cap starts in target range
+ print "cap starts inside target range"
+ color = "green"
+ f.write(" n%d [label=\"0x%x--0x%x [%d]\",color=\"%s\"];\n" % \
+ (nodeid, cap.address,cap.address + cap.size, cap.type, color))
+ cap.set_nodeid(nodeid)
+ nodeid += 1
+
+ f.write(" // Tree\n")
+ write_tree(treeroot, f)
+ f.write("}\n")
+
+
+
+if __name__ == "__main__":
+ import sys
+ if len(sys.argv) < 2:
+ print "usage %s mdb_dump.txt [output.dot]" % sys.argv[0]
+ sys.exit(1)
+
+ nodes = parse_file(sys.argv[1])
+ treeroot = build_tree(nodes)
+
+ outf = "output.dot"
+ if len(sys.argv) >= 3:
+ outf = sys.argv[2]
+ write_dot_file(nodes, treeroot, outf)
+
+ sys.exit(0)
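+# Typical use (file names illustrative; rendering requires graphviz):
+#   python mdb_dump_process.py mdb_dump.txt mdb.dot && dot -Tpdf mdb.dot -o mdb.pdf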
self.del_module(name)
self.add_module(name, args)
- def get_menu_data(self, path):
+ def get_menu_data(self, path, root="(nd)"):
assert(self.kernel[0])
r = "timeout 0\n"
r += "title Harness image\n"
- r += "root (nd)\n"
+ r += "root %s\n" % root
if self.hypervisor:
r += "hypervisor %s\n" % os.path.join(path, self.hypervisor)
r += "kernel %s %s\n" % (
def _get_hake_conf(self, *args):
conf = super(HakeDebugBuild, self)._get_hake_conf(*args)
- conf["cOptFlags"] = "[\"-O2\"]"
+ conf["cOptFlags"] = "[\"-O2\", \"-g\"]"
+ return conf
+
+class HakeDebugGem5Build(HakeDebugBuild):
+ """Default Hake build: debug symbols, optimisations, assertions"""
+ name = 'debug_gem5'
+
+ def _get_hake_conf(self, *args):
+ conf = super(HakeDebugGem5Build, self)._get_hake_conf(*args)
+ conf["armv7_platform"] = '"gem5"'
return conf
class HakeDebugTraceBuild(HakeBuildBase):
all_builds = [HakeReleaseBuild, HakeDebugBuild, HakeReleaseTraceBuild,
HakeReleaseMdbInvariantsBuild,
- HakeDebugTraceBuild, HakeReleaseGem5Build]
+ HakeDebugTraceBuild, HakeReleaseGem5Build, HakeDebugGem5Build]
def mk_libc_builds():
def newlib_conf(self, *args):
import debug, machines
from machines import Machine
+QEMU_SCRIPT_PATH = 'tools/qemu-wrapper.sh' # relative to source tree
GRUB_IMAGE_PATH = 'tools/grub-qemu.img' # relative to source tree
QEMU_CMD_X64 = 'qemu-system-x86_64'
QEMU_CMD_X32 = 'qemu-system-i386'
class QEMUMachineX64(QEMUMachineBase):
def _get_cmdline(self):
- grub_image = os.path.join(self.options.sourcedir, GRUB_IMAGE_PATH)
- s = '-smp %d -fda %s -tftp %s' % (self.get_ncores(), grub_image,
- self.get_tftp_dir())
- return [QEMU_CMD_X64] + QEMU_ARGS_GENERIC + QEMU_ARGS_X64 + s.split()
+ qemu_wrapper = os.path.join(self.options.sourcedir, QEMU_SCRIPT_PATH)
+ menu_lst = os.path.join(self.get_tftp_dir(), 'menu.lst')
+ return [ qemu_wrapper, "--menu", menu_lst, "--arch", "x86_64",
+ "--smp", "%s" % self.get_ncores() ]
+
+ def set_bootmodules(self, modules):
+ path = os.path.join(self.get_tftp_dir(), 'menu.lst')
+ self._write_menu_lst(modules.get_menu_data('/', self.get_tftp_dir()), path)
def get_bootarch(self):
return "x86_64"
--- /dev/null
+
+import tests
+import re
+from common import TestCommon
+from results import PassFailResult
+
+@tests.add_test
+class RetypeTest(TestCommon):
+ '''test new retype code'''
+ name = "retype"
+
+ def get_modules(self, build, machine):
+ modules = super(RetypeTest, self).get_modules(build, machine)
+ modules.add_module("test_retype")
+ return modules
+
+ def get_finish_string(self):
+ return "retype: result:"
+
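+ # a passing run ends with a line like "retype: result: 0" (synthetic
+ # example); the field after the second colon is the test's exit status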
+ def process_data(self, testdir, rawiter):
+ # the test passed iff the last line is the finish string
+ passed = False
+ for line in rawiter:
+ if line.startswith(self.get_finish_string()):
+ _,_,results=line.split(':')
+ results = results.strip()
+ passed = results == "0"
+ return PassFailResult(passed)
+
+@tests.add_test
+class RetypeMultiTest(TestCommon):
+ '''test new retype code concurrently on all cores'''
+ name = "retype_multi"
+
+ def setup(self,build,machine,testdir):
+ super(RetypeMultiTest, self).setup(build,machine,testdir)
+ self._ncores = machine.get_ncores()
+ self._nseen = 0
+
+ def get_modules(self, build, machine):
+ modules = super(RetypeMultiTest, self).get_modules(build, machine)
+ for core in machine.get_coreids():
+ modules.add_module("test_retype", ["core=%d" % core])
+ return modules
+
+ def is_finished(self, line):
+ if line.startswith("retype: result:"):
+ self._nseen += 1
+ return self._nseen == self._ncores
+
+ def process_data(self, testdir, rawiter):
+ # the test passed iff every spawned test_retype instance reports result 0
+ nspawned = 0
+ npassed = 0
+ for line in rawiter:
+ if re.match(r'.*pawning .*test_retype on core', line):
+ nspawned += 1
+ if line.startswith("retype: result:"):
+ _,_,r=line.split(':')
+ r = r.strip()
+ if r == "0":
+ npassed += 1
+ return PassFailResult(npassed == nspawned)
#
##########################################################################
-HDFILE=hg.img
+HDFILE=hd.img
MENUFILE=""
ARCH=""
DEBUG_SCRIPT=""
+SMP=2
usage () {
echo "Usage: $0 --menu <file> --arch <arch> [options]"
echo " --kernel <file> (kernel binary, if no menu.lst given)"
echo " --initrd <file> (initial RAM disk, if no menu.lst given)"
echo " --args <args> (kernel command-line args, if no menu.lst given)"
+ echo " --smp <cores> (number of cores to use, defaults to $SMP)"
exit 1
}
"--args")
shift; KERNEL_CMDS="$1"
;;
+ "--smp")
+ shift; SMP="$1"
+ ;;
*)
echo "Unknown option $1 (try: --help)" >&2
exit 1
fi
else
echo "Using menu file $MENUFILE"
+ ROOT=`sed -rne 's,^root[ \t]*([^ ]*).*,\1,p' "$MENUFILE"`
+ if [ "$ROOT" != "(nd)" ]; then
+ echo "Root: $ROOT"
+ fi
KERNEL=`sed -rne 's,^kernel[ \t]*/([^ ]*).*,\1,p' "$MENUFILE"`
+ if [ "$ROOT" != "(nd)" ]; then
+ KERNEL="$ROOT/$KERNEL"
+ fi
if [ -z "$KERNEL" ]; then
echo "ERROR: No initial kernel specified in menu.lst file." >&2; exit 1
fi
KERNEL_CMDS=`sed -rne 's,^kernel[ \t]*[^ ]*[ \t]*(.*),\1,p' "$MENUFILE"`
- INITRD=`sed -rne 's,^module(nounzip)?[ \t]*/(.*),\2,p' "$MENUFILE" | awk '{ if(NR == 1) printf($$0); else printf("," $$0) }'`
+ if [ "$ROOT" != "(nd)" ]; then
+ AWKSCRIPT='{ if (NR == 1) printf(root "/" $$0); else printf("," root "/" $$0) }'
+ AWKARGS="-v root=$ROOT"
+ else
+ AWKSCRIPT='{ if (NR == 1) printf($$0); else printf("," $$0) }'
+ fi
+ INITRD=`sed -rne 's,^module(nounzip)?[ \t]*/(.*),\2,p' "$MENUFILE" | awk $AWKARGS "$AWKSCRIPT"`
if [ -z "$INITRD" ]; then
echo "ERROR: No initial ram disk modules specified in menu.lst file." >&2; exit 1
fi
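For reference, a minimal menu.lst fragment that the extractions above handle
(paths illustrative):

    root (nd)
    kernel /x86_64/sbin/cpu loglevel=4
    module /x86_64/sbin/init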
case "$ARCH" in
"x86_64")
QEMU_CMD="qemu-system-x86_64 \
- -smp 2 \
+ -smp $SMP \
-m 1024 \
-net nic,model=e1000 \
-net user \
vregion_destroy(origbios_vregion);
vregion_destroy(newbios_vregion);
- // TODO: Implement mm_free()
+ err = mm_free(&pci_mm_physaddr, bioscap, 0, BIOS_BITS);
+ assert(err_is_ok(err));
return err;
}
else {
skb_add_fact("memory_region(16'%" PRIxGENPADDR ",%u,%zu,%u,%tu).",
mrp->mr_base,
- mrp->mr_bits,
- ((size_t)1) << mrp->mr_bits,
+ 0, /* bits field retained for fact-format compatibility */
+ mrp->mr_bytes,
mrp->mr_type,
mrp->mrmod_data);
}
mrp->mr_type == RegionType_PlatformData) {
ACPI_DEBUG("Region %d: %"PRIxGENPADDR" - %"PRIxGENPADDR" %s\n",
i, mrp->mr_base,
- mrp->mr_base + (((size_t)1)<<mrp->mr_bits),
+ mrp->mr_base + mrp->mr_bytes,
mrp->mr_type == RegionType_PhyAddr ?
"physical address" : "platform data");
- err = cap_retype(devframe, phys_cap, ObjType_DevFrame, mrp->mr_bits);
+ err = cap_retype(devframe, phys_cap, 0, ObjType_DevFrame, mrp->mr_bytes, 1);
if (err_no(err) == SYS_ERR_REVOKE_FIRST) {
printf("cannot retype region %d: need to revoke first; ignoring it\n", i);
} else {
assert(err_is_ok(err));
- err = mm_add(&pci_mm_physaddr, devframe,
- mrp->mr_bits, mrp->mr_base);
+ err = mm_add_multi(&pci_mm_physaddr, devframe, mrp->mr_bytes,
+ mrp->mr_base);
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "adding region %d FAILED\n", i);
}
assert(err_is_ok(err));
err = invoke_frame_identify(pe_frame, &pe_frame_id);
assert(err_is_ok(err));
- err = vspace_map_one_frame_attr(&pe_vaddr, 1 << pe_frame_id.bits, pe_frame,
+ err = vspace_map_one_frame_attr(&pe_vaddr, pe_frame_id.bytes, pe_frame,
vtd_map_attr, NULL, NULL);
assert(err_is_ok(err));
assert((pe_frame_id.base & BASE_PAGE_MASK) == 0 &&
err = invoke_frame_identify(cap, &id);
if (err_is_fail(err)) return err;
- return vspace_map_one_frame(retbuf, 1UL << id.bits, cap, NULL, NULL);
+ return vspace_map_one_frame(retbuf, id.bytes, cap, NULL, NULL);
}
struct cte *cte = &ctes[i];
struct capability *curr = &cte->cap;
assert(curr->type == ObjType_RAM);
- printf("%s/%zu:dump:%zu: 0x%08"PRIxGENPADDR"/%"PRIu8" %c%c%c\n",
- name, num_caps, run, curr->u.ram.base, curr->u.ram.bits,
+ printf("%s/%zu:dump:%zu: 0x%08"PRIxGENPADDR"/0x%"PRIxGENSIZE" %c%c%c\n",
+ name, num_caps, run, curr->u.ram.base, curr->u.ram.bytes,
(HASCOP(cte) ? 'c' : '.'), (HASANC(cte) ? 'a' : '.'),
(HASDESC(cte) ? 'd' : '.'));
}
for (int i = 0; i < num_caps; i++) {
struct RAM ram = {
.base = i,
- .bits = 0,
+ .bytes = 0,
};
struct capability cap = {
.type = ObjType_RAM,
int bits = rand() % 16;
struct RAM ram = {
.base = ((uint32_t)rand())<<bits,
- .bits = bits,
+ .bytes = 1UL << bits,
};
struct capability cap = {
.type = ObjType_RAM,
struct RAM ram = {
.base = capbase & ((1<<size_bits)-1),
- .bits = capbits,
+ .bytes = 1UL << capbits,
};
struct capability cap = {
.type = ObjType_RAM,
struct RAM ram = {
.base = capbase & ((1<<size_bits)-1),
- .bits = capbits,
+ .bytes = 1UL << capbits,
};
struct capability cap = {
.type = ObjType_RAM,
assert(err_is_ok(err));
err = slot_alloc(&pagetable);
assert(err_is_ok(err));
- err = cap_retype(pagetable, ram, ObjType_VNode_x86_64_ptable, pt_bits);
+ err = cap_retype(pagetable, ram, 0, ObjType_VNode_x86_64_ptable, 1UL << pt_bits, 1);
assert(err_is_ok(err));
err = frame_alloc(&frame, FRAME_SIZE, &ret_bytes);
assert(err_is_ok(err));
// __FILE__, __FUNCTION__, __LINE__, i);
start = bench_tsc();
//printf("%s:%s:%d: \n", __FILE__, __FUNCTION__, __LINE__);
- err = cap_retype(frame, ram, ObjType_Frame, ram_bits);
+ err = cap_retype(frame, ram, 0, ObjType_Frame, 1UL << ram_bits, 1);
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "cap_retype failed.");
}
#include <string.h>
#include <bench/bench.h>
-#define PARENT_BITS 19
-#define CHILD_BITS 12
-#define CAPS_PER_CORE (1 << (PARENT_BITS - CHILD_BITS))
+#define PARENT_BYTES (1UL << 19)
+#define CHILD_BYTES 4096
+#define CAPS_PER_CORE (PARENT_BYTES / CHILD_BYTES)
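For reference, these constants give PARENT_BYTES / CHILD_BYTES = 2^19 / 2^12
= 128 child caps per core, the same count as the old PARENT_BITS/CHILD_BITS
definitions.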
/* --- Globals ---*/
// allocate a bunch of ramcaps
for (int i=0; i<CAPS_PER_CORE; i++) {
- err = ram_alloc(&my_caps[i], CHILD_BITS);
+ err = ram_alloc(&my_caps[i], log2ceil(CHILD_BYTES));
if (err_is_fail(err)) {
DEBUG_ERR(err, "xcorecap: RAM alloc failed\n");
abort();
cycles_t time_taken = 0;
for (int i=0; i<CAPS_PER_CORE; i++) {
cycles_t start = bench_tsc();
- err = cap_retype(retyped_caps[i], my_caps[i], ObjType_Frame, CHILD_BITS);
+ err = cap_retype(retyped_caps[i], my_caps[i], 0, ObjType_Frame, CHILD_BYTES, 1);
if (i >= 20 && i <= (CAPS_PER_CORE - 20)) { // avoid warmup / cooldown
time_taken += (bench_tsc() - start);
}
local_frame_sz = alloced_size;
debug_printf("alloc_local | Frame base: %016lx, size=%lx\n", id.base,
- 1UL << id.bits);
+ id.bytes);
err = vspace_map_one_frame(&local_buf, alloced_size, local_frame, NULL, NULL);
EXPECT_SUCCESS(err, "vspace_map_one_frame");
EXPECT_SUCCESS(err, "frame identify");
debug_printf("msg_open_cb | Frame base: %016lx, size=%lx\n", id.base,
- 1UL << id.bits);
+ id.bytes);
- assert((1UL << id.bits) >= XPHI_BENCH_MSG_FRAME_SIZE);
+ assert(id.bytes >= XPHI_BENCH_MSG_FRAME_SIZE);
err = vspace_map_one_frame(&remote_buf, XPHI_BENCH_MSG_FRAME_SIZE, msgframe,
NULL, NULL);
remote_frame = msgframe;
remote_base = id.base;
- remote_frame_sz = (1UL << id.bits);
+ remote_frame_sz = id.bytes;
init_buffer();
core_data->module_start = cpu_blob.paddr;
core_data->module_end = cpu_blob.paddr + cpu_blob.size;
core_data->urpc_frame_base = urpc_frame_id.base;
- core_data->urpc_frame_bits = urpc_frame_id.bits;
+ assert((1UL << log2ceil(urpc_frame_id.bytes)) == urpc_frame_id.bytes);
+ core_data->urpc_frame_bits = log2ceil(urpc_frame_id.bytes);
core_data->monitor_binary = monitor_blob.paddr;
core_data->monitor_binary_size = monitor_blob.size;
core_data->memory_base_start = spawn_mem_frameid.base;
- core_data->memory_bits = spawn_mem_frameid.bits;
+ assert((1UL << log2ceil(spawn_mem_frameid.bytes)) == spawn_mem_frameid.bytes);
+ core_data->memory_bits = log2ceil(spawn_mem_frameid.bytes);
core_data->src_core_id = disp_get_core_id();
core_data->src_arch_id = my_arch_id;
core_data->dst_core_id = coreid;
return err;
}
- err = cap_retype(*the_kcb, kcb_mem,
+ err = cap_retype(*the_kcb, kcb_mem, 0,
ObjType_KernelControlBlock,
- OBJBITS_KCB);
+ 1UL << OBJBITS_KCB, 1);
if (err_is_fail(err)) {
DEBUG_ERR(err, "Failure in cap_retype.");
}
// compute size of frame needed and allocate it
DEBUG("%s:%s:%d: urpc_frame_id.base=%"PRIxGENPADDR"\n",
__FILE__, __FUNCTION__, __LINE__, urpc_frame_id.base);
- DEBUG("%s:%s:%d: urpc_frame_id.size=%d\n",
- __FILE__, __FUNCTION__, __LINE__, urpc_frame_id.bits);
+ DEBUG("%s:%s:%d: urpc_frame_id.size=0x%zx\n",
+ __FILE__, __FUNCTION__, __LINE__, urpc_frame_id.bytes);
if (benchmark_flag) {
start = bench_tsc();
core_data->module_start = cpu_binary_phys;
core_data->module_end = cpu_binary_phys + cpu_binary_size;
core_data->urpc_frame_base = urpc_frame_id.base;
- core_data->urpc_frame_bits = urpc_frame_id.bits;
+ assert((1UL << log2ceil(urpc_frame_id.bytes)) == urpc_frame_id.bytes);
+ core_data->urpc_frame_bits = log2ceil(urpc_frame_id.bytes);
core_data->monitor_binary = monitor_binary_phys;
core_data->monitor_binary_size = monitor_binary_size;
core_data->memory_base_start = spawn_memory_identity.base;
- core_data->memory_bits = spawn_memory_identity.bits;
+ assert((1UL << log2ceil(spawn_memory_identity.bytes)) == spawn_memory_identity.bytes);
+ core_data->memory_bits = log2ceil(spawn_memory_identity.bytes);
core_data->src_core_id = disp_get_core_id();
core_data->src_arch_id = my_arch_id;
core_data->dst_core_id = coreid;
int transmit_buffers, uint8_t *mac_addr,
bool user_mac_addr, bool use_interrupt)
{
- struct frame_identity frameid = { .base = 0, .bits = 0 };
+ struct frame_identity frameid = { .base = 0, .bytes = 0 };
struct capref frame;
errval_t err;
static void queue_hw_init(uint8_t n)
{
errval_t r;
- struct frame_identity frameid = { .base = 0, .bits = 0 };
+ struct frame_identity frameid = { .base = 0, .bytes = 0 };
uint64_t tx_phys, txhwb_phys, rx_phys;
size_t tx_size, rx_size;
bool enable_global = !rxtx_enabled;
r = invoke_frame_identify(queues[n].tx_frame, &frameid);
assert(err_is_ok(r));
tx_phys = frameid.base;
- tx_size = 1 << frameid.bits;
+ tx_size = frameid.bytes;
r = invoke_frame_identify(queues[n].rx_frame, &frameid);
assert(err_is_ok(r));
rx_phys = frameid.base;
- rx_size = 1 << frameid.bits;
+ rx_size = frameid.bytes;
DEBUG("tx.phys=%"PRIx64" tx.size=%"PRIu64"\n", tx_phys, tx_size);
DEBUG("rx.phys=%"PRIx64" rx.size=%"PRIu64"\n", rx_phys, rx_size);
void qd_queue_init_data(struct e10k_binding *b, struct capref registers,
uint64_t macaddr)
{
- struct frame_identity frameid = { .base = 0, .bits = 0 };
+ struct frame_identity frameid = { .base = 0, .bytes = 0 };
errval_t err;
void *virt;
// Map device registers
invoke_frame_identify(registers, &frameid);
- err = vspace_map_one_frame_attr(&virt, 1 << frameid.bits, registers,
+ err = vspace_map_one_frame_attr(&virt, frameid.bytes, registers,
VREGION_FLAGS_READ_WRITE_NOCACHE, NULL, NULL);
assert(err_is_ok(err));
if (err_is_fail(err)) return err_push(err, OMAP_SDMA_ERR_CAP_LOOKUP);
lpaddr_t frame_start = id.base;
- int32_t frame_size = (1 << id.bits);
+ int32_t frame_size = id.bytes;
// image size cannot exceed hardware limits
if (count->x_count > OMAP44XX_SDMA_MAX_EN ||
if (err_is_fail(err)) return err_push(err, OMAP_SDMA_ERR_CAP_LOOKUP);
// infer element/frame number for smaller frame
- init_count_1d(MIN(src_id.bits, dst_id.bits), &count);
+ init_count_1d(MIN(log2ceil(src_id.bytes), log2ceil(dst_id.bytes)), &count);
// configure and initiate transfer
struct omap_sdma_channel_conf conf;
// get frame size and infer element/frame number
err = invoke_frame_identify(dst_cap, &dst_id);
if (err_is_fail(err)) return err_push(err, OMAP_SDMA_ERR_CAP_LOOKUP);
- init_count_1d(dst_id.bits, &count);
+ init_count_1d(log2ceil(dst_id.bytes), &count);
// configure and initiate transfer
struct omap_sdma_channel_conf conf;
USER_PANIC_ERR(err, "identifying the frame failed\n");
}
- size_t dev_size = (1UL<<id.bits);
+ size_t dev_size = id.bytes;
void *dev_regs;
err = vspace_map_one_frame_attr(&dev_regs, dev_size, dev_frame, VIRTIO_VREGION_FLAGS_DEVICE, NULL, NULL);
mi->is_client = is_client;
- frame_size = (1UL << id.bits);
+ frame_size = id.bytes;
#ifdef __k1om__
/*
#else
struct xnode *node = &phi->topology[xphi];
lpaddr_t offset = ((node->apt_base >> 32) - ((node->apt_base >> 34)<<2))<<32;
- err = interphi_bootstrap(phi, id.base, id.bits, offset, xphi, mi->is_client);
+ assert((1UL << log2ceil(id.bytes)) == id.bytes);
+ err = interphi_bootstrap(phi, id.base, log2ceil(id.bytes), offset, xphi, mi->is_client);
if (err_is_fail(err)) {
free(mi);
return err;
return err;
}
- frame_size = (1UL << id.bits);
+ frame_size = id.bytes;
void *addr;
err = vspace_map_one_frame(&addr, frame_size, mi->frame, NULL, NULL);
struct xeon_phi_boot_params *bp;
bp = (struct xeon_phi_boot_params *) (phi->apt.vbase + phi->os_offset);
bp->msg_base = id.base;
- bp->msg_size_bits = id.bits;
+ assert((1UL << log2ceil(id.bytes)) == id.bytes);
+ bp->msg_size_bits = log2ceil(id.bytes);
}
return SYS_ERR_OK;
svc_st->args.spawn_call.cmdlen = cmdlen;
svc_st->args.spawn_call.core = core;
svc_st->args.spawn_call.flags = flags;
- svc_st->args.spawn_call.cap_size_bits = id.bits;
+ assert((1UL << log2ceil(id.bytes)) == id.bytes);
+ svc_st->args.spawn_call.cap_size_bits = log2ceil(id.bytes);
svc_st->args.spawn_call.cap_base = id.base;
txq_send(msg_st);
struct interphi_msg_st *svc_st = (struct interphi_msg_st *) msg_st;
svc_st->args.open.msgbase = id.base;
- svc_st->args.open.msgbits = id.bits;
+ assert((1UL << log2ceil(id.bytes)) == id.bytes);
+ svc_st->args.open.msgbits = log2ceil(id.bytes);
svc_st->args.open.source = source;
svc_st->args.open.usrdata = usrdata;
svc_st->args.open.type = type;
return err;
}
- err = vspace_map_one_frame(&mmio, (1UL << id.bits), mmio_cap, NULL, NULL);
+ err = vspace_map_one_frame(&mmio, id.bytes, mmio_cap, NULL, NULL);
if (err_is_fail(err)) {
return err;
}
XDEBUG("mapped mmio register space @ [%p]\n", mmio);
- phi->mmio.bits = id.bits;
phi->mmio.vbase = (lvaddr_t) mmio;
phi->mmio.cap = mmio_cap;
phi->mmio.pbase = id.base;
- phi->mmio.length = (1UL << id.bits);
+ phi->mmio.length = id.bytes;
return SYS_ERR_OK;
}
st->b = node->binding;
st->base = id.base;
- st->bits = id.bits;
+ assert((1UL << log2ceil(id.bytes)) == id.bytes);
+ st->bits = log2ceil(id.bytes);
bootstrap_call_tx(st);
* Important: the type has to be DevFrame, we do not want to zero out the
* host memory!
*/
- err = mm_init(&sysmem_manager, ObjType_DevFrame, ret.base, ret.bits,
+ assert((1UL << log2ceil(ret.bytes)) == ret.bytes);
+ err = mm_init(&sysmem_manager, ObjType_DevFrame, ret.base, log2ceil(ret.bytes),
NUM_CHILDREN, slab_default_refill, slot_alloc_dynamic,
&sysmem_allocator, false);
if (err_is_fail(err)) {
return err_push(err, MM_ERR_MM_INIT);
}
- XSYSMEM_DEBUG("Adding cap: [0x%016lx, %i]\n", ret.base, ret.bits);
- err = mm_add(&sysmem_manager, sysmem_cap, ret.bits, ret.base);
+ XSYSMEM_DEBUG("Adding cap: [0x%016lx, %i]\n", ret.base, log2ceil(ret.bytes));
+ err = mm_add(&sysmem_manager, sysmem_cap, log2ceil(ret.bytes), ret.base);
if (err_is_fail(err)) {
return err;
}
return err;
}
- return mm_free(&sysmem_manager, frame, id.base, id.bits);
+ assert((1UL << log2ceil(id.bytes)) == id.bytes);
+ return mm_free(&sysmem_manager, frame, id.base, log2ceil(id.bytes));
}
/**
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "failed to identify the aperture cap");
}
- card->apt.bits = id.bits;
+ card->apt.length = id.bytes;
card->apt.pbase = id.base;
card->apt.bytes = bar_info[XEON_PHI_APT_BAR].bytes;
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "failed to identify the aperture cap");
}
- card->mmio.bits = id.bits;
+ card->mmio.length = id.bytes;
card->mmio.pbase = id.base;
card->mmio.bytes = bar_info[XEON_PHI_MMIO_BAR].bytes;
lpaddr_t pbase; ///< physical address of the mbar
size_t length; ///< length of the mapped area
struct capref cap; ///< capability of the mbar
- uint8_t bits; ///< size of the capability in bits
size_t bytes; ///< size of the region in bytes
};
assert(slots >= cap_count);
// retype RAM into Frames
struct capref first_frame = (struct capref) { .cnode = cnode, .slot = 0 };
- err = cap_retype(first_frame, frame, ObjType_Frame, log2floor(granularity));
+ err = cap_retype(first_frame, frame, 0, ObjType_Frame, granularity, cap_count);
assert(err_is_ok(err));
err = cap_destroy(frame);
assert(err_is_ok(err));
assert(err_is_ok(err));
assert(err_is_ok(ret));
- struct frame_identity fbid = { .base = 0, .bits = 0 };
+ struct frame_identity fbid = { .base = 0, .bytes = 0 };
err = invoke_frame_identify(fbcap, &fbid);
assert(err == 0);
char *vidmem;
- err = vspace_map_one_frame((void**)&vidmem, 1ul << fbid.bits, fbcap,
+ err = vspace_map_one_frame((void**)&vidmem, fbid.bytes, fbcap,
NULL, NULL);
assert(err_is_ok(err));
disp_set_core_id(my_core_id);
/* Create our endpoint to self */
- err = cap_retype(cap_selfep, cap_dispatcher, ObjType_EndPoint, 0);
+ err = cap_retype(cap_selfep, cap_dispatcher, 0, ObjType_EndPoint, 0, 1);
if (err_is_fail(err)) {
DEBUG_ERR(err, "Failed to create our endpoint to self");
abort();
/* parameters for local memory allocator used until we spawn mem_serv */
#define MM_REQUIREDBITS 24 ///< Required size of memory to boot (16MB)
+#define MM_REQUIREDBYTES (1UL << MM_REQUIREDBITS)
#define MM_MAXSIZEBITS (MM_REQUIREDBITS + 3) ///< Max size of memory in allocator
#define MM_MINSIZEBITS BASE_PAGE_BITS ///< Min size of allocation
#define MM_MAXCHILDBITS 1 ///< Max branching factor of BTree nodes
{
errval_t err;
+ /* init slot allocator */
+ static struct slot_alloc_basecn init_slot_alloc;
+ err = slot_alloc_basecn_init(&init_slot_alloc);
+ if (err_is_fail(err)) {
+ return err_push(err, MM_ERR_SLOT_ALLOC_INIT);
+ }
+
- /* walk bootinfo looking for suitable RAM cap to use
- * we pick the first cap equal to MM_REQUIREDBITS,
- * or else the next closest less than MM_MAXSIZEBITS */
+ /* walk bootinfo looking for a suitable RAM cap to use: we pick the
+ * first Empty region of at least MM_REQUIREDBYTES and carve our
+ * memory off the end of it with a retype at offset */
- int mem_region = -1, mem_slot = 0;
+ int mem_slot = 0;
struct capref mem_cap = {
.cnode = cnode_super,
.slot = 0,
};
+ /* get destination slot for retype */
+ genpaddr_t region_base = 0;
+ struct capref region_for_init;
+ err = slot_alloc_basecn(&init_slot_alloc, 1, &region_for_init);
+ if (err_is_fail(err)) {
+ return err_push(err, MM_ERR_SLOT_NOSLOTS);
+ }
+
assert(bi != NULL);
for (int i = 0; i < bi->regions_length; i++) {
assert(!bi->regions[i].mr_consumed);
if (bi->regions[i].mr_type == RegionType_Empty) {
- if (bi->regions[i].mr_bits >= MM_REQUIREDBITS &&
- bi->regions[i].mr_bits <= MM_MAXSIZEBITS &&
- (mem_region == -1 || bi->regions[i].mr_bits < bi->regions[mem_region].mr_bits)) {
- mem_region = i;
+ if (bi->regions[i].mr_bytes >= MM_REQUIREDBYTES) {
mem_cap.slot = mem_slot;
- if (bi->regions[i].mr_bits == MM_REQUIREDBITS) {
+ if (bi->regions[i].mr_bytes == MM_REQUIREDBYTES) {
+ bi->regions[i].mr_consumed = true;
+ region_base = bi->regions[i].mr_base;
break;
}
+
+ /* found cap bigger than required; cut off end */
+ bi->regions[i].mr_bytes -= MM_REQUIREDBYTES;
+ // can use mr_bytes as offset here
+ err = cap_retype(region_for_init, mem_cap,
+ bi->regions[i].mr_bytes, ObjType_RAM,
+ MM_REQUIREDBYTES, 1);
+ if (err_is_fail(err)) {
+ return err_push(err, MM_ERR_CHUNK_NODE);
+ }
+ mem_cap = region_for_init;
+ region_base = bi->regions[i].mr_base + bi->regions[i].mr_bytes;
+ break;
}
mem_slot++;
}
}
- if (mem_region < 0) {
- printf("Error: no RAM capability found in the size range "
- "2^%d to 2^%d bytes\n", MM_REQUIREDBITS, MM_MAXSIZEBITS);
- return INIT_ERR_NO_MATCHING_RAM_CAP;
- }
- bi->regions[mem_region].mr_consumed = true;
- /* init slot allocator */
- static struct slot_alloc_basecn init_slot_alloc;
- err = slot_alloc_basecn_init(&init_slot_alloc);
- if (err_is_fail(err)) {
- return err_push(err, MM_ERR_SLOT_ALLOC_INIT);
+ if (region_base == 0) {
+ printf("Error: no RAM capability >= %zu MB found", MM_REQUIREDBYTES / 1024 / 1024);
}
/* init MM allocator */
- assert(bi->regions[mem_region].mr_type != RegionType_Module);
- err = mm_init(&mymm, ObjType_RAM, bi->regions[mem_region].mr_base,
- bi->regions[mem_region].mr_bits, MM_MAXCHILDBITS, NULL,
+ err = mm_init(&mymm, ObjType_RAM, region_base,
+ MM_REQUIREDBITS, MM_MAXCHILDBITS, NULL,
slot_alloc_basecn, &init_slot_alloc, true);
if (err_is_fail(err)) {
return err_push(err, MM_ERR_MM_INIT);
slab_grow(&mymm.slabs, nodebuf, sizeof(nodebuf));
/* add single RAM cap to allocator */
- err = mm_add(&mymm, mem_cap, bi->regions[mem_region].mr_bits,
- bi->regions[mem_region].mr_base);
+ /* XXX: can't use mm_add_multi here, as the allocator tends to choke when
+ * we add smaller regions before larger */
+ debug_printf("using %#"PRIxGENPADDR", %zu MB for init's allocator\n",
+ region_base, MM_REQUIREDBYTES / 1024 / 1024);
+ err = mm_add(&mymm, mem_cap, MM_REQUIREDBITS, region_base);
if (err_is_fail(err)) {
return err_push(err, MM_ERR_MM_ADD);
}
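A worked example of the carve-out above (addresses illustrative): given a
24 MB Empty region at 0x1000000, mr_bytes (24 MB) exceeds MM_REQUIREDBYTES
(16 MB), so mr_bytes shrinks to 8 MB and the tail is retyped at offset 8 MB;
init's allocator is seeded with [0x1800000, 0x2800000), while the leading
8 MB remain in the bootinfo region for the later mem_serv pass.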
#include "kaluga.h"
-#define UNBITS_GENPA(bits) (((genpaddr_t)1) << (bits))
-
static struct mm register_manager;
/**
struct allocated_range* iter = allocation_head;
while (iter != NULL) {
if (address >= iter->id.base &&
- (address + size <= (iter->id.base + UNBITS_GENPA(iter->id.bits)))) {
+ (address + size <= (iter->id.base + iter->id.bytes))) {
KALUGA_DEBUG("Apparently, yes. We try to map that one.\n");
*devframe = iter->cr;
return SYS_ERR_OK;
struct frame_identity ret;
err = invoke_frame_identify(requested_cap, &ret);
assert (err_is_ok(err));
- err = mm_init(&register_manager, ObjType_DevFrame, ret.base, ret.bits,
+ assert((1ULL << log2ceil(ret.bytes)) == ret.bytes);
+ err = mm_init(&register_manager, ObjType_DevFrame, ret.base, log2ceil(ret.bytes),
1, slab_default_refill, slot_alloc_dynamic,
&devframes_allocator, false);
if (err_is_fail(err)) {
struct capref *cap = malloc(sizeof(struct capref));
errval_t err, ret;
+ // TODO: XXX: do this properly and inform caller, -SG 2016-04-20
+ //if (bits < BASE_PAGE_BITS) {
+ // bits = BASE_PAGE_BITS;
+ //}
+ if (bits < BASE_PAGE_BITS) {
+ debug_printf("WARNING: ALLOCATING RAM CAP WITH %u BITS\n", bits);
+ }
+
trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_ALLOC, bits);
/* refill slot allocator if needed */
if (bi->regions[i].mr_type == RegionType_Empty) {
dump_ram_region(i, bi->regions + i);
- mem_total += ((size_t)1) << bi->regions[i].mr_bits;
+ mem_total += bi->regions[i].mr_bytes;
if (bi->regions[i].mr_consumed) {
// region consumed by init, skipped
continue;
}
- err = mm_add(&mm_ram, mem_cap, bi->regions[i].mr_bits,
- bi->regions[i].mr_base);
+ err = mm_add_multi(&mm_ram, mem_cap, bi->regions[i].mr_bytes,
+ bi->regions[i].mr_base);
if (err_is_ok(err)) {
- mem_avail += ((size_t)1) << bi->regions[i].mr_bits;
+ mem_avail += bi->regions[i].mr_bytes;
} else {
- DEBUG_ERR(err, "Warning: adding RAM region %d (%p/%d) FAILED",
- i, bi->regions[i].mr_base, bi->regions[i].mr_bits);
+ DEBUG_ERR(err, "Warning: adding RAM region %d (%p/%zu) FAILED",
+ i, bi->regions[i].mr_base, bi->regions[i].mr_bytes);
}
/* try to refill slot allocator (may fail if the mem allocator is empty) */
err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
if (err_is_fail(err) && err_no(err) != MM_ERR_SLOT_MM_ALLOC) {
DEBUG_ERR(err, "in slot_prealloc_refill() while initialising"
- " memory allocator");
+ " memory allocator");
abort();
}
/* refill slab allocator if needed and possible */
if (slab_freecount(&mm_ram.slabs) <= MINSPARENODES
- && mem_avail > (1UL << (CNODE_BITS + OBJBITS_CTE)) * 2
- + 10 * BASE_PAGE_SIZE) {
+ && mem_avail > (1UL << (CNODE_BITS + OBJBITS_CTE)) * 2
+ + 10 * BASE_PAGE_SIZE) {
slab_default_refill(&mm_ram.slabs); // may fail
}
+
mem_cap.slot++;
}
}
#endif
return do_free(&mm_percore, ramcap, info.u.ram.base,
- info.u.ram.bits, &mem_avail);
+ log2ceil(info.u.ram.bytes), &mem_avail);
}
errval_t percore_free_handler_common(struct capref ramcap, genpaddr_t base,
info.type, info.u.ram.base, info.u.ram.base,
info.u.ram.bits);
#endif
- assert(bits == info.u.ram.bits);
+ assert(bits == log2ceil(info.u.ram.bytes));
mem_to_add = (memsize_t)1 << bits;
uint8_t new_bits = log2floor(mem_requested);
bits = MIN(bits, new_bits);
} else {
- DEBUG_ERR(err, "Warning: adding RAM region (%p/%d) FAILED",
- info.u.ram.base, info.u.ram.bits);
+ DEBUG_ERR(err, "Warning: adding RAM region (%p/0x%"PRIxGENSIZE") FAILED",
+ info.u.ram.base, info.u.ram.bytes);
}
}
printf("Cap is type %d Ram base 0x%"PRIxGENPADDR" Bits %d\n",
info.type, info.u.ram.base, info.u.ram.bits);
#endif
- assert(SMALLCAP_BITS == info.u.ram.bits);
+ assert(SMALLCAP_BITS == log2ceil(info.u.ram.bytes));
*mem_tot += (memsize_t)1<<SMALLCAP_BITS;
info.type, info.u.ram.base, info.u.ram.base,
info.u.ram.bits);
#endif
- if(steal_bits != info.u.ram.bits) {
+ if(steal_bits != log2ceil(info.u.ram.bytes)) {
printf("asked for %d bits, but got %d bits of type %d\n",
- steal_bits, info.u.ram.bits, info.type);
+ steal_bits, log2ceil(info.u.ram.bytes), info.type);
}
- assert(steal_bits == info.u.ram.bits);
+ assert(steal_bits == log2ceil(info.u.ram.bytes));
- memsize_t mem_to_add = (memsize_t)1 << info.u.ram.bits;
+ memsize_t mem_to_add = (memsize_t)info.u.ram.bytes;
- err = mm_free(&mm_percore, ramcap, info.u.ram.base, info.u.ram.bits);
+ err = mm_free(&mm_percore, ramcap, info.u.ram.base, log2ceil(info.u.ram.bytes));
if (err_is_fail(err)) {
if (err_no(err) == MM_ERR_NOT_FOUND) {
// memory wasn't there initially, add it
- err = mm_add(&mm_percore, ramcap, info.u.ram.bits, info.u.ram.base);
+ err = mm_add(&mm_percore, ramcap, log2ceil(info.u.ram.bytes), info.u.ram.base);
if (err_is_fail(err)) {
return err_push(err, MM_ERR_MM_ADD);
}
return err;
}
- size_t framesize = ((uintptr_t)1) << frameid.bits;
+ size_t framesize = frameid.bytes;
if (framesize < 2 * MON_URPC_CHANNEL_LEN) {
return LIB_ERR_UMP_FRAME_OVERFLOW;
}
/* } */
// Identify UMP frame for tracing
- struct frame_identity umpid = { .base = 0, .bits = 0 };
+ struct frame_identity umpid = { .base = 0, .bytes = 0 };
err = invoke_frame_identify(frame, &umpid);
assert(err_is_ok(err));
ump_binding->ump_state.chan.recvid = (uintptr_t)umpid.base;
return err;
}
- size_t framesize = ((uintptr_t)1) << frameid.bits;
+ size_t framesize = frameid.bytes;
if (framesize < 2 * MON_URPC_CHANNEL_LEN) {
return LIB_ERR_UMP_FRAME_OVERFLOW;
}
/* } */
// Identify UMP frame for tracing
- struct frame_identity umpid = { .base = 0, .bits = 0 };
+ struct frame_identity umpid = { .base = 0, .bytes = 0 };
err = invoke_frame_identify(frame, &umpid);
assert(err_is_ok(err));
ump_binding->ump_state.chan.recvid = (uintptr_t)umpid.base;
return err;
}
- size_t framesize = ((uintptr_t)1) << frameid.bits;
+ size_t framesize = frameid.bytes;
if (framesize < 2 * MON_URPC_CHANNEL_LEN) {
return LIB_ERR_UMP_FRAME_OVERFLOW;
}
}
// Identify UMP frame for tracing
- struct frame_identity umpid = { .base = 0, .bits = 0 };
+ struct frame_identity umpid = { .base = 0, .bytes = 0 };
err = invoke_frame_identify(frame, &umpid);
assert(err_is_ok(err));
ump_binding->ump_state.chan.recvid = (uintptr_t)umpid.base;
if (!b) {
DEBUG_CAPOPS("%s: forwarding to monitor.0\n", __FUNCTION__);
// we're not on core 0, so forward free_monitor msg to monitor.0
- err = mon_ram_free(&cap_data, ram.base, ram.bits);
+ err = mon_ram_free(&cap_data, ram.base, log2ceil(ram.bytes));
assert(err_is_ok(err));
} else {
DEBUG_CAPOPS("%s: we are monitor.0\n", __FUNCTION__);
// XXX: This should not be an RPC! It could stall the monitor, but
// we trust mem_serv for the moment.
- err = b->vtbl.free_monitor(b, cap, ram.base, ram.bits, &result);
+ err = b->vtbl.free_monitor(b, cap, ram.base, log2ceil(ram.bytes), &result);
assert(err_is_ok(err));
assert(err_is_ok(result));
}
errval_t status, uint8_t relations,
genvaddr_t st);
void retype_request__rx(struct intermon_binding *b, intermon_caprep_t srcrep,
- uint32_t desttype, uint32_t destbits, genvaddr_t st);
+ uint64_t offset, uint32_t desttype, uint64_t destsize,
+ uint64_t count, genvaddr_t st);
void retype_response__rx(struct intermon_binding *b, errval_t status,
genvaddr_t st);
void revoke_mark__rx(struct intermon_binding *b,
struct retype_check_st {
enum objtype type;
- size_t objbits;
+ size_t objsize;
+ size_t count;
+ size_t offset;
struct domcapref src;
struct result_closure cont;
};
}
void
-retype_request__rx(struct intermon_binding *b, intermon_caprep_t srcrep,
- uint32_t desttype, uint32_t destbits, genvaddr_t st)
+retype_request__rx(struct intermon_binding *b, intermon_caprep_t srcrep, uint64_t offset,
+ uint32_t desttype, uint64_t destsize, uint64_t count, genvaddr_t st)
{
errval_t err;
req_st->queue_elem.cont = retype_result__send;
req_st->check.type = desttype;
- req_st->check.objbits = destbits;
+ req_st->check.objsize = destsize;
+ req_st->check.count = count;
+ req_st->check.offset = offset;
req_st->check.cont = MKRESCONT(retype_request_check__rx, req_st);
req_st->from = ((struct intermon_state*)b->st)->core_id;
req_st->request_st = st;
errval_t err;
err = intermon_capops_request_retype__tx(b, NOP_CONT, req_st->caprep,
+ req_st->check.offset,
req_st->check.type,
- req_st->check.objbits,
+ req_st->check.objsize,
+ req_st->check.count,
(lvaddr_t)req_st);
struct domcapref *src = &check->src;
struct domcapref *destcn = &output->destcn;
assert(capcmp(src->croot, destcn->croot));
- err = monitor_create_caps(src->croot, check->type, check->objbits,
- src->cptr, src->bits, destcn->cptr,
- destcn->bits, output->start_slot);
+ err = monitor_create_caps(src->croot, check->type, check->objsize,
+ check->count, src->cptr, src->bits,
+ check->offset, destcn->cptr, destcn->bits,
+ output->start_slot);
}
struct result_closure cont = output->cont;
assert(cont.handler);
*/
void
-capops_retype(enum objtype type, size_t objbits, struct capref croot,
+capops_retype(enum objtype type, size_t objsize, size_t count, struct capref croot,
capaddr_t dest_cn, uint8_t dest_bits, cslot_t dest_slot,
- capaddr_t src, uint8_t src_bits,
+ capaddr_t src, uint8_t src_bits, gensize_t offset,
retype_result_handler_t result_handler, void *st)
{
errval_t err;
goto err_cont;
}
- err = invoke_cnode_retype(croot, src, type, objbits, dest_cn, dest_slot,
- dest_bits);
+ err = invoke_cnode_retype(croot, src, offset, type, objsize, count,
+ dest_cn, dest_slot, dest_bits);
if (err_no(err) != SYS_ERR_RETRY_THROUGH_MONITOR) {
goto err_cont;
}
// fill in parameters
rtp_req_st->check.type = type;
- rtp_req_st->check.objbits = objbits;
+ rtp_req_st->check.objsize = objsize;
+ rtp_req_st->check.count = count;
+ rtp_req_st->check.offset = offset;
rtp_req_st->check.src = (struct domcapref){
.croot = croot,
.cptr = src,
// fill in parameters
rtp_loc_st->check.type = type;
- rtp_loc_st->check.objbits = objbits;
+ rtp_loc_st->check.objsize = objsize;
+ rtp_loc_st->check.count = count;
+ rtp_loc_st->check.offset = offset;
rtp_loc_st->check.src = (struct domcapref){
.croot = croot,
.cptr = src,