failure MAP_BOOTINFO "Failure mapping bootinfo to new domain",
failure FIND_MODULE "Didn't find module to be spawned",
failure MAP_MODULE "Failed mapping in module",
+ failure UNMAP_MODULE "Failed unmapping module",
failure CREATE_SEGCN "Failed to create segment CNode",
failure CREATE_SMALLCN "Failed to create small RAM caps CNode",
#endif
invoke_vnode_map(struct capref ptable, capaddr_t slot, capaddr_t from,
int frombits, uintptr_t flags, uintptr_t offset,
- uintptr_t pte_count)
+ uintptr_t pte_count, capaddr_t mcn_addr, int mcn_vbits,
+ cslot_t mapping_slot)
{
uint8_t invoke_bits = get_cap_valid_bits(ptable);
capaddr_t invoke_cptr = get_cap_addr(ptable) >> (CPTR_BITS - invoke_bits);
assert(frombits <= 0xff);
// XXX: needs check of flags, offset, and pte_count sizes
+ // mcn_addr/mcn_vbits address a CNode and mapping_slot a slot within it;
+ // the kernel places the mapping capability created by this invocation
+ // there (see the matching arg7..arg9 unpacking in the map handler).
return syscall10((invoke_bits << 16) | (VNodeCmd_Map << 8) | SYSCALL_INVOKE,
invoke_cptr, from, (slot << 16) | frombits,
flags, offset, pte_count, mcn_addr, mcn_vbits,
mapping_slot).error;
}
//XXX: workaround for inline bug of arm-gcc 4.6.1 and lower
#else
static inline errval_t
#endif
-invoke_vnode_unmap(struct capref cap, capaddr_t mapping_cptr, int mapping_bits,
- size_t entry, size_t pte_count)
+invoke_vnode_unmap(struct capref cap, capaddr_t mapping_cptr, int mapping_bits)
{
uint8_t invoke_bits = get_cap_valid_bits(cap);
capaddr_t invoke_cptr = get_cap_addr(cap) >> (CPTR_BITS - invoke_bits);
- pte_count -= 1;
-
- assert(entry < 4096);
- assert(pte_count < 4096);
assert(mapping_bits <= 0xff);
+ // entry and pte_count no longer travel through the syscall: the kernel
+ // now recovers them from the mapping capability named by mapping_cptr.
return syscall4((invoke_bits << 16) | (VNodeCmd_Unmap << 8) | SYSCALL_INVOKE,
- invoke_cptr, mapping_cptr,
- ((mapping_bits & 0xff)<<24) | ((pte_count & 0xfff)<<12) |
- (entry & 0xfff)).error;
+ invoke_cptr, mapping_cptr, mapping_bits).error;
}
/**
#else
static inline errval_t
#endif
-invoke_frame_modify_flags (struct capref frame, uintptr_t offset,
+invoke_mapping_modify_flags (struct capref mapping, uintptr_t offset,
uintptr_t pages, uintptr_t flags)
{
- uint8_t invoke_bits = get_cap_valid_bits(frame);
- capaddr_t invoke_cptr = get_cap_addr(frame) >> (CPTR_BITS - invoke_bits);
+ uint8_t invoke_bits = get_cap_valid_bits(mapping);
+ capaddr_t invoke_cptr = get_cap_addr(mapping) >> (CPTR_BITS - invoke_bits);
uintptr_t arg1 = ((uintptr_t)invoke_bits) << 16;
- arg1 |= ((uintptr_t)FrameCmd_ModifyFlags<<8);
+ arg1 |= ((uintptr_t)MappingCmd_Modify<<8);
arg1 |= (uintptr_t)SYSCALL_INVOKE;
return syscall5(arg1, invoke_cptr, offset, pages, flags).error;
#define L2_PAGE_OFFSET(idx) (L2_PAGE_IDX(idx) * PTABLE_SIZE)
#define L2_IS_MAPPED(ptable, idx) \
- ((ptable)->u.vnode.mapped & (1 << L2_PAGE_IDX(idx)))
+ (!capref_is_null((ptable)->u.vnode.mapped[idx]))
/// Node in the meta-data, corresponds to an actual VNode object
struct vnode *next; ///< Next entry in list of siblings
union {
struct {
- struct capref cap[L2_PER_PAGE]; ///< Capability of this VNode
- struct vnode *children; ///< Children of this VNode
- uint8_t mapped; ///< which 1k tables are actually mapped
+ struct capref cap; ///< Capability of this VNode
+ struct vnode *children; ///< Children of this VNode
+ struct capref mapped[L2_PER_PAGE]; // < mapping caps for mapped 1k tables
} vnode; // for non-leaf node
struct {
struct capref cap; ///< Capability of this VNode
+ struct capref mapping; ///< Mapping cap for this vnode
genvaddr_t offset; ///< Offset within mapped frame cap
vregion_flags_t flags; ///< Flags for mapping
uint16_t pte_count; ///< number of user page table entries consumed by this mapping
}
static struct sysret
-handle_frame_modify_flags(
- struct capability *to,
- arch_registers_state_t *context,
- int argc
- )
-{
- // Modify flags of (part of) mapped region of frame
- assert (5 == argc);
-
- assert(to->type == ObjType_Frame || to->type == ObjType_DevFrame);
-
- // unpack arguments
- struct registers_arm_syscall_args* sa = &context->syscall_args;
- size_t offset = sa->arg2; // in pages; of first page to modify from first
- // page in mapped region
- size_t pages = sa->arg3; // #pages to modify
- size_t flags = sa->arg4; // new flags
-
- paging_modify_flags(to, offset, pages, flags);
-
- return (struct sysret) {
- .error = SYS_ERR_OK,
- .value = 0,
- };
-}
-
-static struct sysret
handle_mint(
struct capability* root,
arch_registers_state_t* context,
int argc
)
{
- assert(7 == argc);
+ assert(10 == argc);
struct registers_arm_syscall_args* sa = &context->syscall_args;
/* Retrieve arguments */
- capaddr_t source_cptr = (capaddr_t)sa->arg2;
- capaddr_t dest_slot = ((capaddr_t)sa->arg3) >> 16;
- int source_vbits = ((int)sa->arg3) & 0xff;
- uintptr_t flags, offset,pte_count;
- flags = (uintptr_t)sa->arg4;
- offset = (uintptr_t)sa->arg5;
- pte_count = (uintptr_t)sa->arg6;
+ capaddr_t source_cptr = (capaddr_t)sa->arg2;
+ capaddr_t dest_slot = ((capaddr_t)sa->arg3) >> 16;
+ int source_vbits = ((int)sa->arg3) & 0xff;
+ uintptr_t flags = (uintptr_t)sa->arg4;
+ uintptr_t offset = (uintptr_t)sa->arg5;
+ uintptr_t pte_count = (uintptr_t)sa->arg6;
+ // arg7..arg9: CNode (address + valid bits) and slot that will receive
+ // the new mapping capability -- presumably consumed by sys_map; confirm
+ // against its definition.
+ capaddr_t mcn_addr = (capaddr_t)sa->arg7;
+ int mcn_vbits = (int)sa->arg8;
+ cslot_t mapping_slot = (cslot_t)sa->arg9;
+
+
return sys_map(ptable, dest_slot, source_cptr, source_vbits,
- flags, offset, pte_count);
+ flags, offset, pte_count, mcn_addr, mcn_vbits,
+ mapping_slot);
}
static struct sysret
/* Retrieve arguments */
capaddr_t mapping_cptr = (capaddr_t)sa->arg2;
- int mapping_bits = (((int)sa->arg3) >> 24) & 0xff;
- size_t pte_count = (((size_t)sa->arg3) >> 12) & 0xfff;
- pte_count += 1;
- size_t entry = ((size_t)sa->arg3) & 0xfff;
+ // entry/pte_count are gone from the wire format; the kernel reads them
+ // from the mapping capability looked up below.
+ int mapping_bits = (int)sa->arg3 & 0xff;
errval_t err;
struct cte *mapping = NULL;
err = caps_lookup_slot(&dcb_current->cspace.cap, mapping_cptr, mapping_bits,
&mapping, CAPRIGHTS_READ_WRITE);
if (err_is_fail(err)) {
+ printk(LOG_NOTE, "%s: caps_lookup_slot: %"PRIuERRV"\n", __FUNCTION__, err);
return SYSRET(err_push(err, SYS_ERR_CAP_NOT_FOUND));
}
- err = page_mappings_unmap(ptable, mapping, entry, pte_count);
+ err = page_mappings_unmap(ptable, mapping);
+ if (err_is_fail(err)) {
+ printk(LOG_NOTE, "%s: page_mappings_unmap: %"PRIuERRV"\n", __FUNCTION__, err);
+ }
return SYSRET(err);
}
+// Invocation handler for MappingCmd_Destroy on mapping capabilities.
+// Not yet implemented: panics if invoked. The SYS_ERR_OK return is
+// unreachable and only satisfies the handler signature.
+static struct sysret
+handle_mapping_destroy(
+    struct capability *to,
+    arch_registers_state_t *context,
+    int argc)
+{
+    panic("NYI!");
+    return SYSRET(SYS_ERR_OK);
+}
+
+// Invocation handler for MappingCmd_Modify: change the paging flags of
+// (part of) the region covered by a mapping capability. Replaces the old
+// handle_frame_modify_flags, but is invoked on the mapping cap instead of
+// the frame cap, and -- unlike the old handler -- propagates the error
+// returned by paging_modify_flags rather than always reporting SYS_ERR_OK.
+static struct sysret
+handle_mapping_modify(
+    struct capability *to,
+    arch_registers_state_t *context,
+    int argc
+    )
+{
+    assert(5 == argc);
+    struct registers_arm_syscall_args* sa = &context->syscall_args;
+
+    // Modify flags of (part of) mapped region of frame
+    assert(type_is_mapping(to->type));
+
+    // unpack arguments
+    size_t offset = sa->arg2; // in pages; of first page to modify from first
+                              // page in mapped region
+    size_t pages = sa->arg3; // #pages to modify
+    size_t flags = sa->arg4; // new flags
+
+    errval_t err = paging_modify_flags(to, offset, pages, flags);
+
+    return (struct sysret) {
+        .error = err,
+        .value = 0,
+    };
+}
+
/// Different handler for cap operations performed by the monitor
INVOCATION_HANDLER(monitor_handle_retype)
{
},
[ObjType_Frame] = {
[FrameCmd_Identify] = handle_frame_identify,
- [FrameCmd_ModifyFlags] = handle_frame_modify_flags,
},
[ObjType_DevFrame] = {
[FrameCmd_Identify] = handle_frame_identify,
- [FrameCmd_ModifyFlags] = handle_frame_modify_flags,
},
[ObjType_CNode] = {
[CNodeCmd_Copy] = handle_copy,
[VNodeCmd_Map] = handle_map,
[VNodeCmd_Unmap] = handle_unmap,
},
+ [ObjType_Frame_Mapping] = {
+ [MappingCmd_Destroy] = handle_mapping_destroy,
+ [MappingCmd_Modify] = handle_mapping_modify,
+ },
+ [ObjType_DevFrame_Mapping] = {
+ [MappingCmd_Destroy] = handle_mapping_destroy,
+ [MappingCmd_Modify] = handle_mapping_modify,
+ },
+ [ObjType_VNode_ARM_l1_Mapping] = {
+ [MappingCmd_Destroy] = handle_mapping_destroy,
+ [MappingCmd_Modify] = handle_mapping_modify,
+ },
+ [ObjType_VNode_ARM_l2_Mapping] = {
+ [MappingCmd_Destroy] = handle_mapping_destroy,
+ [MappingCmd_Modify] = handle_mapping_modify,
+ },
[ObjType_IRQTable] = {
[IRQTableCmd_Set] = handle_irq_table_set,
[IRQTableCmd_Delete] = handle_irq_table_delete,
#include <arm_hal.h>
#include <cap_predicates.h>
#include <dispatch.h>
+#include <mdb/mdb_tree.h>
inline static uintptr_t paging_round_down(uintptr_t address, uintptr_t size)
{
struct capability* src,
uintptr_t kpi_paging_flags,
uintptr_t offset,
- uintptr_t pte_count)
+ uintptr_t pte_count,
+ struct cte* mapping_cte)
{
//
// Note:
panic("Invalid target");
}
- struct cte *src_cte = cte_for_cap(src);
- src_cte->mapping_info.pte_count = pte_count;
- src_cte->mapping_info.pte = dest_lpaddr;
- src_cte->mapping_info.offset = offset;
+ create_mapping_cap(mapping_cte, src,
+ dest_lpaddr + slot * sizeof(union arm_l1_entry),
+ pte_count);
for (int i = 0; i < pte_count; i++) {
entry->raw = 0;
assert(aligned(src_lpaddr, 1u << 10));
assert((src_lpaddr < dest_lpaddr) || (src_lpaddr >= dest_lpaddr + 16384));
- struct cte *src_cte = cte_for_cap(src);
- src_cte->mapping_info.pte_count = pte_count;
- src_cte->mapping_info.pte = dest_lpaddr + slot;
- src_cte->mapping_info.offset = 0;
+ create_mapping_cap(mapping_cte, src,
+ dest_lpaddr + slot * sizeof(union arm_l1_entry),
+ pte_count);
for (int i = 0; i < pte_count; i++, entry++)
{
struct capability* src,
uintptr_t kpi_paging_flags,
uintptr_t offset,
- uintptr_t pte_count)
+ uintptr_t pte_count,
+ struct cte* mapping_cte)
{
assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));
panic("Invalid target");
}
- struct cte *src_cte = cte_for_cap(src);
- src_cte->mapping_info.pte_count = pte_count;
- src_cte->mapping_info.pte = dest_lpaddr;
- src_cte->mapping_info.offset = offset;
+ create_mapping_cap(mapping_cte, src,
+ dest_lpaddr + slot * sizeof(union arm_l2_entry),
+ pte_count);
for (int i = 0; i < pte_count; i++) {
entry->raw = 0;
/// Create page mappings
errval_t caps_copy_to_vnode(struct cte *dest_vnode_cte, cslot_t dest_slot,
struct cte *src_cte, uintptr_t flags,
- uintptr_t offset, uintptr_t pte_count)
+ uintptr_t offset, uintptr_t pte_count,
+ struct cte *mapping_cte)
{
struct capability *src_cap = &src_cte->cap;
struct capability *dest_cap = &dest_vnode_cte->cap;
-
- if (src_cte->mapping_info.pte) {
- return SYS_ERR_VM_ALREADY_MAPPED;
- }
+ // caller must hand in an empty cte; on success it receives the newly
+ // created mapping capability.
+ assert(mapping_cte->cap.type == ObjType_Null);
+ errval_t err;
if (ObjType_VNode_ARM_l1 == dest_cap->type) {
//printf("caps_map_l1: %zu\n", (size_t)pte_count);
- return caps_map_l1(dest_cap, dest_slot, src_cap,
+ err = caps_map_l1(dest_cap, dest_slot, src_cap,
flags,
offset,
- pte_count
+ pte_count,
+ mapping_cte
);
}
else if (ObjType_VNode_ARM_l2 == dest_cap->type) {
//printf("caps_map_l2: %zu\n", (size_t)pte_count);
- return caps_map_l2(dest_cap, dest_slot, src_cap,
+ err = caps_map_l2(dest_cap, dest_slot, src_cap,
flags,
offset,
- pte_count
+ pte_count,
+ mapping_cte
);
}
else {
panic("ObjType not VNode");
}
+
+ if (err_is_fail(err)) {
+ // undo any partial initialization so the cte stays reusable
+ memset(mapping_cte, 0, sizeof(*mapping_cte));
+ return err;
+ }
+
+ assert(type_is_mapping(mapping_cte->cap.type));
+ // publish the freshly created mapping cap in the mapping database
+ err = mdb_insert(mapping_cte);
+ if (err_is_fail(err)) {
+ printk(LOG_ERR, "%s: mdb_insert: %"PRIuERRV"\n", __FUNCTION__, err);
+ }
+
+ TRACE_CAP_MSG("created", mapping_cte);
+
+ return err;
}
size_t do_unmap(lvaddr_t pt, cslot_t slot, size_t num_pages)
return unmapped_pages;
}
-static inline void read_pt_entry(struct capability *pgtable,
- size_t slot, bool is_section, genpaddr_t *paddr)
-{
- assert(type_is_vnode(pgtable->type));
- assert(paddr);
-
- genpaddr_t gp = get_address(pgtable);
- lpaddr_t lp = gen_phys_to_local_phys(gp);
- lvaddr_t lv = local_phys_to_mem(lp);
-
- switch (pgtable->type) {
- case ObjType_VNode_ARM_l1:
- {
- union arm_l1_entry *e = (union arm_l1_entry*)lv;
- if (is_section) {
- *paddr = (genpaddr_t)(e->section.base_address) << 20;
- return;
- } else {
- *paddr = (genpaddr_t)(e->page_table.base_address) << 10;
- return;
- }
- }
- case ObjType_VNode_ARM_l2:
- {
- union arm_l2_entry *e = (union arm_l2_entry*)lv;
- *paddr = (genpaddr_t)(e->small_page.base_address) << 12;
- return;
- }
- default:
- assert(!"Should not get here");
- }
-}
-
-errval_t page_mappings_unmap(struct capability *pgtable, struct cte *mapping, size_t slot, size_t num_pages)
-{
- assert(type_is_vnode(pgtable->type));
- //printf("page_mappings_unmap(%zd pages, slot = %zd)\n", num_pages, slot);
-
- bool is_section = false;
- if (pgtable->type == ObjType_VNode_ARM_l1) {
- // transform slot to hw slot
- if (mapping->cap.type == ObjType_VNode_ARM_l2) {
- // l2 table
- debug(SUBSYS_PAGING, "unmapping l2 tables: %zu, #pages: %zu\n",
- slot, num_pages);
- }
- else {
- // section
- is_section = true;
- debug(SUBSYS_PAGING, "unmapping section: %zu, #pages: %zu\n",
- slot, num_pages);
- }
- }
- // get page table entry data
- genpaddr_t paddr;
- //lpaddr_t pte;
- read_pt_entry(pgtable, slot, is_section, &paddr);
- lvaddr_t pt = local_phys_to_mem(gen_phys_to_local_phys(get_address(pgtable)));
-
- // get virtual address of first page
- // TODO: error checking
- genvaddr_t vaddr;
- struct cte *leaf_pt = cte_for_cap(pgtable);
- compile_vaddr(leaf_pt, slot, &vaddr);
- //genvaddr_t vend = vaddr + num_pages * BASE_PAGE_SIZE;
- // printf("vaddr = 0x%"PRIxGENVADDR"\n", vaddr);
- // printf("num_pages = %zu\n", num_pages);
-
- // get cap for mapping
- /*
- struct cte *mem;
- errval_t err = lookup_cap_for_mapping(paddr, pte, &mem);
- if (err_is_fail(err)) {
- printf("page_mappings_unmap: %ld\n", err);
- return err;
- }
- */
- //printf("state before unmap: mapped_pages = %zd\n", mem->mapping_info.mapped_pages);
- //printf("state before unmap: num_pages = %zd\n", num_pages);
-
- if (num_pages != mapping->mapping_info.pte_count) {
- debug(SUBSYS_PAGING, "num_pages = %zu, mapping = %zu\n",
- num_pages, mapping->mapping_info.pte_count);
- // want to unmap a different amount of pages than was mapped
- return SYS_ERR_VM_MAP_SIZE;
- }
-
- do_unmap(pt, slot, num_pages);
-
- // flush TLB for unmapped pages
- // TODO: selective TLB flush
- cp15_invalidate_tlb();
-
- // update mapping info
- memset(&mapping->mapping_info, 0, sizeof(struct mapping_info));
-
- return SYS_ERR_OK;
-}
-
-errval_t paging_modify_flags(struct capability *frame, uintptr_t offset,
+errval_t paging_modify_flags(struct capability *mapping, uintptr_t offset,
uintptr_t pages, uintptr_t kpi_paging_flags)
{
+ // XXX: modify flags for sections?
+ assert(type_is_mapping(mapping->type));
// check flags
assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));
- struct cte *mapping = cte_for_cap(frame);
- struct mapping_info *info = &mapping->mapping_info;
+ struct Frame_Mapping *info = &mapping->u.frame_mapping;
/* Calculate location of page table entries we need to modify */
- lvaddr_t base = local_phys_to_mem(info->pte) + offset;
+ // offset is a page count; scale by the PTE size (the old code added it
+ // as a raw byte offset).
+ lvaddr_t base = local_phys_to_mem(info->pte) +
+ offset * sizeof(union arm_l2_entry);
for (int i = 0; i < pages; i++) {
union arm_l2_entry *entry =
paging_set_flags(entry, kpi_paging_flags);
}
- return paging_tlb_flush_range(mapping, offset, pages);
+ return paging_tlb_flush_range(cte_for_cap(mapping), offset, pages);
}
void paging_dump_tables(struct dcb *dispatcher)
struct Frame_Mapping *mapping = &mapping_cte->cap.u.frame_mapping;
err = mdb_find_cap_for_address(
local_phys_to_gen_phys(mapping->pte), next);
- if (err_no(err) == CAPS_ERR_CAP_NOT_FOUND) {
+ if (err_no(err) == CAPS_ERR_CAP_NOT_FOUND ||
+ err_no(err) == SYS_ERR_CAP_NOT_FOUND)
+ {
debug(SUBSYS_PAGING, "could not find cap associated "
"with 0x%"PRIxLPADDR"\n", mapping->pte);
return SYS_ERR_VNODE_NOT_INSTALLED;
for (int i = 0; i < L2_PER_PAGE; i++) {
// ith 1k L2 is mapped
#ifdef DEBUG_HAS_VNODE
- debug_printf("%s: n->u.vnode.mapped & (1 << %d) == %d\n",
- __FUNCTION__, i, n->u.vnode.mapped & (1 << i));
+ debug_printf("%s: n->u.vnode.mapped[%d] == %d\n",
+ __FUNCTION__, i, L2_IS_MAPPED(n, i));
#endif
if (L2_IS_MAPPED(n, i)) {
#ifdef DEBUG_HAS_VNODE
static void unmap_l2_table(struct vnode *root, struct vnode *n, uint16_t e)
{
errval_t err;
- uint32_t entry = ROUND_DOWN(n->entry, L2_PER_PAGE) + e;
if (L2_IS_MAPPED(n, e)) {
- err = vnode_unmap(root->u.vnode.cap[0], n->u.vnode.cap[e],
- entry, 1);
+ err = vnode_unmap(root->u.vnode.cap, n->u.vnode.mapped[e]);
if (err_is_fail(err)) {
debug_printf("remove_empty_vnodes: vnode_unmap: %s\n",
err_getstring(err));
abort();
}
-
- // delete capability, if not entry 0
- if (e) {
- err = cap_destroy(n->u.vnode.cap[e]);
- if (err_is_fail(err)) {
- debug_printf("remove_empty_vnodes: cap_destroy: %s\n",
- err_getstring(err));
- abort();
- }
+ err = cap_delete(n->u.vnode.mapped[e]);
+ if (err_is_fail(err)) {
+ debug_printf("remove_empty_vnodes: vnode_unmap: %s\n",
+ err_getstring(err));
+ abort();
}
+ err = slot_free(n->u.vnode.mapped[e]);
+ if (err_is_fail(err)) {
+ debug_printf("remove_empty_vnodes: vnode_unmap: %s\n",
+ err_getstring(err));
+ abort();
+ }
+ n->u.vnode.mapped[e] = NULL_CAP;
}
}
}
// delete last copy of pt cap
- err = cap_destroy(n->u.vnode.cap[0]);
+ err = cap_destroy(n->u.vnode.cap);
assert(err_is_ok(err));
// remove vnode from list
newvnode->is_vnode = true;
// The VNode capability
- err = slot_alloc(&newvnode->u.vnode.cap[0]);
+ err = slot_alloc(&newvnode->u.vnode.cap);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_SLOT_ALLOC);
}
- err = vnode_create(newvnode->u.vnode.cap[0], type);
+ err = vnode_create(newvnode->u.vnode.cap, type);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_VNODE_CREATE);
}
- for (int i = 1; i < L2_PER_PAGE; i++) {
- newvnode->u.vnode.cap[i] = NULL_CAP;
+ for (int i = 0; i < L2_PER_PAGE; i++) {
+ newvnode->u.vnode.mapped[i] = NULL_CAP;
}
// The VNode meta data
newvnode->entry = ROUND_DOWN(entry, L2_PER_PAGE);
assert(newvnode->entry % L2_PER_PAGE == 0);
newvnode->next = root->u.vnode.children;
- newvnode->u.vnode.mapped = 0x0; // no entries mapped
+ // no entries mapped
+ memset(newvnode->u.vnode.mapped, 0, sizeof(newvnode->u.vnode.mapped));
root->u.vnode.children = newvnode;
newvnode->u.vnode.children = NULL;
assert(pt->is_vnode);
#ifdef DEBUG_GET_PTABLE
debug_printf("have ptable: %p\n", pt);
- debug_printf("mapped = %x\n", pt->u.vnode.mapped);
debug_printf("page_idx = %d\n", page_idx);
debug_printf("l2_is_mapped: %d\n", L2_IS_MAPPED(pt, page_idx));
#endif
// create copy of ptable cap for this index, if it doesn't exist
// already
- if (capref_is_null(pt->u.vnode.cap[page_idx])) {
-#ifdef DEBUG_GET_PTABLE
- debug_printf("allocating slot for chunk %d\n", page_idx);
-#endif
- err = slot_alloc(&pt->u.vnode.cap[page_idx]);
- if (err_is_fail(err)) {
- return err_push(err, LIB_ERR_VNODE_MAP);
- }
-
#ifdef DEBUG_GET_PTABLE
- debug_printf("creating copy for chunk %d\n", page_idx);
+ debug_printf("allocating slot for mapping cap for chunk %d\n", page_idx);
#endif
- err = cap_copy(pt->u.vnode.cap[page_idx], pt->u.vnode.cap[0]);
- if (err_is_fail(err)) {
- return err_push(err, LIB_ERR_VNODE_MAP);
- }
+ err = slot_alloc(&pt->u.vnode.mapped[page_idx]);
+ if (err_is_fail(err)) {
+ return err_push(err, LIB_ERR_VNODE_MAP);
}
#ifdef DEBUG_GET_PTABLE
- debug_printf("calling vnode_map() for chunk %d\n", page_idx);
+ debug_printf("calling vnode_map() for chunk %d\n", page_idx);
#endif
// map single 1k ptable
- err = vnode_map(pmap->root.u.vnode.cap[0], pt->u.vnode.cap[page_idx], idx,
- KPI_PAGING_FLAGS_READ | KPI_PAGING_FLAGS_WRITE, offset, 1);
+ err = vnode_map(pmap->root.u.vnode.cap, pt->u.vnode.cap, idx,
+ KPI_PAGING_FLAGS_READ | KPI_PAGING_FLAGS_WRITE, offset, 1,
+ pt->u.vnode.mapped[page_idx]);
if (err_is_fail(err)) {
+ errval_t err2 = slot_free(pt->u.vnode.mapped[page_idx]);
+ if (err_is_fail(err2)) {
+ err = err_push(err, err2);
+ }
return err_push(err, LIB_ERR_VNODE_MAP);
}
-
- // set 1k ptable as mapped
- pt->u.vnode.mapped |= 1 << page_idx;
}
return SYS_ERR_OK;
entry = ARM_L1_OFFSET(vaddr);
is_large = true;
#ifdef LIBBARRELFISH_DEBUG_PMAP
- printf("do_single_map: large path: entry=%zu\n", entry);
+ debug_printf("do_single_map: large path: entry=%zu\n", entry);
#endif
} else {
#ifdef LIBBARRELFISH_DEBUG_PMAP
- debug_printf("%s: 4k path: mapping %"PRIxGENVADDR"\n", __FUNCTION__, vaddr);
+ debug_printf("%s: 4k path: mapping %"PRIxGENVADDR", %zu entries\n", __FUNCTION__, vaddr, pte_count);
debug_printf("4k path: L1 entry: %zu\n", ARM_USER_L1_OFFSET(vaddr));
#endif
//4k mapping
page->u.frame.pte_count = user_pte_count;
page->u.frame.kernel_pte_count = pte_count;
+ err = slot_alloc(&page->u.frame.mapping);
+ if (err_is_fail(err)) {
+ return err_push(err, LIB_ERR_SLOT_ALLOC);
+ }
+
// Map entry into the page table
- err = vnode_map(ptable->u.vnode.cap[0], frame, entry,
- pmap_flags, offset, pte_count);
+ err = vnode_map(ptable->u.vnode.cap, frame, entry,
+ pmap_flags, offset, pte_count,
+ page->u.frame.mapping);
if (err_is_fail(err)) {
+ errval_t err2 = slot_free(page->u.frame.mapping);
+ if (err_is_fail(err2)) {
+ err = err_push(err, err2);
+ }
return err_push(err, LIB_ERR_VNODE_MAP);
}
return SYS_ERR_OK;
return LIB_ERR_PMAP_FRAME_SIZE;
}
+#ifdef LIBBARRELFISH_DEBUG_PMAP
+ printf("do_map: mapping %zu pages (size=%zx), from %zu.%zu\n",
+ pte_count, page_size, ARM_L1_OFFSET(vaddr), ARM_L2_OFFSET(vaddr));
+ printf("page_size: %zx, size: %zx\n", page_size, size);
+#endif
+
//should be trivially true for section mappings
if ((ARM_L1_OFFSET(vaddr) == ARM_L1_OFFSET(vend)) ||
flags & VREGION_FLAGS_LARGE) {
temp_end = vaddr + ARM_L2_MAX_ENTRIES * page_size;
offset += c * page_size;
c = ARM_L2_MAX_ENTRIES;
- // copy cap
- struct capref next;
- err = slot_alloc(&next);
- if (err_is_fail(err)) {
- return err_push(err, LIB_ERR_PMAP_DO_MAP);
- }
- err = cap_copy(next, frame);
- if (err_is_fail(err)) {
- return err_push(err, LIB_ERR_PMAP_DO_MAP);
- }
- frame = next;
// do mapping
err = do_single_map(pmap, vaddr, temp_end, frame, offset, ARM_L2_MAX_ENTRIES, flags);
offset += c * page_size;
c = ARM_L2_OFFSET(vend) - ARM_L2_OFFSET(temp_end);
if (c) {
- // copy cap
- struct capref next;
- err = slot_alloc(&next);
- if (err_is_fail(err)) {
- return err_push(err, LIB_ERR_PMAP_DO_MAP);
- }
- err = cap_copy(next, frame);
- if (err_is_fail(err)) {
- return err_push(err, LIB_ERR_PMAP_DO_MAP);
- }
// do mapping
- err = do_single_map(pmap, temp_end, vend, next, offset, c, flags);
+ err = do_single_map(pmap, temp_end, vend, frame, offset, c, flags);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_PMAP_DO_MAP);
}
}
static errval_t do_single_unmap(struct pmap_arm *pmap, genvaddr_t vaddr,
- size_t pte_count, bool delete_cap)
+ size_t pte_count)
{
+#ifdef LIBBARRELFISH_DEBUG_PMAP
+ debug_printf("%s: vaddr=0x%"PRIxGENVADDR", pte_count=%zu\n",
+ __FUNCTION__, vaddr, pte_count);
+#endif
errval_t err;
struct vnode *pt = find_ptable(pmap, vaddr);
// pt->is_vnode == non-large mapping
// analog to do_single_map we use 10 bits for tracking pages in user space -SG
struct vnode *page = find_vnode(pt, ARM_USER_L2_OFFSET(vaddr));
if (page && page->u.frame.pte_count == pte_count) {
- err = vnode_unmap(pt->u.vnode.cap[0], page->u.frame.cap,
- page->entry, page->u.frame.pte_count);
+#ifdef LIBBARRELFISH_DEBUG_PMAP
+ debug_printf("page unmap: pt entry: %zu, entry = %zu, pte_count = %hu\n",
+ pt->entry, page->entry, page->u.frame.pte_count);
+#endif
+ err = vnode_unmap(pt->u.vnode.cap, page->u.frame.mapping);
if (err_is_fail(err)) {
DEBUG_ERR(err, "vnode_unmap");
return err_push(err, LIB_ERR_VNODE_UNMAP);
}
- // Free up the resources
- if (delete_cap) {
- err = cap_destroy(page->u.frame.cap);
- if (err_is_fail(err)) {
- return err_push(err, LIB_ERR_PMAP_DO_SINGLE_UNMAP);
- }
+ // cleanup mapping cap
+ err = cap_delete(page->u.frame.mapping);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "cap_delete");
+ return err_push(err, LIB_ERR_CAP_DELETE);
}
+ err = slot_free(page->u.frame.mapping);
+ if (err_is_fail(err)) {
+ return err_push(err, LIB_ERR_SLOT_FREE);
+ }
+
remove_vnode(pt, page);
slab_free(&pmap->slab, page);
}
debug_printf("section unmap: entry = %zu, pte_count = %zu\n",
pt->entry, pt->u.frame.kernel_pte_count);
#endif
- err = vnode_unmap(pmap->root.u.vnode.cap[0], pt->u.frame.cap,
- pt->entry, pt->u.frame.kernel_pte_count);
+ err = vnode_unmap(pmap->root.u.vnode.cap, pt->u.frame.mapping);
if (err_is_fail(err)) {
DEBUG_ERR(err, "vnode_unmap");
return err_push(err, LIB_ERR_VNODE_UNMAP);
}
+ // cleanup mapping cap
+ err = cap_delete(pt->u.frame.mapping);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "cap_delete");
+ return err_push(err, LIB_ERR_CAP_DELETE);
+ }
+ err = slot_free(pt->u.frame.mapping);
+ if (err_is_fail(err)) {
+ return err_push(err, LIB_ERR_SLOT_FREE);
+ }
+
remove_vnode(&pmap->root, pt);
slab_free(&pmap->slab, pt);
} else {
if (ARM_L1_OFFSET(vaddr) == ARM_L1_OFFSET(vend-1)) {
// fast path
- err = do_single_unmap(pmap_arm, vaddr, pte_count, false);
+#ifdef LIBBARRELFISH_DEBUG_PMAP
+ debug_printf("%s: fast path vaddr=0x%"PRIxGENVADDR", pte_count=%zu\n",
+ __FUNCTION__, vaddr, pte_count);
+#endif
+ err = do_single_unmap(pmap_arm, vaddr, pte_count);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_PMAP_UNMAP);
}
} else { // slow path
// unmap first leaf
uint32_t c = ARM_L2_MAX_ENTRIES - ARM_L2_OFFSET(vaddr);
- err = do_single_unmap(pmap_arm, vaddr, c, false);
+#ifdef LIBBARRELFISH_DEBUG_PMAP
+ debug_printf("%s: slow path 1st leaf vaddr=0x%"PRIxGENVADDR", pte_count=%zu\n",
+ __FUNCTION__, vaddr, c);
+#endif
+ err = do_single_unmap(pmap_arm, vaddr, c);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_PMAP_UNMAP);
}
vaddr += c * BASE_PAGE_SIZE;
while (ARM_L1_OFFSET(vaddr) < ARM_L1_OFFSET(vend)) {
c = ARM_L2_MAX_ENTRIES;
- err = do_single_unmap(pmap_arm, vaddr, c, true);
+#ifdef LIBBARRELFISH_DEBUG_PMAP
+ debug_printf("%s: slow path full leaf vaddr=0x%"PRIxGENVADDR", pte_count=%zu\n",
+ __FUNCTION__, vaddr, c);
+#endif
+ err = do_single_unmap(pmap_arm, vaddr, c);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_PMAP_UNMAP);
}
// unmap remaining part
c = ARM_L2_OFFSET(vend) - ARM_L2_OFFSET(vaddr);
if (c) {
- err = do_single_unmap(pmap_arm, vaddr, c, true);
+#ifdef LIBBARRELFISH_DEBUG_PMAP
+ debug_printf("%s: slow path last leaf vaddr=0x%"PRIxGENVADDR", pte_count=%zu\n",
+ __FUNCTION__, vaddr, c);
+#endif
+ err = do_single_unmap(pmap_arm, vaddr, c);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_PMAP_UNMAP);
}
// new set of flags with cap permissions.
size_t off = ptentry - page->entry;
uintptr_t pmap_flags = vregion_flags_to_kpi_paging_flags(flags);
- err = invoke_frame_modify_flags(page->u.frame.cap, off, pages, pmap_flags);
+ err = invoke_mapping_modify_flags(page->u.frame.mapping,
+ off, pages, pmap_flags);
printf("invoke_frame_modify_flags returned error: %s (%"PRIuERRV")\n",
err_getstring(err), err);
return err;
sizeof(pmap_arm->slab_buffer));
pmap_arm->root.is_vnode = true;
- pmap_arm->root.u.vnode.cap[0] = vnode;
+ pmap_arm->root.u.vnode.cap = vnode;
pmap_arm->root.next = NULL;
pmap_arm->root.u.vnode.children = NULL;
size = ROUND_UP(size, BASE_PAGE_SIZE);
cslot_t vspace_slot = si->elfload_slot;
+ cslot_t spawn_vspace_slot = si->elfload_slot;
// Allocate the frames
size_t sz = 0;
}
}
- cslot_t spawn_vspace_slot = si->elfload_slot;
- cslot_t new_slot_count = si->elfload_slot - vspace_slot;
-
- // create copies of the frame capabilities for spawn vspace
- for (int copy_idx = 0; copy_idx < new_slot_count; copy_idx++) {
- struct capref frame = {
- .cnode = si->segcn,
- .slot = vspace_slot + copy_idx,
- };
- struct capref spawn_frame = {
- .cnode = si->segcn,
- .slot = si->elfload_slot++,
- };
- err = cap_copy(spawn_frame, frame);
- if (err_is_fail(err)) {
- // TODO: make debug printf
- printf("cap_copy failed for src_slot = %"PRIuCSLOT", dest_slot = %"PRIuCSLOT"\n", frame.slot, spawn_frame.slot);
- return err_push(err, LIB_ERR_CAP_COPY);
- }
- }
-
/* Map into my vspace */
struct memobj *memobj = malloc(sizeof(struct memobj_anon));
if (!memobj) {
sz = 1UL << log2floor(size - offset);
struct capref frame = {
.cnode = si->segcn,
- .slot = spawn_vspace_slot++,
+ .slot = spawn_vspace_slot++,
};
genvaddr_t genvaddr = vspace_lvaddr_to_genvaddr(offset);
err = memobj->f.fill(spawn_memobj, genvaddr, frame, sz);
free(multiboot_args);
// unmap bootinfo module pages
- spawn_unmap_module(binary);
+ err = spawn_unmap_module(binary);
+ if (err_is_fail(err)) {
+ return err_push(err, SPAWN_ERR_UNMAP_MODULE);
+ }
return SYS_ERR_OK;
}