Note: the ARM version does not yet properly support large pages.
Signed-off-by: Simon Gerber <simon.gerber@inf.ethz.ch>
/* 1MB large pages */
#define LARGE_PAGE_BITS 20
-#define LARGE_PAGE_SIZE (1u << PAGE_LARGE_BITS)
+#define LARGE_PAGE_SIZE (1u << LARGE_PAGE_BITS)
#define LARGE_PAGE_MASK (LARGE_PAGE_SIZE - 1)
#define LARGE_PAGE_OFFSET(a) ((a) & LARGE_PAGE_MASK)
case ObjType_VNode_x86_32_pdir:
page_size = X86_32_LARGE_PAGE_SIZE;
break;
+#elif __arm__
+ case ObjType_VNode_ARM_l1:
+ panic("large page support for ARM NYI!\n");
+ break;
+ case ObjType_VNode_ARM_l2:
+ page_size = BASE_PAGE_SIZE;
+ break;
#else
#error setup page sizes for arch
#endif
static inline uintptr_t
vregion_flags_to_kpi_paging_flags(vregion_flags_t flags)
{
- STATIC_ASSERT(0x2f == VREGION_FLAGS_MASK, "");
+ STATIC_ASSERT(0xff == VREGION_FLAGS_MASK, "");
STATIC_ASSERT(0x0f == KPI_PAGING_FLAGS_MASK, "");
STATIC_ASSERT(VREGION_FLAGS_READ == KPI_PAGING_FLAGS_READ, "");
STATIC_ASSERT(VREGION_FLAGS_WRITE == KPI_PAGING_FLAGS_WRITE, "");
errval_t err = SYS_ERR_OK;
// Get the page table
struct vnode *ptable;
- uintptr_t index;
+ uintptr_t entry;
if (flags&FLAGS_SECTION) {
//section mapping (1MB)
//mapped in the L1 table at root
ptable = &pmap->root;
- index = ARM_USER_L1_OFFSET(vaddr);
+ entry = ARM_USER_L1_OFFSET(vaddr);
printf("do_single_map: large path\n");
} else {
//4k mapping
err = get_ptable(pmap, vaddr, &ptable);
- index = ARM_USER_L2_OFFSET(vaddr);
+ entry = ARM_USER_L2_OFFSET(vaddr);
}
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_PMAP_GET_PTABLE);
}
// copy over data to new frame
genvaddr_t gen_base = vregion_get_base_addr(&state->vregion);
- memcpy(vbuf, (void*)gen_base, state->mapoffset);
+ memcpy(vbuf, (void*)(lvaddr_t)gen_base, state->mapoffset);
err = vregion_destroy(vregion);
if (err_is_fail(err)) {
return err;
}
- size_t offset = 0;
+ genvaddr_t offset = 0;
// Unmap backing frames for [0, size) in state.vregion
do {
err = state->memobj.m.f.unfill(&state->memobj.m, 0, &oldframe,