Fix large page code so it compiles on 32bit / ARM.
author: Simon Gerber <simon.gerber@inf.ethz.ch>
Mon, 4 May 2015 09:50:52 +0000 (11:50 +0200)
committer: Simon Gerber <simon.gerber@inf.ethz.ch>
Mon, 4 May 2015 09:51:26 +0000 (11:51 +0200)
Note: the ARM version does not support large pages properly at this point.

Signed-off-by: Simon Gerber <simon.gerber@inf.ethz.ch>

include/target/arm/barrelfish_kpi/paging_arm_v7.h
kernel/paging_generic.c
lib/barrelfish/arch/arm/pmap_arch.c
lib/barrelfish/vspace/mmu_aware.c

index 5f2c28a..37f4c3a 100644 (file)
@@ -23,7 +23,7 @@
 
 /* 1MB large pages */
 #define LARGE_PAGE_BITS         20
-#define LARGE_PAGE_SIZE         (1u << PAGE_LARGE_BITS)
+#define LARGE_PAGE_SIZE         (1u << LARGE_PAGE_BITS)
 #define LARGE_PAGE_MASK         (LARGE_PAGE_SIZE - 1)
 #define LARGE_PAGE_OFFSET(a)    ((a) & LARGE_PAGE_MASK)
 
index 3014dac..8133035 100644 (file)
@@ -271,6 +271,13 @@ errval_t paging_tlb_flush_range(struct cte *frame, size_t pages)
         case ObjType_VNode_x86_32_pdir:
             page_size = X86_32_LARGE_PAGE_SIZE;
             break;
+#elif __arm__
+        case ObjType_VNode_ARM_l1:
+            panic("large page support for ARM NYI!\n");
+            break;
+        case ObjType_VNode_ARM_l2:
+            page_size = BASE_PAGE_SIZE;
+            break;
 #else
 #error setup page sizes for arch
 #endif
index 27bdde6..6ff2c38 100644 (file)
@@ -92,7 +92,7 @@
 static inline uintptr_t
 vregion_flags_to_kpi_paging_flags(vregion_flags_t flags)
 {
-    STATIC_ASSERT(0x2f == VREGION_FLAGS_MASK, "");
+    STATIC_ASSERT(0xff == VREGION_FLAGS_MASK, "");
     STATIC_ASSERT(0x0f == KPI_PAGING_FLAGS_MASK, "");
     STATIC_ASSERT(VREGION_FLAGS_READ    == KPI_PAGING_FLAGS_READ,    "");
     STATIC_ASSERT(VREGION_FLAGS_WRITE   == KPI_PAGING_FLAGS_WRITE,   "");
@@ -289,17 +289,17 @@ static errval_t do_single_map(struct pmap_arm *pmap, genvaddr_t vaddr, genvaddr_
     errval_t err = SYS_ERR_OK;
     // Get the page table
     struct vnode *ptable;
-    uintptr_t index;
+    uintptr_t entry;
     if (flags&FLAGS_SECTION) {
         //section mapping (1MB)
         //mapped in the L1 table at root
         ptable = &pmap->root;
-        index = ARM_USER_L1_OFFSET(vaddr);
+        entry = ARM_USER_L1_OFFSET(vaddr);
         printf("do_single_map: large path\n");
     } else {
         //4k mapping
         err = get_ptable(pmap, vaddr, &ptable);
-        index = ARM_USER_L2_OFFSET(vaddr);
+        entry = ARM_USER_L2_OFFSET(vaddr);
     }
     if (err_is_fail(err)) {
         return err_push(err, LIB_ERR_PMAP_GET_PTABLE);
index 073d7d6..564b754 100644 (file)
@@ -197,14 +197,14 @@ errval_t vspace_mmu_aware_reset(struct vspace_mmu_aware *state,
     }
     // copy over data to new frame
     genvaddr_t gen_base = vregion_get_base_addr(&state->vregion);
-    memcpy(vbuf, (void*)gen_base, state->mapoffset);
+    memcpy(vbuf, (void*)(lvaddr_t)gen_base, state->mapoffset);
 
     err = vregion_destroy(vregion);
     if (err_is_fail(err)) {
         return err;
     }
 
-    size_t offset = 0;
+    genvaddr_t offset = 0;
     // Unmap backing frames for [0, size) in state.vregion
     do {
         err = state->memobj.m.f.unfill(&state->memobj.m, 0, &oldframe,