aarch64: Use VNode_AARCH64_l0 as initial page table level.
author     Moritz Hoffmann <moritz.hoffmann@hpe.com>    Wed, 15 Jun 2016 15:38:39 +0000 (15:38 +0000)
committer  Moritz Hoffmann <moritz.hoffmann@hpe.com>    Wed, 15 Jun 2016 15:54:01 +0000 (15:54 +0000)
Signed-off-by: Moritz Hoffmann <moritz.hoffmann@hpe.com>

capabilities/caps.hl
include/barrelfish_kpi/capabilities.h
kernel/arch/armv8/paging.c
kernel/arch/armv8/syscall.c
kernel/syscall.c
lib/barrelfish/arch/aarch64/pmap_arch.c
lib/spawndomain/spawn.c

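With this change an AArch64 domain's root page table is a VMSAv8-64 L0 table instead of an L1 table, giving the full four-level walk (L0 -> L1 -> L2 -> L3) with a 4 KB granule and 512 entries per table. The sketch below is illustrative only, not the Barrelfish headers: it shows the assumed per-level index split of a 48-bit virtual address; the real macros (VMSAv8_64_L0_BASE and friends) live in the kernel/libbarrelfish paging headers and may differ in detail.

    /* Illustrative only -- not the Barrelfish macros. Index extraction for a
     * 4 KB granule, 48-bit VA, four-level VMSAv8-64 walk; each table holds
     * 512 (2^9) entries, so each level consumes 9 bits of the VA. */
    #include <stdint.h>

    static inline unsigned l0_index(uint64_t va) { return (va >> 39) & 0x1ff; }
    static inline unsigned l1_index(uint64_t va) { return (va >> 30) & 0x1ff; }
    static inline unsigned l2_index(uint64_t va) { return (va >> 21) & 0x1ff; }
    static inline unsigned l3_index(uint64_t va) { return (va >> 12) & 0x1ff; }
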
diff --git a/capabilities/caps.hl b/capabilities/caps.hl
index 4f0cdb1..2d97e20 100644
@@ -336,7 +336,7 @@ cap VNode_ARM_l2_Mapping from VNode_ARM_l2 {
 cap VNode_AARCH64_l0 from RAM {
     /* L0 Page Table */
     address genpaddr base;  /* Base address of VNode */
-    size_bits { vnode_size };
+    size { vnode_size };
 };
 
 cap VNode_AARCH64_l0_Mapping from VNode_AARCH64_l0 {
diff --git a/include/barrelfish_kpi/capabilities.h b/include/barrelfish_kpi/capabilities.h
index cfb274c..c9d6178 100644
@@ -127,7 +127,7 @@ static inline size_t vnode_objbits(enum objtype type)
 static inline size_t vnode_objsize(enum objtype type)
 {
     // This function should be emitted by hamlet or somesuch.
-    STATIC_ASSERT(46 == ObjType_Num, "Check VNode definitions");
+    STATIC_ASSERT(48 == ObjType_Num, "Check VNode definitions");
 
     if (type == ObjType_VNode_x86_64_pml4 ||
         type == ObjType_VNode_x86_64_pdpt ||
@@ -141,7 +141,8 @@ static inline size_t vnode_objsize(enum objtype type)
         // include the right files
         return 4096; // BASE_PAGE_SIZE
     }
-    else if (type == ObjType_VNode_AARCH64_l1 ||
+    else if (type == ObjType_VNode_AARCH64_l0 ||
+             type == ObjType_VNode_AARCH64_l1 ||
              type == ObjType_VNode_AARCH64_l2 ||
              type == ObjType_VNode_AARCH64_l3)
     {
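
vnode_objsize() now reports 4096 bytes for the L0 table as well, matching the other AArch64 levels. A minimal caller sketch, assuming the <barrelfish_kpi/capabilities.h> header edited above is on the include path:

    /* Sketch: size the RAM needed to retype into the new root-level VNode.
     * ObjType_VNode_AARCH64_l0 comes from the header above. */
    #include <barrelfish_kpi/capabilities.h>

    static size_t aarch64_root_vnode_bytes(void)
    {
        return vnode_objsize(ObjType_VNode_AARCH64_l0);   /* 4096 on AArch64 */
    }
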
diff --git a/kernel/arch/armv8/paging.c b/kernel/arch/armv8/paging.c
index cd58ff2..7e8f2ed 100644
@@ -51,6 +51,90 @@ paging_set_flags(union armv8_ttable_entry *entry, uintptr_t kpi_paging_flags)
 }
 
 static errval_t
+caps_map_l0(struct capability* dest,
+            cslot_t            slot,
+            struct capability* src,
+            uintptr_t          kpi_paging_flags,
+            uintptr_t          offset,
+            uintptr_t          pte_count,
+            struct cte*        mapping_cte)
+{
+    //
+    // Note:
+    //
+    // We have a chicken-and-egg problem in initializing resources, so
+    // instead of treating an L3 table at its actual 1K size, we treat
+    // it as being 4K. As a result, when we map an "L3" table we actually
+    // map a page of memory as if it were 4 consecutive L3 tables.
+    //
+    // See lib/barrelfish/arch/arm/pmap_arch.c for more discussion.
+    //
+
+    if (slot >= VMSAv8_64_PTABLE_NUM_ENTRIES) {
+        printf("slot = %"PRIuCSLOT"\n",slot);
+        panic("oops: slot id >= %d", VMSAv8_64_PTABLE_NUM_ENTRIES);
+        return SYS_ERR_VNODE_SLOT_INVALID;
+    }
+
+    if (pte_count != 1) {
+        printf("pte_count = %zu\n",(size_t)pte_count);
+        panic("oops: pte_count");
+        return SYS_ERR_VM_MAP_SIZE;
+    }
+
+    if (src->type != ObjType_VNode_AARCH64_l1) {
+        char buf[128];
+        sprint_cap(buf, 128, src);
+        printf("src: %s\n", buf);
+        panic("oops: l0 wrong src type");
+        return SYS_ERR_WRONG_MAPPING;
+    }
+
+//    if (slot >= VMSAv8_64_PTABLE_NUM_ENTRIES) {
+//        printf("slot = %"PRIuCSLOT", max=%d MEMORY_OFFSET=%p\n", slot, VMSAv8_64_L0_BASE(MEMORY_OFFSET),MEMORY_OFFSET);
+//        panic("oops: l0 slot id");
+//        return SYS_ERR_VNODE_SLOT_RESERVED;
+//    }
+//
+    // Destination
+    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
+    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);
+
+    union armv8_ttable_entry* entry = (union armv8_ttable_entry*) dest_lvaddr + slot;
+
+    // Source
+    genpaddr_t src_gpaddr = get_address(src);
+    lpaddr_t   src_lpaddr = gen_phys_to_local_phys(src_gpaddr);
+
+    //union armv8_l2_entry* entry1 = (union armv8_l2_entry*)local_phys_to_mem(src_gpaddr);
+
+
+    assert(offset == 0);
+    assert(aligned(src_lpaddr, 1u << 12));
+    assert((src_lpaddr < dest_lpaddr) || (src_lpaddr >= dest_lpaddr + 32));
+
+    if (entry->d.valid) {
+        // cleanup mapping info
+        debug(SUBSYS_PAGING, "slot in use\n");
+        return SYS_ERR_VNODE_SLOT_INUSE;
+    }
+
+    create_mapping_cap(mapping_cte, src,
+                       dest_lpaddr + slot * get_pte_size(),
+                       pte_count);
+
+    entry->raw = 0;
+    entry->d.valid = 1;
+    entry->d.mb1 = 1;
+    entry->d.base = (src_lpaddr) >> 12;
+    debug(SUBSYS_PAGING, "L0 mapping %"PRIuCSLOT". @%p = %016"PRIx64"\n",
+              slot, entry, entry->raw);
+
+    sysreg_invalidate_tlb();
+
+    return SYS_ERR_OK;
+}
+static errval_t
 caps_map_l1(struct capability* dest,
             cslot_t            slot,
             struct capability* src,
@@ -87,7 +171,7 @@ caps_map_l1(struct capability* dest,
         return SYS_ERR_WRONG_MAPPING;
     }
 
-    if (slot >= VMSAv8_64_L1_BASE(MEMORY_OFFSET)) {
+    if (slot >= VMSAv8_64_PTABLE_NUM_ENTRIES) {
         printf("slot = %"PRIuCSLOT"\n",slot);
         panic("oops: l1 slot id");
         return SYS_ERR_VNODE_SLOT_RESERVED;
@@ -281,6 +365,7 @@ typedef errval_t (*mapping_handler_t)(struct capability *dest_cap,
 
 /// Dispatcher table for the type of mapping to create
 static mapping_handler_t handler[ObjType_Num] = {
+        [ObjType_VNode_AARCH64_l0]   = caps_map_l0,
         [ObjType_VNode_AARCH64_l1]   = caps_map_l1,
         [ObjType_VNode_AARCH64_l2]   = caps_map_l2,
         [ObjType_VNode_AARCH64_l3]   = caps_map_l3,
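
caps_map_l0() installs a table descriptor: the valid bit set, bit 1 set to mark a table (rather than block) entry, and the physical address of the L1 table in the output-address field. Below is a standalone sketch of the descriptor layout the code above relies on, assuming a 4 KB granule; the kernel's union armv8_ttable_entry carries more fields than shown here.

    /* Illustrative VMSAv8-64 table-descriptor layout (4 KB granule).
     * Field names mirror the ones used in caps_map_l0 above. */
    #include <stdint.h>

    union ttable_entry_sketch {
        uint64_t raw;
        struct {
            uint64_t valid : 1;   /* bit 0: descriptor is valid             */
            uint64_t mb1   : 1;   /* bit 1: 1 = table descriptor            */
            uint64_t ign   : 10;  /* bits 2-11: ignored for table entries   */
            uint64_t base  : 36;  /* bits 12-47: next-level table PA >> 12  */
            uint64_t attrs : 16;  /* bits 48-63: table attributes/reserved  */
        } d;
    };
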
diff --git a/kernel/arch/armv8/syscall.c b/kernel/arch/armv8/syscall.c
index 8c98b80..4e2174a 100644
@@ -838,6 +838,10 @@ static invocation_t invocations[ObjType_Num][CAP_MAX_CMD] = {
         [CNodeCmd_Create]   = handle_create,
         [CNodeCmd_GetState] = handle_get_state,
     },
+    [ObjType_VNode_AARCH64_l0] = {
+        [VNodeCmd_Map]   = handle_map,
+        [VNodeCmd_Unmap] = handle_unmap,
+    },
     [ObjType_VNode_AARCH64_l1] = {
        [VNodeCmd_Map]   = handle_map,
        [VNodeCmd_Unmap] = handle_unmap,
@@ -858,6 +862,10 @@ static invocation_t invocations[ObjType_Num][CAP_MAX_CMD] = {
         [MappingCmd_Destroy] = handle_mapping_destroy,
         [MappingCmd_Modify] = handle_mapping_modify,
     },
+    [ObjType_VNode_AARCH64_l0_Mapping] = {
+        [MappingCmd_Destroy] = handle_mapping_destroy,
+        [MappingCmd_Modify] = handle_mapping_modify,
+    },
     [ObjType_VNode_AARCH64_l1_Mapping] = {
         [MappingCmd_Destroy] = handle_mapping_destroy,
         [MappingCmd_Modify] = handle_mapping_modify,
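
The new entries plug the L0 VNode and its mapping capability into the per-type invocation table, so VNodeCmd_Map/Unmap and MappingCmd_Destroy/Modify reach the same handlers as the other levels. A self-contained sketch of the dispatch pattern; all names here are made up, not the kernel's:

    /* Dispatch pattern: a table indexed by capability type and command,
     * with NULL meaning "illegal invocation". */
    #include <stddef.h>

    typedef int (*fake_handler_t)(void);

    enum fake_type { FAKE_VNODE_L0, FAKE_TYPE_NUM };
    enum fake_cmd  { FAKE_CMD_MAP, FAKE_CMD_UNMAP, FAKE_CMD_NUM };

    static int fake_handle_map(void)   { return 0; }
    static int fake_handle_unmap(void) { return 0; }

    static fake_handler_t fake_table[FAKE_TYPE_NUM][FAKE_CMD_NUM] = {
        [FAKE_VNODE_L0] = {
            [FAKE_CMD_MAP]   = fake_handle_map,
            [FAKE_CMD_UNMAP] = fake_handle_unmap,
        },
    };

    static int fake_dispatch(enum fake_type t, enum fake_cmd c)
    {
        fake_handler_t h = fake_table[t][c];
        return h != NULL ? h() : -1;   /* -1: illegal invocation */
    }
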
diff --git a/kernel/syscall.c b/kernel/syscall.c
index 28925be..b4e6c18 100644
@@ -102,6 +102,11 @@ sys_dispatcher_setup(struct capability *to, capaddr_t cptr, int depth,
                 (lvaddr_t)gen_phys_to_local_phys(vroot->u.vnode_arm_l1.base);
             break;
 
+        case ObjType_VNode_AARCH64_l0:
+            dcb->vspace =
+                (lvaddr_t)gen_phys_to_local_phys(vroot->u.vnode_aarch64_l0.base);
+            break;
+
         case ObjType_VNode_AARCH64_l1:
             dcb->vspace =
                 (lvaddr_t)gen_phys_to_local_phys(vroot->u.vnode_aarch64_l1.base);
diff --git a/lib/barrelfish/arch/aarch64/pmap_arch.c b/lib/barrelfish/arch/aarch64/pmap_arch.c
index 09e7f23..009f4ef 100644
@@ -239,12 +239,22 @@ static errval_t get_ptable(struct pmap_aarch64  *pmap,
 
     errval_t err;
     struct vnode *root = &pmap->root;
-    struct vnode *pl2, *pl3;
+    struct vnode *pl1, *pl2, *pl3;
     assert(root != NULL);
 
+
+    // L0 mapping
+    if((pl1 = find_vnode(root, VMSAv8_64_L0_BASE(vaddr))) == NULL) {
+        err = alloc_vnode(pmap, root, ObjType_VNode_AARCH64_l1,
+                            VMSAv8_64_L0_BASE(vaddr), &pl1);
+        if (err_is_fail(err)) {
+            return err_push(err, LIB_ERR_PMAP_ALLOC_VNODE);
+        }
+    }
+
     // L1 mapping
-    if((pl2 = find_vnode(root, VMSAv8_64_L1_BASE(vaddr))) == NULL) {
-        err = alloc_vnode(pmap, root, ObjType_VNode_AARCH64_l2,
+    if((pl2 = find_vnode(pl1, VMSAv8_64_L1_BASE(vaddr))) == NULL) {
+        err = alloc_vnode(pmap, pl1, ObjType_VNode_AARCH64_l2,
                             VMSAv8_64_L1_BASE(vaddr), &pl2);
         if (err_is_fail(err)) {
             return err_push(err, LIB_ERR_PMAP_ALLOC_VNODE);
@@ -272,11 +282,16 @@ static struct vnode *find_ptable(struct pmap_aarch64  *pmap,
                                  genvaddr_t vaddr)
 {
     struct vnode *root = &pmap->root;
-    struct vnode *pl2;
+    struct vnode *pl1, *pl2;
     assert(root != NULL);
 
+    // L0 mapping
+    if((pl1 = find_vnode(root, VMSAv8_64_L0_BASE(vaddr))) == NULL) {
+        return NULL;
+    }
+
     // L1 mapping
-    if((pl2 = find_vnode(root, VMSAv8_64_L1_BASE(vaddr))) == NULL) {
+    if((pl2 = find_vnode(pl1, VMSAv8_64_L1_BASE(vaddr))) == NULL) {
         return NULL;
     }
 
@@ -422,7 +437,10 @@ max_slabs_required(size_t bytes)
     // Perform a slab allocation for every L1 (do_map -> find_vnode)
     size_t l1entries = DIVIDE_ROUND_UP(l2entries, 512);
 
-    return pages + l2entries + l1entries + l3entries;
+    // Perform a slab allocation for every L0 (do_map -> find_vnode)
+    size_t l0entries = DIVIDE_ROUND_UP(l1entries, 512);
+
+    return pages + l3entries + l2entries + l1entries + l0entries;
 }
 
 /**
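
max_slabs_required() now reserves vnode metadata for the extra level. A worked example, assuming the parts of the function not shown in the hunk compute pages = DIVIDE_ROUND_UP(bytes, 4096), l3entries = DIVIDE_ROUND_UP(pages, 512) and l2entries = DIVIDE_ROUND_UP(l3entries, 512):

    /* Worked example: mapping 1 GiB (bytes = 1 << 30).
     *   pages     = DIVIDE_ROUND_UP(1 << 30, 4096) = 262144
     *   l3entries = DIVIDE_ROUND_UP(262144, 512)   = 512
     *   l2entries = DIVIDE_ROUND_UP(512, 512)      = 1
     *   l1entries = DIVIDE_ROUND_UP(1, 512)        = 1
     *   l0entries = DIVIDE_ROUND_UP(1, 512)        = 1
     * so the function returns 262144 + 512 + 1 + 1 + 1 = 262659 slabs;
     * the L0 level adds exactly one slab in the common case. */
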
diff --git a/lib/spawndomain/spawn.c b/lib/spawndomain/spawn.c
index f190b39..2eda7d9 100644
@@ -209,7 +209,7 @@ static errval_t spawn_setup_vspace(struct spawninfo *si)
         break;
 
     case CPU_ARM8:
-        err = vnode_create(si->vtree, ObjType_VNode_AARCH64_l1);
+        err = vnode_create(si->vtree, ObjType_VNode_AARCH64_l0);
         break;
 
     default: