 * \brief pmap management
 *
 * Copyright (c) 2010-2013 ETH Zurich.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
 *
 * There was some minor difficulty here with mapping the CPU's native
 * page table arrangement onto Barrelfish. The problem lies with
 * resource bootstrapping: the bootstrap RAM allocator allocates whole pages.
 *
 * The natural division of bits is 12/8/12, corresponding to 4K
 * entries in the L1 table and 256 entries per L2
 * table. Unfortunately 256 entries consume 1KB rather than a
 * page (4KB), so we pretend here and in the kernel caps page
 * code that the L1 has 1024 entries and L2 tables are 4KB in
 * size. The 4KB constraint comes from ram_alloc_fixed
 * allocating single pages and the difficulty of bootstrapping
 * cap slots (alloc_node takes a single slot).
 *
 * For now this suffices, but it might need to be revisited in future.
 *
 * An earlier cut at this used the first 1KB of each
 * allocation made from ram_alloc_fixed and wasted the remaining
 * space. Aside from the wasted space, it entailed a couple of minor
 * platform ifdefs to work around the discrepancy.
 *
 * Alternative fixes discussed include:
 *
 * 1. avoid the need to create vnodes before connecting to a
 *    real allocator (probably not plausible).
 *
 * 2. somehow make ram_alloc_fixed handle sub-page allocations
 *    (it's clunky, but perhaps we can give each domain a separate
 *    cnode full of 1KB-sized RAM caps?)
 *
 * 3. handle the problem at the level of vnode_create (can't see how to
 *    do this).
 *
 * 4. waste the space -- doing this cleanly will require a new parameter
 *    to retype to prevent all 4 caps being created.
 *
 * 5. introduce a new ARM-specific version of vnode_create that creates
 *    four 1KB vnodes, and is only called from the ARM VM code.
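/*
 * Editor's illustration (not from the original source): one 4KB page holds
 * four hardware L2 tables of 1KB each. By pretending that an L2 table is
 * 4KB (1024 entries), every page handed out by ram_alloc_fixed backs exactly
 * one such "pretend" L2 table, which then covers 1024 * 4KB = 4MB of virtual
 * address space.
 */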
#include <barrelfish/barrelfish.h>
#include <barrelfish/caddr.h>
#include <barrelfish/invocations_arch.h>
// Location of VSpace managed by this system.
#ifdef __ARM_ARCH_7M__
// The virtual section 0x40000000-0x40100000 cannot be used as regular memory
// because of "bit-banding".
// 0x42000000-0x44000000 is also dangerous, so we start after that.
// XXX: there are more virtual regions we
// are not allowed to use -> find out where to reserve those
#define VSPACE_BEGIN ((lvaddr_t)(1UL*1024*1024*1024 + 64UL*1024*1024)) // 0x44000000
#else // "normal" ARM architectures
#define VSPACE_BEGIN ((lvaddr_t)1UL*1024*1024*1024) // 0x40000000
// Amount of virtual address space reserved for mapping frames
// backing refill_slabs.
//#define META_DATA_RESERVED_SPACE (BASE_PAGE_SIZE * 128) // 64
#define META_DATA_RESERVED_SPACE (BASE_PAGE_SIZE * 256)
// increased from 128 for the Pandaboard port
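// Editor's note: with 4KB base pages this reserves 256 * 4KB = 1MB of virtual
// address space for the frames backing the pmap's own slab metadata.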
// Convenience macros to figure out user-space page table indices.
// We use 10 bits for both the L1 and L2 tables in user space, even though
// in hardware the L1 index is 12 bits and the L2 index is 8 bits.
#define ARM_USER_L1_OFFSET(addr) ((uintptr_t)(addr >> 22) & 0x3ffu)
#define ARM_USER_L2_OFFSET(addr) ((uintptr_t)(addr >> 12) & 0x3ffu)
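/*
 * Worked example (editor's illustration): for vaddr 0x40123456,
 * ARM_USER_L1_OFFSET yields 0x100 (bits 31:22) and ARM_USER_L2_OFFSET yields
 * 0x123 (bits 21:12); the remaining bits 11:0 (0x456) are the offset within
 * the 4KB page. The hardware's real 12/8/12 split would see the same address
 * as L1 index 0x401 and L2 index 0x23.
 */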
#define FLAGS_LARGE        0x0100
#define FLAGS_SECTION      0x0200
#define FLAGS_SUPERSECTION 0x0300
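// Editor's note (assumption): these values lie above VREGION_FLAGS_MASK (0x2f)
// and act as internal hints for large-page (64KB), section (1MB) and
// supersection (16MB) mappings; they are masked off (flags & ~FLAGS_SUPERSECTION)
// before conversion to kernel paging flags.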
static inline uintptr_t
vregion_flags_to_kpi_paging_flags(vregion_flags_t flags)
    STATIC_ASSERT(0x2f == VREGION_FLAGS_MASK, "");
    STATIC_ASSERT(0x0f == KPI_PAGING_FLAGS_MASK, "");
    STATIC_ASSERT(VREGION_FLAGS_READ == KPI_PAGING_FLAGS_READ, "");
    STATIC_ASSERT(VREGION_FLAGS_WRITE == KPI_PAGING_FLAGS_WRITE, "");
    STATIC_ASSERT(VREGION_FLAGS_EXECUTE == KPI_PAGING_FLAGS_EXECUTE, "");
    STATIC_ASSERT(VREGION_FLAGS_NOCACHE == KPI_PAGING_FLAGS_NOCACHE, "");
    if ((flags & VREGION_FLAGS_MPB) != 0) {
        // XXX: ignore MPB flag on ARM, otherwise the assert below fires -AB
        flags &= ~VREGION_FLAGS_MPB;
    if ((flags & VREGION_FLAGS_GUARD) != 0) {
    assert(0 == (~KPI_PAGING_FLAGS_MASK & (uintptr_t)flags));
    return (uintptr_t)flags;
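/*
 * Editor's note: because the VREGION_* permission bits coincide with the
 * KPI_PAGING_* bits (see the static asserts above), the conversion is a plain
 * pass-through, e.g. VREGION_FLAGS_READ | VREGION_FLAGS_WRITE becomes
 * KPI_PAGING_FLAGS_READ | KPI_PAGING_FLAGS_WRITE unchanged.
 */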
 * \brief Starting at a given root, return the vnode with entry equal to #entry
static struct vnode *find_vnode(struct vnode *root, uint32_t entry)
    assert(root != NULL);
    assert(root->is_vnode);
    for (n = root->u.vnode.children; n != NULL; n = n->next) {
        if (n->entry == entry) {
static bool inside_region(struct vnode *root, uint32_t entry, uint32_t npages)
    assert(root != NULL);
    assert(root->is_vnode);
    for (n = root->u.vnode.children; n; n = n->next) {
        uint16_t end = n->entry + n->u.frame.pte_count;
        if (n->entry <= entry && entry + npages <= end) {
static bool has_vnode(struct vnode *root, uint32_t entry, size_t len)
    assert(root != NULL);
    assert(root->is_vnode);
    uint32_t end_entry = entry + len;
    for (n = root->u.vnode.children; n; n = n->next) {
        if (n->is_vnode && n->entry == entry) {
        uint32_t end = n->entry + n->u.frame.pte_count;
        if (n->entry < entry && end > end_entry) {
        if (n->entry >= entry && n->entry < end_entry) {
static void remove_vnode(struct vnode *root, struct vnode *item)
    assert(root->is_vnode);
    struct vnode *walk = root->u.vnode.children;
    struct vnode *prev = NULL;
            prev->next = walk->next;
            root->u.vnode.children = walk->next;
    assert(!"Should not get here");
 * \brief Allocates a new VNode, adding it to the page table and our metadata
static errval_t alloc_vnode(struct pmap_arm *pmap_arm, struct vnode *root,
                            enum objtype type, uint32_t entry,
                            struct vnode **retvnode)
    assert(root->is_vnode);
    struct vnode *newvnode = slab_alloc(&pmap_arm->slab);
    if (newvnode == NULL) {
        return LIB_ERR_SLAB_ALLOC_FAIL;
    newvnode->is_vnode = true;

    // The VNode capability
    err = slot_alloc(&newvnode->u.vnode.cap);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    err = vnode_create(newvnode->u.vnode.cap, type);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VNODE_CREATE);
    err = vnode_map(root->u.vnode.cap, newvnode->u.vnode.cap, entry,
                    KPI_PAGING_FLAGS_READ | KPI_PAGING_FLAGS_WRITE, 0, 1);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VNODE_MAP);

    // The VNode meta data
    newvnode->entry = entry;
    newvnode->next = root->u.vnode.children;
    root->u.vnode.children = newvnode;
    newvnode->u.vnode.children = NULL;
    *retvnode = newvnode;
 * \brief Returns the vnode for the page table mapping a given vspace address
static errval_t get_ptable(struct pmap_arm *pmap,
                           struct vnode **ptable)
    // NB: strictly speaking the ARM L1 index has 12 bits, but the allocation
    // unit for L2 tables is one page of L2 entries (4 tables), so we use
    // 10 bits for the L1 index.
    uintptr_t idx = ARM_USER_L1_OFFSET(vaddr);
    if ((*ptable = find_vnode(&pmap->root, idx)) == NULL)
        // L1 table entries point to L2 tables, so allocate an L2
        // table for this L1 entry.
        struct vnode *tmp = NULL; // Tmp variable for passing to alloc_vnode
        errval_t err = alloc_vnode(pmap, &pmap->root, ObjType_VNode_ARM_l2,
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "alloc_vnode");
        *ptable = tmp; // Set argument to received value
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_PMAP_ALLOC_VNODE);
static struct vnode *find_ptable(struct pmap_arm *pmap,
    // NB: strictly speaking the ARM L1 index has 12 bits, but the allocation
    // unit for L2 tables is one page of L2 entries (4 tables), so we use
    // 10 bits for the L1 index.
    uintptr_t idx = ARM_USER_L1_OFFSET(vaddr);
    return find_vnode(&pmap->root, idx);
static errval_t do_single_map(struct pmap_arm *pmap, genvaddr_t vaddr, genvaddr_t vend,
                              struct capref frame, size_t offset, size_t pte_count,
                              vregion_flags_t flags)
    errval_t err = SYS_ERR_OK;
    // Get the page table
    struct vnode *ptable;
    if (flags & FLAGS_SECTION) {
        // section mapping (1MB), mapped in the L1 table at root
        ptable = &pmap->root;
        index = ARM_USER_L1_OFFSET(vaddr);
        printf("do_single_map: large path\n");
        err = get_ptable(pmap, vaddr, &ptable);
        index = ARM_USER_L2_OFFSET(vaddr);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_PMAP_GET_PTABLE);
    uintptr_t pmap_flags = vregion_flags_to_kpi_paging_flags(flags & ~FLAGS_SUPERSECTION);
    // XXX: reassess the following note -SG
    // NOTE: strictly speaking an L2 entry only has 8 bits, but due to the way
    // Barrelfish allocates L1 and L2 tables, we use 10 bits for the tracking
    // idx here and in the map syscall
    uintptr_t idx = ARM_USER_L2_OFFSET(vaddr);
    // Create the user-level data structure for the mapping
    bool has_page = has_vnode(ptable, idx, pte_count);
    struct vnode *page = slab_alloc(&pmap->slab);
    page->is_vnode = false;
    page->next = ptable->u.vnode.children;
    ptable->u.vnode.children = page;
    page->u.frame.cap = frame;
    page->u.frame.flags = flags;
    page->u.frame.pte_count = pte_count;

    // Map entry into the page table
    err = vnode_map(ptable->u.vnode.cap, frame, idx,
                    pmap_flags, offset, pte_count);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VNODE_MAP);
static errval_t do_map(struct pmap_arm *pmap, genvaddr_t vaddr,
                       struct capref frame, size_t offset, size_t size,
                       vregion_flags_t flags, size_t *retoff, size_t *retsize)
    // determine mapping-specific parts
    if (flags & FLAGS_SECTION) {
        // section mapping (1MB)
        page_size = LARGE_PAGE_SIZE;
        offset_level = ARM_L1_OFFSET(vaddr);
        printf("do_map: large path\n");
        printf("page_size: %i, size: %i\n", page_size, size);
        page_size = BASE_PAGE_SIZE;
        offset_level = ARM_L2_OFFSET(vaddr);

    size = ROUND_UP(size, page_size);
    size_t pte_count = DIVIDE_ROUND_UP(size, page_size);
    genvaddr_t vend = vaddr + size;

    // should be trivially true for section mappings
    if ((ARM_L1_OFFSET(vaddr) == ARM_L1_OFFSET(vend-1)) ||
        flags & FLAGS_SECTION) {
        err = do_single_map(pmap, vaddr, vend, frame, offset, pte_count, flags);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "[do_map] in fast path");
            return err_push(err, LIB_ERR_PMAP_DO_MAP);
    } else { // multiple leaf page tables
        uint32_t c = ARM_L2_MAX_ENTRIES - offset_level;
        genvaddr_t temp_end = vaddr + c * page_size;
        err = do_single_map(pmap, vaddr, temp_end, frame, offset, c, flags);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_DO_MAP);

        while (ARM_L1_OFFSET(temp_end) < ARM_L1_OFFSET(vend)) { // update vars
            temp_end = vaddr + ARM_L2_MAX_ENTRIES * page_size;
            offset += c * page_size;
            c = ARM_L2_MAX_ENTRIES;
            err = slot_alloc(&next);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            err = cap_copy(next, frame);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            err = do_single_map(pmap, vaddr, temp_end, frame, offset, ARM_L2_MAX_ENTRIES, flags);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);

        // map the remaining part
        offset += c * page_size;
        c = ARM_L2_OFFSET(vend) - ARM_L2_OFFSET(temp_end);
        err = slot_alloc(&next);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_DO_MAP);
        err = cap_copy(next, frame);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_DO_MAP);
        err = do_single_map(pmap, temp_end, vend, next, offset, c, flags);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_DO_MAP);

    //has_vnode_debug = false;
    uintptr_t pmap_flags = vregion_flags_to_kpi_paging_flags(flags);
    for (size_t i = offset; i < offset + size; i += BASE_PAGE_SIZE) {
        vaddr += BASE_PAGE_SIZE;
max_slabs_required(size_t bytes)
    // Perform a slab allocation for every page (do_map -> slab_alloc)
    size_t pages = DIVIDE_ROUND_UP(bytes, BASE_PAGE_SIZE);
    // Perform a slab allocation for every L2 (get_ptable -> find_vnode)
    size_t l2entries = DIVIDE_ROUND_UP(pages, 256 * 4);
    // Perform a slab allocation for every L1 (do_map -> find_vnode)
    size_t l1entries = DIVIDE_ROUND_UP(l2entries, 1024);
    return pages + l2entries + l1entries;
static size_t max_slabs_required_large(size_t bytes)
    // Similar to the above, but with a larger page size, mapped only in a
    // higher-level paging structure.
    size_t pages = DIVIDE_ROUND_UP(bytes, LARGE_PAGE_SIZE);
    size_t l1entries = DIVIDE_ROUND_UP(pages, 1024);
    return pages + l1entries;
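/*
 * Worked example (editor's illustration): mapping 16MB with 4KB base pages
 * needs 4096 page slabs, DIVIDE_ROUND_UP(4096, 1024) = 4 slabs for L2 tables
 * and 1 slab for L1 bookkeeping, so max_slabs_required(16MB) = 4101. With 1MB
 * sections, max_slabs_required_large(16MB) = 16 + 1 = 17.
 */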
 * \brief Refill slabs used for metadata
 *
 * \param pmap     The pmap to refill in
 * \param request  The number of slabs the allocator must have
 *                 when the function returns
 *
 * When the current pmap is initialized,
 * it reserves some virtual address space for metadata.
 * This reserved address space is used here.
 *
 * Can only be called for the current pmap.
 * Will recursively call into itself until it has enough slabs.
static errval_t refill_slabs(struct pmap_arm *pmap, size_t request)
    /* Keep looping until we have #request slabs */
    while (slab_freecount(&pmap->slab) < request) {
        // Number of bytes required for #request slabs
        size_t bytes = SLAB_STATIC_SIZE(request - slab_freecount(&pmap->slab),
                                        sizeof(struct vnode));

        /* Get a frame of that size */
        err = frame_alloc(&cap, bytes, &bytes);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_ALLOC);

        /* If we do not have enough slabs to map the frame in, recurse */
        size_t required_slabs_for_frame = max_slabs_required(bytes);
        if (slab_freecount(&pmap->slab) < required_slabs_for_frame) {
            // If we recurse, we require more slabs than are needed to map a single page
            assert(required_slabs_for_frame > 4);
            err = refill_slabs(pmap, required_slabs_for_frame);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_SLAB_REFILL);

        /* Perform the mapping */
        genvaddr_t genvaddr = pmap->vregion_offset;
        pmap->vregion_offset += (genvaddr_t)bytes;

        // If this assert fires, increase META_DATA_RESERVED_SPACE
        assert(pmap->vregion_offset < (vregion_get_base_addr(&pmap->vregion) +
               vregion_get_size(&pmap->vregion)));

        err = do_map(pmap, genvaddr, cap, 0, bytes,
                     VREGION_FLAGS_READ_WRITE, NULL, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_DO_MAP);

        lvaddr_t buf = vspace_genvaddr_to_lvaddr(genvaddr);
        slab_grow(&pmap->slab, (void*)buf, bytes);
 * \brief Create page mappings
 *
 * \param pmap     The pmap object
 * \param vaddr    The virtual address to create the mapping for
 * \param frame    The frame cap to map in
 * \param offset   Offset into the frame cap
 * \param size     Size of the mapping
 * \param flags    Flags for the mapping
 * \param retoff   If non-NULL, filled in with the adjusted offset of the mapped region
 * \param retsize  If non-NULL, filled in with the adjusted size of the mapped region
map(struct pmap *pmap,
    vregion_flags_t flags,
    struct pmap_arm *pmap_arm = (struct pmap_arm *)pmap;
    size_t slabs_required;

    // adjust the mapping to be on page boundaries
    if (flags & FLAGS_SECTION) {
        // section mapping (1MB)
        base = LARGE_PAGE_OFFSET(offset);
        page_size = LARGE_PAGE_SIZE;
        slabs_required = max_slabs_required_large(size);
        printf("map: large path, page_size: %i, base: %i, slabs: %i, size: %i\n",
               page_size, base, slabs_required, size);
        base = BASE_PAGE_OFFSET(offset);
        page_size = BASE_PAGE_SIZE;
        slabs_required = max_slabs_required(size);
    size = ROUND_UP(size, page_size);

    const size_t slabs_reserve = 3; // == max_slabs_required(1)
    uint64_t slabs_free = slab_freecount(&pmap_arm->slab);
    slabs_required += slabs_reserve;

    if (slabs_required > slabs_free) {
        if (get_current_pmap() == pmap) {
            errval_t err = refill_slabs(pmap_arm, slabs_required);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_SLAB_REFILL);
            size_t bytes = SLAB_STATIC_SIZE(slabs_required - slabs_free,
                                            sizeof(struct vnode));
            void *buf = malloc(bytes);
                return LIB_ERR_MALLOC_FAIL;
            slab_grow(&pmap_arm->slab, buf, bytes);

    return do_map(pmap_arm, vaddr, frame, offset, size, flags,
static errval_t do_single_unmap(struct pmap_arm *pmap, genvaddr_t vaddr,
                                size_t pte_count, bool delete_cap)
    struct vnode *pt = find_ptable(pmap, vaddr);
    // analogous to do_single_map, we use 10 bits for tracking pages in user space -SG
    struct vnode *page = find_vnode(pt, ARM_USER_L2_OFFSET(vaddr));
    if (page && page->u.frame.pte_count == pte_count) {
        err = vnode_unmap(pt->u.vnode.cap, page->u.frame.cap,
                          page->entry, page->u.frame.pte_count);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "vnode_unmap");
            return err_push(err, LIB_ERR_VNODE_UNMAP);

        // Free up the resources
        err = cap_destroy(page->u.frame.cap);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_DO_SINGLE_UNMAP);
        remove_vnode(pt, page);
        slab_free(&pmap->slab, page);
    return LIB_ERR_PMAP_FIND_VNODE;
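/*
 * Editor's note: the check above only accepts a vnode whose recorded
 * pte_count matches the requested pte_count, so partial unmaps of an existing
 * mapping are not supported and fail with LIB_ERR_PMAP_FIND_VNODE.
 */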
 * \brief Remove page mappings
 *
 * \param pmap     The pmap object
 * \param vaddr    The start of the virtual address range to remove
 * \param size     The size of the virtual address range to remove
 * \param retsize  If non-NULL, filled in with the actual size removed
unmap(struct pmap *pmap,
    errval_t err, ret = SYS_ERR_OK;
    struct pmap_arm *pmap_arm = (struct pmap_arm*)pmap;
    size = ROUND_UP(size, BASE_PAGE_SIZE);
    size_t pte_count = size / BASE_PAGE_SIZE;
    genvaddr_t vend = vaddr + size;

    if (ARM_L1_OFFSET(vaddr) == ARM_L1_OFFSET(vend-1)) {
        err = do_single_unmap(pmap_arm, vaddr, pte_count, false);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_UNMAP);
    } else { // slow path
        uint32_t c = ARM_L2_MAX_ENTRIES - ARM_L2_OFFSET(vaddr);
        err = do_single_unmap(pmap_arm, vaddr, c, false);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_UNMAP);

        vaddr += c * BASE_PAGE_SIZE;
        while (ARM_L1_OFFSET(vaddr) < ARM_L1_OFFSET(vend)) {
            c = ARM_L2_MAX_ENTRIES;
            err = do_single_unmap(pmap_arm, vaddr, c, true);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_UNMAP);
            vaddr += c * BASE_PAGE_SIZE;

        // unmap the remaining part
        c = ARM_L2_OFFSET(vend) - ARM_L2_OFFSET(vaddr);
        err = do_single_unmap(pmap_arm, vaddr, c, true);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_UNMAP);
 * \brief Determine a suitable address for a given memory object
 *
 * \param pmap       The pmap object
 * \param memobj     The memory object to determine the address for
 * \param alignment  Minimum alignment
 * \param vaddr      Pointer to return the determined address
 *
 * Relies on vspace.c code maintaining an ordered list of vregions
determine_addr(struct pmap *pmap,
               struct memobj *memobj,
    assert(pmap->vspace->head);

    assert(alignment <= BASE_PAGE_SIZE); // NYI

    struct vregion *walk = pmap->vspace->head;
    while (walk->next) { // Try to insert between existing mappings
        genvaddr_t walk_base = vregion_get_base_addr(walk);
        genvaddr_t walk_size = vregion_get_size(walk);
        genvaddr_t next_base = vregion_get_base_addr(walk->next);

        if (next_base > walk_base + walk_size + memobj->size &&
            walk_base + walk_size > VSPACE_BEGIN) { // Ensure the candidate address lies above VSPACE_BEGIN
            *vaddr = walk_base + walk_size;

    *vaddr = vregion_get_base_addr(walk) + vregion_get_size(walk);
/** \brief Retrieves an address that can currently be used for large mappings
static errval_t determine_addr_raw(struct pmap *pmap, size_t size,
                                   size_t alignment, genvaddr_t *retvaddr)
    struct pmap_arm *pmap_arm = (struct pmap_arm *)pmap;

    struct vnode *walk_pdir = pmap_arm->root.u.vnode.children;
    assert(walk_pdir != NULL); // assume there's always at least one existing entry

    if (alignment == 0) {
        alignment = BASE_PAGE_SIZE;
        alignment = ROUND_UP(alignment, BASE_PAGE_SIZE);
    size = ROUND_UP(size, alignment);

    size_t free_count = DIVIDE_ROUND_UP(size, LARGE_PAGE_SIZE);
    //debug_printf("need %zu contiguous free pdirs\n", free_count);

    // compile the pdir free list
    // Barrelfish treats the L1 as 1024 entries
    for (int i = 0; i < 1024; i++) {
        f[walk_pdir->entry] = false;
        assert(walk_pdir->is_vnode);
        f[walk_pdir->entry] = false;
        walk_pdir = walk_pdir->next;
    genvaddr_t first_free = 384;
    for (; first_free < 512; first_free++) {
        for (int i = 1; i < free_count; i++) {
            if (!f[first_free + i]) {
                first_free = first_free + i;

    assert(1 == 1); // make compiler shut up about label
    //printf("first free: %li\n", (uint32_t)first_free);
    if (first_free + free_count <= 512) {
        *retvaddr = first_free << 22;

    return LIB_ERR_OUT_OF_VIRTUAL_ADDR;
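/*
 * Editor's note: with the 1024-entry "pretend" L1, each L1 entry covers
 * 4MB (1 << 22 bytes), so scanning entries 384..511 searches the virtual
 * range 0x60000000-0x7fffffff for free_count contiguous unused slots; the
 * returned address is simply first_free << 22.
 */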
static errval_t do_single_modify_flags(struct pmap_arm *pmap, genvaddr_t vaddr,
                                       size_t pages, vregion_flags_t flags)
    errval_t err = SYS_ERR_OK;
    struct vnode *ptable = find_ptable(pmap, vaddr);
    uint16_t ptentry = ARM_USER_L2_OFFSET(vaddr);
    struct vnode *page = find_vnode(ptable, ptentry);

    if (inside_region(ptable, ptentry, pages)) {
        // we're modifying part of a valid mapped region
        // arguments to invocation: invoke frame cap, first affected
        // page (as offset from first page in mapping), #affected
        // pages, new flags. Invocation should check compatibility of
        // new set of flags with cap permissions.
        size_t off = ptentry - page->entry;
        uintptr_t pmap_flags = vregion_flags_to_kpi_paging_flags(flags);
        err = invoke_frame_modify_flags(page->u.frame.cap, off, pages, pmap_flags);
        printf("invoke_frame_modify_flags returned error: %s (%"PRIuERRV")\n",
               err_getstring(err), err);
        // overlaps some region border
        return LIB_ERR_PMAP_EXISTING_MAPPING;
 * \brief Modify page mapping
 *
 * \param pmap     The pmap object
 * \param vaddr    The first virtual address to modify
 * \param flags    New flags for the mapping
 * \param retsize  If non-NULL, filled in with the actual size modified
modify_flags(struct pmap *pmap,
             vregion_flags_t flags,
    errval_t err, ret = SYS_ERR_OK;
    struct pmap_arm *pmap_arm = (struct pmap_arm*)pmap;
    size = ROUND_UP(size, BASE_PAGE_SIZE);
    size_t pte_count = size / BASE_PAGE_SIZE;
    genvaddr_t vend = vaddr + size;

    if (ARM_L1_OFFSET(vaddr) == ARM_L1_OFFSET(vend-1)) {
        err = do_single_modify_flags(pmap_arm, vaddr, pte_count, flags);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_UNMAP);

        uint32_t c = ARM_L2_MAX_ENTRIES - ARM_L2_OFFSET(vaddr);
        err = do_single_modify_flags(pmap_arm, vaddr, c, flags);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_UNMAP);

        vaddr += c * BASE_PAGE_SIZE;
        while (ARM_L1_OFFSET(vaddr) < ARM_L1_OFFSET(vend)) {
            c = ARM_L2_MAX_ENTRIES;
            err = do_single_modify_flags(pmap_arm, vaddr, c, flags);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_UNMAP);
            vaddr += c * BASE_PAGE_SIZE;

        // modify the remaining part
        c = ARM_L2_OFFSET(vend) - ARM_L2_OFFSET(vaddr);
        err = do_single_modify_flags(pmap_arm, vaddr, c, flags);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_UNMAP);
 * \brief Query existing page mapping
 *
 * \param pmap       The pmap object
 * \param vaddr      The virtual address to query
 * \param retvaddr   Returns the base virtual address of the mapping
 * \param retsize    Returns the actual size of the mapping
 * \param retcap     Returns the cap mapped at this address
 * \param retoffset  Returns the offset within the cap that is mapped
 * \param retflags   Returns the flags for this mapping
 *
 * All of the ret parameters are optional.
static errval_t lookup(struct pmap *pmap, genvaddr_t vaddr,
                       genvaddr_t *retvaddr, size_t *retsize,
                       struct capref *retcap, genvaddr_t *retoffset,
                       vregion_flags_t *retflags)
serialise(struct pmap *pmap, void *buf, size_t buflen)
    // Unimplemented: ignored

deserialise(struct pmap *pmap, void *buf, size_t buflen)
    // Unimplemented: we start with an empty pmap and avoid the bottom of the address space

static struct pmap_funcs pmap_funcs = {
    .determine_addr = determine_addr,
    .determine_addr_raw = determine_addr_raw,
    .modify_flags = modify_flags,
    .serialise = serialise,
    .deserialise = deserialise,
 * \brief Initialize the pmap object
pmap_init(struct pmap *pmap,
          struct vspace *vspace,
          struct slot_allocator *opt_slot_alloc)
    struct pmap_arm* pmap_arm = (struct pmap_arm*)pmap;

    /* Generic portion */
    pmap->f = pmap_funcs;
    pmap->vspace = vspace;

    // Slab allocator for vnodes
    slab_init(&pmap_arm->slab, sizeof(struct vnode), NULL);
    slab_grow(&pmap_arm->slab,
              pmap_arm->slab_buffer,
              sizeof(pmap_arm->slab_buffer));

    pmap_arm->root.is_vnode         = true;
    pmap_arm->root.u.vnode.cap      = vnode;
    pmap_arm->root.next             = NULL;
    pmap_arm->root.u.vnode.children = NULL;
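/*
 * Editor's note: the root vnode wraps the L1 page table capability supplied
 * by the caller; its children list will later hold both L2-table vnodes
 * (is_vnode == true) and mapped-frame entries (is_vnode == false).
 */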
errval_t pmap_current_init(bool init_domain)
    struct pmap_arm *pmap_arm = (struct pmap_arm*)get_current_pmap();

    // To reserve a block of virtual address space,
    // a vregion representing the address space is required.
    // We construct a superficial one here and add it to the vregion list.
    struct vregion *vregion = &pmap_arm->vregion;
    assert((void*)vregion > (void*)pmap_arm);
    assert((void*)vregion < (void*)(pmap_arm + 1));
    vregion->vspace = NULL;
    vregion->memobj = NULL;
    vregion->base   = VSPACE_BEGIN;
    vregion->size   = META_DATA_RESERVED_SPACE;
    vregion->next   = NULL;

    struct vspace *vspace = pmap_arm->p.vspace;
    assert(!vspace->head);
    vspace->head = vregion;

    pmap_arm->vregion_offset = pmap_arm->vregion.base;