 * \brief pmap management
 *
 * Copyright (c) 2010-2013 ETH Zurich.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
 * There was some minor difficulty here with mapping the CPU's native
 * page table arrangement onto Barrelfish. The problem lies with
 * resource bootstrapping. The bootstrap RAM allocator allocates pages.
 *
 * The natural division of bits is 12/8/12, corresponding to 4K
 * entries in the L1 table and 256 L2 entries per L2 table.
 * Unfortunately 256 entries consume only 1KB rather than a full
 * page (4KB), so we pretend here and in the kernel caps page
 * code that the L1 has 1024 entries and L2 tables are 4KB in
 * size. The 4KB constraint comes from ram_alloc_fixed
 * allocating single pages and the difficulty in bootstrapping
 * cap slots (alloc_node takes a single slot).
 *
 * For now this suffices, but it might need to be revisited in the future.
 *
 * An earlier cut at this used the first 1KB of each
 * allocation made from ram_alloc_fixed and wasted the remaining
 * space. Aside from the space wasted, it entailed a couple of minor
 * platform ifdefs to work around the discrepancy.
 * Alternative fixes discussed include:
 *
 * 1. avoid the need to create vnodes before connecting to a
 *    real allocator (probably not plausible).
 *
 * 2. somehow make ram_alloc_fixed handle sub-page allocations
 *    (it's clunky, but perhaps we can give each domain a separate
 *    cnode full of 1kB-sized RAM caps?)
 *
 * 3. handle the problem at the level of vnode_create (can't see how to
 *
 * 4. waste the space -- doing this cleanly will require a new parameter
 *    to retype to prevent all 4 caps being created.
 *
 * 5. introduce a new ARM-specific version of vnode_create that creates
 *    4 1kB vnodes, and is only called from the ARM VM code.
#include <barrelfish/barrelfish.h>
#include <barrelfish/caddr.h>
#include <barrelfish/invocations_arch.h>
// Location of VSpace managed by this system.
#ifdef __ARM_ARCH_7M__
// The virtual section 0x40000000-0x40100000 cannot be used as regular memory
// because of "bit-banding".
// 0x42000000-0x44000000 is also dangerous, so we start after that.
// XXX: there are more virtual regions we are not allowed to use
//      -> find out where to reserve those
#define VSPACE_BEGIN ((lvaddr_t)(1UL*1024*1024*1024 + 64UL*1024*1024)) // 0x44000000
#else // "normal" ARM architectures
#define VSPACE_BEGIN ((lvaddr_t)1UL*1024*1024*1024) // 0x40000000
// Amount of virtual address space reserved for mapping frames
// backing refill_slabs.
//#define META_DATA_RESERVED_SPACE (BASE_PAGE_SIZE * 128) // 64
#define META_DATA_RESERVED_SPACE (BASE_PAGE_SIZE * 256)
// increased the above value from 128 for the Pandaboard port
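// (256 base pages of 4kB each = 1MB of virtual address space set aside for
//  the slab metadata mappings)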
// Convenience macros to figure out user space page table indices
// we use 10 bits for both L1 and L2 tables in user space, even though
// in hardware we use 12 bits for L1 and 8 bits for L2.
#define ARM_USER_L1_OFFSET(addr) ((uintptr_t)(addr >> 22) & 0x3ffu)
#define ARM_USER_L2_OFFSET(addr) ((uintptr_t)(addr >> 12) & 0x3ffu)
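// Worked example (illustrative): for addr = 0x40123456,
//   ARM_USER_L1_OFFSET(addr) = (0x40123456 >> 22) & 0x3ff = 0x100
//   ARM_USER_L2_OFFSET(addr) = (0x40123456 >> 12) & 0x3ff = 0x123
// i.e. entry 0x100 of the pretend 1024-entry L1 and entry 0x123 of the 4kB L2.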
#define FLAGS_LARGE        0x0100
#define FLAGS_SECTION      0x0200
#define FLAGS_SUPERSECTION 0x0300
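// These page-size selector values live above VREGION_FLAGS_MASK (0x2f); they
// are ORed into the vregion flags to request larger mappings and stripped
// again (see flags & ~FLAGS_SUPERSECTION in do_single_map) before the
// remaining bits are converted to KPI paging flags.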
static inline uintptr_t
vregion_flags_to_kpi_paging_flags(vregion_flags_t flags)
    STATIC_ASSERT(0x2f == VREGION_FLAGS_MASK, "");
    STATIC_ASSERT(0x0f == KPI_PAGING_FLAGS_MASK, "");
    STATIC_ASSERT(VREGION_FLAGS_READ    == KPI_PAGING_FLAGS_READ, "");
    STATIC_ASSERT(VREGION_FLAGS_WRITE   == KPI_PAGING_FLAGS_WRITE, "");
    STATIC_ASSERT(VREGION_FLAGS_EXECUTE == KPI_PAGING_FLAGS_EXECUTE, "");
    STATIC_ASSERT(VREGION_FLAGS_NOCACHE == KPI_PAGING_FLAGS_NOCACHE, "");
    if ((flags & VREGION_FLAGS_MPB) != 0) {
        // XXX: ignore MPB flag on ARM, otherwise the assert below fires -AB
        flags &= ~VREGION_FLAGS_MPB;
    if ((flags & VREGION_FLAGS_GUARD) != 0) {

    assert(0 == (~KPI_PAGING_FLAGS_MASK & (uintptr_t)flags));
    return (uintptr_t)flags;
 * \brief Starting at a given root, return the vnode with entry equal to #entry
static struct vnode *find_vnode(struct vnode *root, uint32_t entry)
    assert(root != NULL);
    assert(root->is_vnode);

    for (n = root->u.vnode.children; n != NULL; n = n->next) {
        if (n->entry == entry) {
static bool inside_region(struct vnode *root, uint32_t entry, uint32_t npages)
    assert(root != NULL);
    assert(root->is_vnode);

    for (n = root->u.vnode.children; n; n = n->next) {
        uint16_t end = n->entry + n->u.frame.pte_count;
        if (n->entry <= entry && entry + npages <= end) {
static bool has_vnode(struct vnode *root, uint32_t entry, size_t len)
    assert(root != NULL);
    assert(root->is_vnode);

    uint32_t end_entry = entry + len;

    for (n = root->u.vnode.children; n; n = n->next) {
        if (n->is_vnode && n->entry == entry) {
        uint32_t end = n->entry + n->u.frame.pte_count;
        if (n->entry < entry && end > end_entry) {
        if (n->entry >= entry && n->entry < end_entry) {
static void remove_vnode(struct vnode *root, struct vnode *item)
    assert(root->is_vnode);
    struct vnode *walk = root->u.vnode.children;
    struct vnode *prev = NULL;
            prev->next = walk->next;
            root->u.vnode.children = walk->next;
    assert(!"Should not get here");
 * \brief Allocates a new VNode, adding it to the page table and our metadata
static errval_t alloc_vnode(struct pmap_arm *pmap_arm, struct vnode *root,
                            enum objtype type, uint32_t entry,
                            struct vnode **retvnode)
    assert(root->is_vnode);

    struct vnode *newvnode = slab_alloc(&pmap_arm->slab);
    if (newvnode == NULL) {
        return LIB_ERR_SLAB_ALLOC_FAIL;
    newvnode->is_vnode = true;

    // The VNode capability
    err = slot_alloc(&newvnode->u.vnode.cap);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);

    err = vnode_create(newvnode->u.vnode.cap, type);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VNODE_CREATE);

    err = vnode_map(root->u.vnode.cap, newvnode->u.vnode.cap, entry,
                    KPI_PAGING_FLAGS_READ | KPI_PAGING_FLAGS_WRITE, 0, 1);

    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VNODE_MAP);

    // The VNode meta data
    newvnode->entry = entry;
    newvnode->next  = root->u.vnode.children;
    root->u.vnode.children = newvnode;
    newvnode->u.vnode.children = NULL;

    *retvnode = newvnode;
 * \brief Returns the vnode for the pagetable mapping a given vspace address
static errval_t get_ptable(struct pmap_arm *pmap,
                           struct vnode **ptable)
    // NB Strictly there are 12 bits in the ARM L1, but the allocation unit
    // for L2 tables is one page holding 4 L2 tables, so we use 10 bits for
    // the L1 index.
    uintptr_t index = ARM_USER_L1_OFFSET(vaddr);
    if ((*ptable = find_vnode(&pmap->root, index)) == NULL)
        // L1 table entries point to L2 tables so allocate an L2
        // table for this L1 entry.

        struct vnode *tmp = NULL; // Tmp variable for passing to alloc_vnode

        errval_t err = alloc_vnode(pmap, &pmap->root, ObjType_VNode_ARM_l2,
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "alloc_vnode");

        *ptable = tmp; // Set argument to received value

    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_PMAP_ALLOC_VNODE);
static struct vnode *find_ptable(struct pmap_arm *pmap,
    // NB Strictly there are 12 bits in the ARM L1, but the allocation unit
    // for L2 tables is one page holding 4 L2 tables, so we use 10 bits for
    // the L1 index here as well.
    uintptr_t index = ARM_USER_L1_OFFSET(vaddr);
    return find_vnode(&pmap->root, index);
static errval_t do_single_map(struct pmap_arm *pmap, genvaddr_t vaddr, genvaddr_t vend,
                              struct capref frame, size_t offset, size_t pte_count,
                              vregion_flags_t flags)
    errval_t err = SYS_ERR_OK;
    // Get the page table
    struct vnode *ptable;
    if (flags & FLAGS_SECTION) {
        // section mapping (1MB)
        // mapped in the L1 table at root
        ptable = &pmap->root;
        index = ARM_USER_L1_OFFSET(vaddr);
        printf("do_single_map: large path\n");
        err = get_ptable(pmap, vaddr, &ptable);
        index = ARM_USER_L2_OFFSET(vaddr);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_PMAP_GET_PTABLE);
    uintptr_t pmap_flags = vregion_flags_to_kpi_paging_flags(flags & ~FLAGS_SUPERSECTION);
    // XXX: reassess the following note -SG
    // NOTE: strictly speaking an L2 entry only has 8 bits, but due to the way
    // Barrelfish allocates L1 and L2 tables, we use 10 bits for the tracking
    // index here and in the map syscall
    // NOTE: index determined in the pagesize branch -AD
    //uintptr_t index = ARM_USER_L2_OFFSET(vaddr);
    // Create user level datastructure for the mapping
    bool has_page = has_vnode(ptable, index, pte_count);
    struct vnode *page = slab_alloc(&pmap->slab);
    page->is_vnode = false;
    page->next = ptable->u.vnode.children;
    ptable->u.vnode.children = page;
    page->u.frame.cap = frame;
    page->u.frame.flags = flags;
    page->u.frame.pte_count = pte_count;

    // Map entry into the page table
    err = vnode_map(ptable->u.vnode.cap, frame, index,
                    pmap_flags, offset, pte_count);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VNODE_MAP);
static errval_t do_map(struct pmap_arm *pmap, genvaddr_t vaddr,
                       struct capref frame, size_t offset, size_t size,
                       vregion_flags_t flags, size_t *retoff, size_t *retsize)
    // determine mapping specific parts
    if (flags & FLAGS_SECTION) {
        // section mapping (1MB)
        page_size = LARGE_PAGE_SIZE;
        offset_level = ARM_L1_OFFSET(vaddr);
        printf("do_map: large path\n");
        printf("page_size: %zu, size: %zu\n", page_size, size);
        page_size = BASE_PAGE_SIZE;
        offset_level = ARM_L2_OFFSET(vaddr);

    size = ROUND_UP(size, page_size);
    size_t pte_count = DIVIDE_ROUND_UP(size, page_size);
    genvaddr_t vend = vaddr + size;

    // should be trivially true for section mappings
    if ((ARM_L1_OFFSET(vaddr) == ARM_L1_OFFSET(vend-1)) ||
        (flags & FLAGS_SECTION)) {
        err = do_single_map(pmap, vaddr, vend, frame, offset, pte_count, flags);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "[do_map] in fast path");
            return err_push(err, LIB_ERR_PMAP_DO_MAP);
    } else { // multiple leaf page tables
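        // map the part that fits into the first (partially used) leaf L2 table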
        uint32_t c = ARM_L2_MAX_ENTRIES - offset_level;
        genvaddr_t temp_end = vaddr + c * page_size;
        err = do_single_map(pmap, vaddr, temp_end, frame, offset, c, flags);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_DO_MAP);

        while (ARM_L1_OFFSET(temp_end) < ARM_L1_OFFSET(vend)) { // update vars
            temp_end = vaddr + ARM_L2_MAX_ENTRIES * page_size;
            offset += c * page_size;
            c = ARM_L2_MAX_ENTRIES;

            err = slot_alloc(&next);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            err = cap_copy(next, frame);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);

            err = do_single_map(pmap, vaddr, temp_end, frame, offset, ARM_L2_MAX_ENTRIES, flags);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);

        // map remaining part
        offset += c * page_size;
        c = ARM_L2_OFFSET(vend) - ARM_L2_OFFSET(temp_end);

            err = slot_alloc(&next);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            err = cap_copy(next, frame);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);

            err = do_single_map(pmap, temp_end, vend, next, offset, c, flags);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
    //has_vnode_debug = false;
    uintptr_t pmap_flags = vregion_flags_to_kpi_paging_flags(flags);

    for (size_t i = offset; i < offset + size; i += BASE_PAGE_SIZE) {
        vaddr += BASE_PAGE_SIZE;
max_slabs_required(size_t bytes)
    // Perform a slab allocation for every page (do_map -> slab_alloc)
    size_t pages = DIVIDE_ROUND_UP(bytes, BASE_PAGE_SIZE);
    // Perform a slab allocation for every L2 (get_ptable -> find_vnode)
    size_t l2entries = DIVIDE_ROUND_UP(pages, 256 * 4);
    // Perform a slab allocation for every L1 (do_map -> find_vnode)
    size_t l1entries = DIVIDE_ROUND_UP(l2entries, 1024);
    return pages + l2entries + l1entries;

static size_t max_slabs_required_large(size_t bytes)
    // similar to the above, but with a larger page size and mapped only in a
    // higher-level paging structure
    size_t pages = DIVIDE_ROUND_UP(bytes, LARGE_PAGE_SIZE);
    size_t l1entries = DIVIDE_ROUND_UP(pages, 1024);
    return pages + l1entries;
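// Worked example (illustrative): mapping 1MB with 4kB base pages gives
// pages = 256, l2entries = 1, l1entries = 1, i.e. 258 slabs; the same 1MB
// mapped as a single section gives pages = 1, l1entries = 1, i.e. 2 slabs.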
 * \brief Refill slabs used for metadata
 *
 * \param pmap     The pmap to refill in
 * \param request  The number of slabs the allocator must have
 *                 when the function returns
 *
 * When the current pmap is initialized,
 * it reserves some virtual address space for metadata.
 * This reserved address space is used here.
 *
 * Can only be called for the current pmap.
 * Will recursively call into itself until it has enough slabs.
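 * (The recursion is expected to bottom out quickly: each nested call only
 * requests the slabs needed to map one freshly allocated metadata frame,
 * which is far fewer than an arbitrary outer request.)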
static errval_t refill_slabs(struct pmap_arm *pmap, size_t request)
    /* Keep looping till we have #request slabs */
    while (slab_freecount(&pmap->slab) < request) {
        // Amount of bytes required for #request
        size_t bytes = SLAB_STATIC_SIZE(request - slab_freecount(&pmap->slab),
                                        sizeof(struct vnode));

        /* Get a frame of that size */
        err = frame_alloc(&cap, bytes, &bytes);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_ALLOC);

        /* If we do not have enough slabs to map the frame in, recurse */
        size_t required_slabs_for_frame = max_slabs_required(bytes);
        if (slab_freecount(&pmap->slab) < required_slabs_for_frame) {
            // When recursing, we need more slabs than it takes to map a single page
            assert(required_slabs_for_frame > 4);

            err = refill_slabs(pmap, required_slabs_for_frame);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_SLAB_REFILL);

        /* Perform mapping */
        genvaddr_t genvaddr = pmap->vregion_offset;
        pmap->vregion_offset += (genvaddr_t)bytes;

        // if this assert fires, increase META_DATA_RESERVED_SPACE
        assert(pmap->vregion_offset < (vregion_get_base_addr(&pmap->vregion) +
                                       vregion_get_size(&pmap->vregion)));

        err = do_map(pmap, genvaddr, cap, 0, bytes,
                     VREGION_FLAGS_READ_WRITE, NULL, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_DO_MAP);

        lvaddr_t buf = vspace_genvaddr_to_lvaddr(genvaddr);
        slab_grow(&pmap->slab, (void*)buf, bytes);
 * \brief Create page mappings
 *
 * \param pmap     The pmap object
 * \param vaddr    The virtual address to create the mapping for
 * \param frame    The frame cap to map in
 * \param offset   Offset into the frame cap
 * \param size     Size of the mapping
 * \param flags    Flags for the mapping
 * \param retoff   If non-NULL, filled in with adjusted offset of mapped region
 * \param retsize  If non-NULL, filled in with adjusted size of mapped region
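 *
 * Typical use (illustrative sketch, not taken from this file): the vregion /
 * memobj code invokes this through the pmap function table, e.g.
 *
 *     struct pmap *p = get_current_pmap();
 *     err = p->f.map(p, vaddr, frame, 0, BASE_PAGE_SIZE,
 *                    VREGION_FLAGS_READ_WRITE, &retoff, &retsize);
 *
 * ORing FLAGS_SECTION into the flags requests 1MB section mappings instead
 * of 4kB pages.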
map(struct pmap *pmap,
    vregion_flags_t flags,
    struct pmap_arm *pmap_arm = (struct pmap_arm *)pmap;
    size_t slabs_required;

    // adjust the mapping to be on page boundaries
    if (flags & FLAGS_SECTION) {
        // section mapping (1MB)
        base = LARGE_PAGE_OFFSET(offset);
        page_size = LARGE_PAGE_SIZE;
        slabs_required = max_slabs_required_large(size);
        printf("map: large path, page_size: %zu, base: %zu, slabs: %zu, size: %zu\n",
               page_size, base, slabs_required, size);
        base = BASE_PAGE_OFFSET(offset);
        page_size = BASE_PAGE_SIZE;
        slabs_required = max_slabs_required(size);

    size = ROUND_UP(size, page_size);

    const size_t slabs_reserve = 3; // == max_slabs_required(1)
    uint64_t slabs_free = slab_freecount(&pmap_arm->slab);

    slabs_required += slabs_reserve;

    if (slabs_required > slabs_free) {
        if (get_current_pmap() == pmap) {
            errval_t err = refill_slabs(pmap_arm, slabs_required);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_SLAB_REFILL);

            size_t bytes = SLAB_STATIC_SIZE(slabs_required - slabs_free,
                                            sizeof(struct vnode));
            void *buf = malloc(bytes);
                return LIB_ERR_MALLOC_FAIL;
            slab_grow(&pmap_arm->slab, buf, bytes);

    return do_map(pmap_arm, vaddr, frame, offset, size, flags,
static errval_t do_single_unmap(struct pmap_arm *pmap, genvaddr_t vaddr,
                                size_t pte_count, bool delete_cap)
    struct vnode *pt = find_ptable(pmap, vaddr);
        // analogous to do_single_map, we use 10 bits for tracking pages in user space -SG
        struct vnode *page = find_vnode(pt, ARM_USER_L2_OFFSET(vaddr));
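        // Only exact matches are handled here: the recorded pte_count of the
        // mapping must equal the requested pte_count, i.e. partial unmaps of a
        // mapping are not supported on this path.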
        if (page && page->u.frame.pte_count == pte_count) {
            err = vnode_unmap(pt->u.vnode.cap, page->u.frame.cap,
                              page->entry, page->u.frame.pte_count);
            if (err_is_fail(err)) {
                DEBUG_ERR(err, "vnode_unmap");
                return err_push(err, LIB_ERR_VNODE_UNMAP);

            // Free up the resources
                err = cap_destroy(page->u.frame.cap);
                if (err_is_fail(err)) {
                    return err_push(err, LIB_ERR_PMAP_DO_SINGLE_UNMAP);

            remove_vnode(pt, page);
            slab_free(&pmap->slab, page);

    return LIB_ERR_PMAP_FIND_VNODE;
 * \brief Remove page mappings
 *
 * \param pmap     The pmap object
 * \param vaddr    The start of the virtual address range to remove
 * \param size     The size of the virtual address range to remove
 * \param retsize  If non-NULL, filled in with the actual size removed
unmap(struct pmap *pmap,
    errval_t err, ret = SYS_ERR_OK;
    struct pmap_arm *pmap_arm = (struct pmap_arm*)pmap;
    size = ROUND_UP(size, BASE_PAGE_SIZE);
    size_t pte_count = size / BASE_PAGE_SIZE;
    genvaddr_t vend = vaddr + size;

    if (ARM_L1_OFFSET(vaddr) == ARM_L1_OFFSET(vend-1)) {
        err = do_single_unmap(pmap_arm, vaddr, pte_count, false);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_UNMAP);
    } else { // slow path
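        // unmap the part covered by the first (partially used) leaf L2 table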
        uint32_t c = ARM_L2_MAX_ENTRIES - ARM_L2_OFFSET(vaddr);
        err = do_single_unmap(pmap_arm, vaddr, c, false);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_UNMAP);

        vaddr += c * BASE_PAGE_SIZE;
        while (ARM_L1_OFFSET(vaddr) < ARM_L1_OFFSET(vend)) {
            c = ARM_L2_MAX_ENTRIES;
            err = do_single_unmap(pmap_arm, vaddr, c, true);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_UNMAP);
            vaddr += c * BASE_PAGE_SIZE;

        // unmap remaining part
        c = ARM_L2_OFFSET(vend) - ARM_L2_OFFSET(vaddr);
            err = do_single_unmap(pmap_arm, vaddr, c, true);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_UNMAP);
 * \brief Determine a suitable address for a given memory object
 *
 * \param pmap       The pmap object
 * \param memobj     The memory object to determine the address for
 * \param alignment  Minimum alignment
 * \param vaddr      Pointer to return the determined address
 *
 * Relies on vspace.c code maintaining an ordered list of vregions
determine_addr(struct pmap *pmap,
               struct memobj *memobj,
    assert(pmap->vspace->head);

    assert(alignment <= BASE_PAGE_SIZE); // NYI

    struct vregion *walk = pmap->vspace->head;
    while (walk->next) { // Try to insert between existing mappings
        genvaddr_t walk_base = vregion_get_base_addr(walk);
        genvaddr_t walk_size = vregion_get_size(walk);
        genvaddr_t next_base = vregion_get_base_addr(walk->next);

        if (next_base > walk_base + walk_size + memobj->size &&
            walk_base + walk_size > VSPACE_BEGIN) { // Ensure mappings lie above VSPACE_BEGIN
            *vaddr = walk_base + walk_size;

    *vaddr = vregion_get_base_addr(walk) + vregion_get_size(walk);
/** \brief Retrieves an address that can currently be used for large mappings
static errval_t determine_addr_raw(struct pmap *pmap, size_t size,
                                   size_t alignment, genvaddr_t *retvaddr)
    struct pmap_arm *pmap_arm = (struct pmap_arm *)pmap;

    struct vnode *walk_pdir = pmap_arm->root.u.vnode.children;
    assert(walk_pdir != NULL); // assume there's always at least one existing entry

    if (alignment == 0) {
        alignment = BASE_PAGE_SIZE;
        alignment = ROUND_UP(alignment, BASE_PAGE_SIZE);
    size = ROUND_UP(size, alignment);

    size_t free_count = DIVIDE_ROUND_UP(size, LARGE_PAGE_SIZE);
    //debug_printf("need %zu contiguous free pdirs\n", free_count);

    // compile pdir free list
    // Barrelfish treats the L1 as 1024 entries
    for (int i = 0; i < 1024; i++) {
    f[walk_pdir->entry] = false;
        assert(walk_pdir->is_vnode);
        f[walk_pdir->entry] = false;
        walk_pdir = walk_pdir->next;

    genvaddr_t first_free = 384;
    for (; first_free < 512; first_free++) {
        for (int i = 1; i < free_count; i++) {
            if (!f[first_free + i]) {
                first_free = first_free + i;

    assert(1 == 1); // make compiler shut up about label

    //printf("first free: %li\n", (uint32_t)first_free);
    if (first_free + free_count <= 512) {
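        // each entry of the pretend 1024-entry L1 covers 4MB of virtual
        // address space, hence the shift by 22 bits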
        *retvaddr = first_free << 22;

        return LIB_ERR_OUT_OF_VIRTUAL_ADDR;
static errval_t do_single_modify_flags(struct pmap_arm *pmap, genvaddr_t vaddr,
                                       size_t pages, vregion_flags_t flags)
    errval_t err = SYS_ERR_OK;
    struct vnode *ptable = find_ptable(pmap, vaddr);
    uint16_t ptentry = ARM_USER_L2_OFFSET(vaddr);
        struct vnode *page = find_vnode(ptable, ptentry);
            if (inside_region(ptable, ptentry, pages)) {
                // we're modifying part of a valid mapped region
                // arguments to invocation: invoke frame cap, first affected
                // page (as offset from first page in mapping), #affected
                // pages, new flags. Invocation should check compatibility of
                // new set of flags with cap permissions.
                size_t off = ptentry - page->entry;
                uintptr_t pmap_flags = vregion_flags_to_kpi_paging_flags(flags);
                err = invoke_frame_modify_flags(page->u.frame.cap, off, pages, pmap_flags);
                printf("invoke_frame_modify_flags returned error: %s (%"PRIuERRV")\n",
                       err_getstring(err), err);
                // overlaps some region border
                return LIB_ERR_PMAP_EXISTING_MAPPING;
 * \brief Modify page mapping
 *
 * \param pmap     The pmap object
 * \param vaddr    The first virtual address to modify
 * \param flags    New flags for the mapping
 * \param retsize  If non-NULL, filled in with the actual size modified
modify_flags(struct pmap *pmap,
             vregion_flags_t flags,
    errval_t err, ret = SYS_ERR_OK;
    struct pmap_arm *pmap_arm = (struct pmap_arm*)pmap;
    size = ROUND_UP(size, BASE_PAGE_SIZE);
    size_t pte_count = size / BASE_PAGE_SIZE;
    genvaddr_t vend = vaddr + size;

    if (ARM_L1_OFFSET(vaddr) == ARM_L1_OFFSET(vend-1)) {
        err = do_single_modify_flags(pmap_arm, vaddr, pte_count, flags);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_UNMAP);
        uint32_t c = ARM_L2_MAX_ENTRIES - ARM_L2_OFFSET(vaddr);
        err = do_single_modify_flags(pmap_arm, vaddr, c, flags);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_UNMAP);

        vaddr += c * BASE_PAGE_SIZE;
        while (ARM_L1_OFFSET(vaddr) < ARM_L1_OFFSET(vend)) {
            c = ARM_L2_MAX_ENTRIES;
            err = do_single_modify_flags(pmap_arm, vaddr, c, flags);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_UNMAP);
            vaddr += c * BASE_PAGE_SIZE;

        // modify the flags on the remaining part
        c = ARM_L2_OFFSET(vend) - ARM_L2_OFFSET(vaddr);
        err = do_single_modify_flags(pmap_arm, vaddr, c, flags);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_UNMAP);
 * \brief Query existing page mapping
 *
 * \param pmap       The pmap object
 * \param vaddr      The virtual address to query
 * \param retvaddr   Returns the base virtual address of the mapping
 * \param retsize    Returns the actual size of the mapping
 * \param retcap     Returns the cap mapped at this address
 * \param retoffset  Returns the offset within the cap that is mapped
 * \param retflags   Returns the flags for this mapping
 *
 * All of the ret parameters are optional.
static errval_t lookup(struct pmap *pmap, genvaddr_t vaddr,
                       genvaddr_t *retvaddr, size_t *retsize,
                       struct capref *retcap, genvaddr_t *retoffset,
                       vregion_flags_t *retflags)

serialise(struct pmap *pmap, void *buf, size_t buflen)
    // Unimplemented: ignored

deserialise(struct pmap *pmap, void *buf, size_t buflen)
    // Unimplemented: we start with an empty pmap, and avoid the bottom of the A/S
static struct pmap_funcs pmap_funcs = {
    .determine_addr = determine_addr,
    .determine_addr_raw = determine_addr_raw,
    .modify_flags = modify_flags,
    .serialise = serialise,
    .deserialise = deserialise,
 * \brief Initialize the pmap object
pmap_init(struct pmap *pmap,
          struct vspace *vspace,
          struct slot_allocator *opt_slot_alloc)
    struct pmap_arm* pmap_arm = (struct pmap_arm*)pmap;

    /* Generic portion */
    pmap->f = pmap_funcs;
    pmap->vspace = vspace;

    // Slab allocator for vnodes
    slab_init(&pmap_arm->slab, sizeof(struct vnode), NULL);
    slab_grow(&pmap_arm->slab,
              pmap_arm->slab_buffer,
              sizeof(pmap_arm->slab_buffer));
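    // The root vnode wraps the domain's L1 page table capability; leaf L2
    // vnodes are chained off it as children.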
    pmap_arm->root.is_vnode         = true;
    pmap_arm->root.u.vnode.cap      = vnode;
    pmap_arm->root.next             = NULL;
    pmap_arm->root.u.vnode.children = NULL;
errval_t pmap_current_init(bool init_domain)
    struct pmap_arm *pmap_arm = (struct pmap_arm*)get_current_pmap();

    // To reserve a block of virtual address space,
    // a vregion representing the address space is required.
    // We construct a superficial one here and add it to the vregion list.
    struct vregion *vregion = &pmap_arm->vregion;
    assert((void*)vregion > (void*)pmap_arm);
    assert((void*)vregion < (void*)(pmap_arm + 1));
    vregion->vspace = NULL;
    vregion->memobj = NULL;
    vregion->base   = VSPACE_BEGIN;
    vregion->size   = META_DATA_RESERVED_SPACE;
    vregion->next   = NULL;

    struct vspace *vspace = pmap_arm->p.vspace;
    assert(!vspace->head);
    vspace->head = vregion;

    pmap_arm->vregion_offset = pmap_arm->vregion.base;