paging_set_flags(entry, kpi_paging_flags);
}
- return paging_tlb_flush_range(mapping, pages);
+ return paging_tlb_flush_range(mapping, offset, pages);
}
void paging_dump_tables(struct dcb *dispatcher)
paging_x86_32_modify_flags(entry, flags);
}
- return paging_tlb_flush_range(mapping, pages);
+ return paging_tlb_flush_range(mapping, offset, pages);
}
void paging_dump_tables(struct dcb *dispatcher)
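Each architecture's page_mappings_modify_flags now forwards the entry offset to paging_tlb_flush_range, so only the entries that were actually modified get flushed instead of the whole mapping. A minimal sketch of the new calling convention (the helper name modify_and_flush is hypothetical, not part of the patch):

    /* Hypothetical sketch: modify entries [offset, offset + pages) of
     * `mapping`, then flush exactly that range from the TLB. */
    static errval_t modify_and_flush(struct cte *mapping, size_t offset,
                                     size_t pages)
    {
        /* ... update the page table entries and their flags here ... */
        return paging_tlb_flush_range(mapping, offset, pages);
    }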
return SYS_ERR_OK;
}
+/**
+ * \brief Modify the flags of the mapping for `frame`.
+ *
+ * \arg frame the frame whose mapping should be modified
+ * \arg offset the offset, in page table entries, from the first entry of
+ *             the frame's mapping
+ * \arg pages the number of pages whose flags should be modified
+ * \arg mflags the new flags
+ */
errval_t page_mappings_modify_flags(struct capability *frame, size_t offset,
size_t pages, size_t mflags)
{
if (err_is_fail(err)) {
return err;
}
- genvaddr_t vaddr;
- size_t entry2 = (info->pte - get_address(&leaf_pt->cap)) /
- PTABLE_ENTRY_SIZE;
- err = compile_vaddr(leaf_pt, entry2, &vaddr);
- if (err_is_fail(err)) {
- if (err_no(err) == SYS_ERR_VNODE_NOT_INSTALLED) {
- debug(SUBSYS_PAGING, "couldn't reconstruct virtual address\n");
- }
- else {
- return err;
- }
- }
/* Calculate page access protection flags */
// Get frame cap rights
// Unconditionally mark the page present
flags |= X86_64_PTABLE_PRESENT;
- /* Calculate location of page table entries we need to modify */
- lvaddr_t base = local_phys_to_mem(info->pte) + offset;
+ assert(offset < X86_64_PTABLE_SIZE);
+ assert(offset + pages <= X86_64_PTABLE_SIZE);
+
+ /* Calculate location of first pt entry we need to modify */
+ lvaddr_t base = local_phys_to_mem(info->pte) +
+ offset * sizeof(union x86_64_ptable_entry);
switch(leaf_pt->cap.type) {
case ObjType_VNode_x86_64_ptable :
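The offset is scaled by the entry size to locate the first entry to modify. An equivalent way to read the computation, as a sketch using pointer arithmetic (which scales by sizeof automatically):

    /* Sketch only: `first` points at table entry `offset`; entry i of the
     * modified range is then first[i]. */
    union x86_64_ptable_entry *first =
        (union x86_64_ptable_entry *)local_phys_to_mem(info->pte) + offset;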
errval_t compile_vaddr(struct cte *ptable, size_t entry, genvaddr_t *retvaddr);
errval_t unmap_capability(struct cte *mem);
errval_t lookup_cap_for_mapping(genpaddr_t paddr, lvaddr_t pte, struct cte **retcte);
-errval_t paging_tlb_flush_range(struct cte *frame, size_t pages);
+errval_t paging_tlb_flush_range(struct cte *frame, size_t offset, size_t pages);
#endif // PAGING_H
}
// TODO: cleanup arch compatibility mess for page size selection
-errval_t paging_tlb_flush_range(struct cte *frame, size_t pages)
+errval_t paging_tlb_flush_range(struct cte *frame, size_t offset, size_t pages)
{
// reconstruct first virtual address for TLB flushing
struct cte *leaf_pt;
genvaddr_t vaddr;
size_t entry = (frame->mapping_info.pte - get_address(&leaf_pt->cap)) /
PTABLE_ENTRY_SIZE;
+ entry += offset;
err = compile_vaddr(leaf_pt, entry, &vaddr);
if (err_is_fail(err)) {
if (err_no(err) == SYS_ERR_VNODE_NOT_INSTALLED) {
struct vnode *n;
for(n = root->u.vnode.children; n != NULL; n = n->next) {
- if(n->entry == entry) {
+ if (!n->is_vnode) {
+ // check whether entry is inside a large region
+ uint16_t end = n->entry + n->u.frame.pte_count;
+ if (n->entry <= entry && entry < end) {
+ return n;
+ }
+ }
+ else if (n->entry == entry) {
+ // return n if n is a vnode and the indices match
return n;
}
}
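find_vnode now treats frame children, which may span several consecutive entries, as covering a half-open interval of table indices, while vnode children still match on a single index. The covering test, pulled out as a hedged helper sketch (covers_entry is illustrative, not part of the patch):

    /* Does frame child `n` cover table index `entry`? A frame mapped at
     * n->entry spanning n->u.frame.pte_count entries covers the half-open
     * interval [n->entry, n->entry + pte_count). */
    static inline bool covers_entry(struct vnode *n, uint16_t entry)
    {
        uint16_t end = n->entry + n->u.frame.pte_count;
        return !n->is_vnode && n->entry <= entry && entry < end;
    }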
* \brief Remove page mappings
*
* \param pmap The pmap object
- * \param vaddr The start of the virtual addres to remove
- * \param size The size of virtual address to remove
+ * \param vaddr The start of the virtual region to remove
+ * \param size The size of the virtual region to remove
* \param retsize If non-NULL, filled in with the actual size removed
*/
static errval_t unmap(struct pmap *pmap, genvaddr_t vaddr, size_t size,
table_base = X86_64_PDPT_BASE(vaddr);
map_bits = X86_64_HUGE_PAGE_BITS + X86_64_PTABLE_BITS;
}
+ if (page->entry > table_base) {
+ debug_printf("trying to partially unmap region\n");
+ // XXX: error code
+ return LIB_ERR_PMAP_FIND_VNODE;
+ }
// TODO: match new policy of map when implemented
size = ROUND_UP(size, page_size);
assert(pt && pt->is_vnode && page && !page->is_vnode);
uint16_t ptentry = X86_64_PTABLE_BASE(vaddr);
+ size_t pagesize = BASE_PAGE_SIZE;
if (is_large_page(page)) {
//large 2M page
ptentry = X86_64_PDIR_BASE(vaddr);
+ pagesize = LARGE_PAGE_SIZE;
} else if (is_huge_page(page)) {
//huge 1GB page
ptentry = X86_64_PDPT_BASE(vaddr);
+ pagesize = HUGE_PAGE_SIZE;
}
if (inside_region(pt, ptentry, pages)) {
return err;
} else {
// overlaps some region border
+ // XXX: need better error
return LIB_ERR_PMAP_EXISTING_MAPPING;
}
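So the flags change only succeeds when the whole entry range lies inside one existing mapping. Assuming inside_region enforces the obvious containment invariant, it amounts to something like this sketch (range_in_region is illustrative, not the patch's code):

    /* Sketch: the half-open range [start, start + pages) must lie within
     * a single mapped region [r_start, r_start + r_count). */
    static bool range_in_region(uint16_t start, size_t pages,
                                uint16_t r_start, size_t r_count)
    {
        return r_start <= start && start + pages <= r_start + r_count;
    }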
* \brief Modify page mapping
*
* \param pmap The pmap object
- * \param vaddr The virtual address to unmap
+ * \param vaddr The first virtual address for which to change the flags
+ * \param size The size, in bytes, of the region whose flags to change
* \param flags New flags for the mapping
* \param retsize If non-NULL, filled in with the actual size modified
*/
return LIB_ERR_PMAP_UNMAP;
}
- assert(!page->is_vnode);
+ assert(page && !page->is_vnode);
size_t page_size = X86_64_BASE_PAGE_SIZE;
size_t table_base = X86_64_PTABLE_BASE(vaddr);
#include <barrelfish/except.h>
#include <stdio.h>
-static void *vbase = NULL;
+static void *vbase = NULL, *vend = NULL;
+static struct memobj *memobj = NULL;
+static struct vregion *vregion = NULL;
#define EX_STACK_SIZE 16384
static char ex_stack[EX_STACK_SIZE];
static void handler(enum exception_type type, int subtype, void *addr,
arch_registers_state_t *regs, arch_registers_fpu_state_t *fpuregs)
{
- debug_printf("got exception %d(%d) on %p\n", type, subtype, addr);
+ static int count = 0;
+ debug_printf("got exception %d(%d) on %p [%d]\n", type, subtype, addr, ++count);
+ errval_t err;
assert(type == EXCEPT_PAGEFAULT);
assert(subtype == PAGEFLT_WRITE);
- assert(addr == vbase);
+ assert(addr >= vbase && addr < vend);
debug_printf("got expected write pagefault on %p\n", addr);
- // exit program
- exit(0);
+ // unprotect 4k page
+ genvaddr_t offset = (genvaddr_t)addr - (genvaddr_t)vbase;
+ err = memobj->f.protect(memobj, vregion, offset, BASE_PAGE_SIZE, VREGION_FLAGS_READ_WRITE);
+ assert(err_is_ok(err));
}
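For the fault loop to work, the handler has to be installed on its dedicated stack before the first protected write. The registration is not part of the shown hunks; under the assumption that it uses the standard thread exception-handler call, it would look roughly like:

    /* Hedged sketch: install `handler` with ex_stack as its stack. */
    err = thread_set_exception_handler(handler, NULL,
                                       ex_stack, ex_stack + EX_STACK_SIZE,
                                       NULL, NULL);
    assert(err_is_ok(err));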
int main(void)
struct capref frame;
errval_t err;
size_t retsize;
- err = frame_alloc(&frame, BASE_PAGE_SIZE, &retsize);
+ err = frame_alloc(&frame, 16 * BASE_PAGE_SIZE, &retsize);
assert(err_is_ok(err));
// map read-write
- struct memobj *memobj;
- struct vregion *vregion;
err = vspace_map_anon_attr(&vbase, &memobj, &vregion, retsize, &retsize,
VREGION_FLAGS_READ_WRITE);
assert(err_is_ok(err));
err = memobj->f.pagefault(memobj, vregion, 0, 0);
assert(err_is_ok(err));
assert(vbase);
+ vend = (unsigned char *)vbase + retsize;
unsigned char *base = vbase;
debug_printf("filling region %p\n", base);
for (int i = 0; i < retsize; i++) {
assert(err_is_ok(err));
// this should fault
- debug_printf("provoke write pagefault on %p\n", base);
- base[0] = 0x42;
-
- assert(!"reached");
+ for (int i = 0; i < retsize / BASE_PAGE_SIZE; i++) {
+ debug_printf("provoke write pagefault on %p\n", base);
+ base[i * BASE_PAGE_SIZE] = 0x42;
+ }
return 0;
}
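With these changes the test exercises the per-page protection path end to end: each of the 16 pages takes exactly one write fault, the handler re-maps just that 4k page read-write, the faulting store is restarted and succeeds, and main returns normally instead of exiting from inside the handler.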