failure PMAP_EXISTING_MAPPING "Cannot replace existing mapping, unmap first",
failure PMAP_FRAME_SIZE "Given Frame too small to fulfil mapping request",
failure PMAP_FRAME_IDENTIFY "Frame could not be identified",
+ failure PMAP_NOT_MAPPED "No mapping in given address range",
failure OUT_OF_VIRTUAL_ADDR "Out of virtual address",
/*
 * User-side invocation stub for modifying page-mapping flags on a frame cap.
 *
 * Patch hunk: gains a 'va_hint' parameter (virtual address of the mapping, or
 * 0) so the kernel can do a selective TLB flush instead of a full flush, and
 * replaces the hand-rolled syscall5() encoding with the cap_invoke5() helper,
 * which derives the capability address/valid bits internally.
 */
static inline errval_t invoke_frame_modify_flags(struct capref frame,
size_t offset,
size_t pages,
- size_t flags)
+ size_t flags,
+ genvaddr_t va_hint)
{
- uint8_t invoke_bits = get_cap_valid_bits(frame);
- capaddr_t invoke_cptr = get_cap_addr(frame) >> (CPTR_BITS - invoke_bits);
-
- return syscall5((invoke_bits << 16) | (FrameCmd_ModifyFlags << 8) |
- SYSCALL_INVOKE, invoke_cptr, offset, pages, flags).error;
+ return cap_invoke5(frame, FrameCmd_ModifyFlags, offset,
+ pages, flags, va_hint).error;
}
/*
 * x86-32 page-table geometry constants.
 *
 * Patch hunk: table sizes are now derived from a *_BITS shift count so that
 * SIZE == (1 << BITS) and MASK == (SIZE - 1) hold by construction instead of
 * being three independently-maintained literals.
 * PAE: 4-entry PDPT, 512-entry page directory/table;
 * non-PAE: 1024-entry page directory/table.
 */
#define X86_32_PTABLE_PRESENT (((paging_x86_32_flags_t)1) << 0)
#ifdef CONFIG_PAE
-#define X86_32_PDPTE_SIZE 4
-#define X86_32_PDPTE_MASK 3
-#define X86_32_PDPTE_CLEAR 0
+#define X86_32_PDPTE_BITS 2
+#define X86_32_PDPTE_SIZE (1U<<X86_32_PDPTE_BITS)
+#define X86_32_PDPTE_MASK (X86_32_PDPTE_SIZE - 1)
+#define X86_32_PDPTE_CLEAR 0
-#define X86_32_PDIR_SIZE 512
+#define X86_32_PDIR_BITS 9
+#define X86_32_PDIR_SIZE (1U<<X86_32_PDIR_BITS)
+#define X86_32_PDIR_MASK (X86_32_PDIR_SIZE - 1)
+#define X86_32_PDIR_CLEAR 0
-#define X86_32_PTABLE_SIZE 512 /**< Page directory/table size */
-#define X86_32_PTABLE_MASK 0x1ff /**< Page dir/table address mask */
-#define X86_32_PTABLE_CLEAR 0 /**< Bitmap of a clear table entry */
+#define X86_32_PTABLE_BITS 9
+#define X86_32_PTABLE_SIZE (1U<<X86_32_PTABLE_BITS) /**< Page directory/table size */
+#define X86_32_PTABLE_MASK (X86_32_PTABLE_SIZE-1) /**< Page dir/table address mask */
+#define X86_32_PTABLE_CLEAR 0 /**< Bitmap of a clear table entry */
#else
-#define X86_32_PDIR_SIZE 1024
-#define X86_32_PDIR_MASK 0x3ff
-#define X86_32_PDIR_CLEAR 0
-
-#define X86_32_PTABLE_SIZE 1024 /**< Page directory/table size */
-#define X86_32_PTABLE_MASK 0x3ff /**< Page dir/table address mask */
-#define X86_32_PTABLE_CLEAR 0 /**< Bitmap of a clear table entry */
+#define X86_32_PDIR_BITS 10
+#define X86_32_PDIR_SIZE (1U << X86_32_PDIR_BITS)
+#define X86_32_PDIR_MASK (X86_32_PDIR_SIZE - 1)
+#define X86_32_PDIR_CLEAR 0
+
+#define X86_32_PTABLE_BITS 10
+#define X86_32_PTABLE_SIZE (1U<<X86_32_PTABLE_BITS) /**< Page directory/table size */
+#define X86_32_PTABLE_MASK (X86_32_PTABLE_SIZE - 1) /**< Page dir/table address mask */
+#define X86_32_PTABLE_CLEAR 0 /**< Bitmap of a clear table entry */
#endif
#include <target/x86_32/paging_kernel_target.h>
#include <target/x86_32/offsets_target.h>
#include <paging_kernel_arch.h>
+#include <mdb/mdb_tree.h>
#include <string.h>
#include <cap_predicates.h>
}
/*
 * Kernel-side implementation of FrameCmd_ModifyFlags: rewrite the PTEs of an
 * existing mapping with new access flags, then flush the TLB.
 *
 * Patch hunk: (1) looks up the leaf page table via the MDB so it can tell
 * 4K mappings (ptable) from large-page mappings (pdir) and dispatch to the
 * matching entry-update helper; (2) validates offset/pages against the table
 * size; (3) uses the caller-provided 'va_hint' for a selective per-page
 * invlpg instead of unconditionally flushing a range.
 */
errval_t page_mappings_modify_flags(struct capability *frame, size_t offset,
- size_t pages, size_t uflags)
+ size_t pages, size_t mflags, genvaddr_t va_hint)
{
struct cte *mapping = cte_for_cap(frame);
struct mapping_info *info = &mapping->mapping_info;
/* Find the leaf page table that contains this mapping's first PTE, so we
 * can determine the page size (4K vs large) from its vnode type below. */
+ struct cte *leaf_pt;
+ errval_t err;
+ err = mdb_find_cap_for_address(info->pte, &leaf_pt);
+ if (err_is_fail(err)) {
+ return err;
+ }
/* Calculate page access protection flags */
// Get frame cap rights
paging_x86_32_flags_t flags =
paging_x86_32_cap_to_page_flags(frame->rights);
// Mask with provided access rights mask
- flags = paging_x86_32_mask_attrs(flags, X86_32_PTABLE_ACCESS(uflags));
+ flags = paging_x86_32_mask_attrs(flags, X86_32_PTABLE_ACCESS(mflags));
// Add additional arch-specific flags
- flags |= X86_32_PTABLE_FLAGS(uflags);
+ flags |= X86_32_PTABLE_FLAGS(mflags);
// Unconditionally mark the page present
flags |= X86_32_PTABLE_PRESENT;
- /* Calculate location of page table entries we need to modify */
- lvaddr_t base = local_phys_to_mem(info->pte) + offset;
/* Bounds checks: 'offset' is an entry index and offset+pages must not run
 * past the end of the leaf table (pages is bounded by this check, so the
 * signed 'int i' loop counters below cannot overflow). */
+ // check arguments
+ if (offset >= X86_32_PTABLE_SIZE) { // Within pagetable
+ return SYS_ERR_VNODE_SLOT_INVALID;
+ }
+ if (offset + pages > X86_32_PTABLE_SIZE) { // mapping size ok
+ return SYS_ERR_VM_MAP_SIZE;
+ }
/* NOTE(review): the old code added 'offset' as a raw byte offset; the new
 * code scales by the entry size, treating 'offset' as an entry index —
 * confirm callers pass an index, not bytes. */
- for (int i = 0; i < pages; i++) {
- union x86_32_ptable_entry *entry =
- (union x86_32_ptable_entry *)base + i;
- paging_x86_32_modify_flags(entry, flags);
+ /* Calculate location of page table entries we need to modify */
+ lvaddr_t base = local_phys_to_mem(info->pte) +
+ offset * sizeof(union x86_32_ptable_entry);
+
+ size_t pagesize = BASE_PAGE_SIZE;
+ switch(leaf_pt->cap.type) {
+ case ObjType_VNode_x86_32_ptable :
+ for (int i = 0; i < pages; i++) {
+ union x86_32_ptable_entry *entry =
+ (union x86_32_ptable_entry *)base + i;
+ paging_x86_32_modify_flags(entry, flags);
+ }
+ break;
+ case ObjType_VNode_x86_32_pdir :
+ for (int i = 0; i < pages; i++) {
+ union x86_32_ptable_entry *entry =
+ (union x86_32_ptable_entry *)base + i;
+ paging_x86_32_modify_flags_large(entry, flags);
+ }
+ pagesize = LARGE_PAGE_SIZE;
+ break;
+ default:
+ return SYS_ERR_WRONG_MAPPING;
}
- return paging_tlb_flush_range(mapping, offset, pages);
/* TLB maintenance: a non-trivial va_hint (> BASE_PAGE_SIZE, i.e. a real
 * virtual address rather than the 0/1 sentinels used elsewhere) selects a
 * per-page invlpg loop; otherwise fall back to a full flush.
 * NOTE(review): 'va_hint != 0' is subsumed by 'va_hint > BASE_PAGE_SIZE'
 * and could be dropped. */
+ if (va_hint != 0 && va_hint > BASE_PAGE_SIZE) {
+ // use as direct hint
+ // invlpg should work for large/huge pages
+ for (int i = 0; i < pages; i++) {
+ do_one_tlb_flush(va_hint + i * pagesize);
+ }
+ } else {
+ /* do full TLB flush */
+ do_full_tlb_flush();
+ }
+ return SYS_ERR_OK;
}
void paging_dump_tables(struct dcb *dispatcher)
// page in mapped region
size_t pages = args[1]; // #pages to modify
size_t flags = args[2]; // new flags
+ genvaddr_t va = args[3]; // virtual addr hint
- page_mappings_modify_flags(to, offset, pages, flags);
+ page_mappings_modify_flags(to, offset, pages, flags, va);
return (struct sysret) {
.error = SYS_ERR_OK,
return SYS_ERR_WRONG_MAPPING;
}
- if (va_hint != 0) {
- if (va_hint > BASE_PAGE_SIZE) {
- // use as direct hint
- // invlpg should work for large/huge pages
- for (int i = 0; i < pages; i++) {
- // XXX: check proper instructions for large/huge pages
- do_one_tlb_flush(va_hint + i * pagesize);
- }
- } else if (va_hint == 1) {
- // XXX: remove this or cleanup interface, -SG, 2015-03-11
- // do computed selective flush
- return paging_tlb_flush_range(mapping, offset, pages);
+ if (va_hint != 0 && va_hint > BASE_PAGE_SIZE) {
+ // use as direct hint
+ // invlpg should work for large/huge pages
+ for (int i = 0; i < pages; i++) {
+ do_one_tlb_flush(va_hint + i * pagesize);
}
} else {
/* do full TLB flush */
}
/**
+ * \brief Modify flags of a large page.
+ *
+ * Update large-page (page directory) entry, pointed to by 'entry', with
+ * page attribute bitmap 'bitmap'. Only the attribute bits are rewritten;
+ * the entry is copied first so the base address is preserved.
+ *
+ * \param entry Pointer to page directory entry to modify.
+ * \param bitmap Bitmap to apply to page attributes.
+ */
+static inline void paging_x86_32_modify_flags_large(union x86_32_ptable_entry * NONNULL entry,
+ paging_x86_32_flags_t bitmap)
+{
+ union x86_32_ptable_entry tmp = *entry;
+
/* NOTE(review): flags are written through the '.base' view of the union.
 * On x86-32 large pages the PAT bit occupies a different position than in
 * 4K PTEs — confirm 'attr_index'/'global' line up for the large-page
 * layout, or use a dedicated large-page union member if one exists. */
+ tmp.base.present = bitmap & X86_32_PTABLE_PRESENT ? 1 : 0;
+ tmp.base.read_write = bitmap & X86_32_PTABLE_READ_WRITE ? 1 : 0;
+ tmp.base.user_supervisor = bitmap & X86_32_PTABLE_USER_SUPERVISOR ? 1 : 0;
+ tmp.base.write_through = bitmap & X86_32_PTABLE_WRITE_THROUGH ? 1 : 0;
+ tmp.base.cache_disabled = bitmap & X86_32_PTABLE_CACHE_DISABLED ? 1 : 0;
+ tmp.base.attr_index = bitmap & X86_32_PTABLE_ATTR_INDEX ? 1 : 0;
+ tmp.base.global = bitmap & X86_32_PTABLE_GLOBAL_PAGE ? 1 : 0;
+#ifdef CONFIG_NXE
+ tmp.base.execute_disable = bitmap & X86_32_PTABLE_EXECUTE_DISABLE ? 1 : 0;
+#endif
+
+ *entry = tmp;
+}
+
+
+/**
* \brief Modify flags of a normal (small) page.
*
- * From small page table entry, pointed to by 'entry', maps physical address
- * 'base' with page attribute bitmap 'bitmap'.
+ * Update small page table entry, pointed to by 'entry', with page attribute
+ * bitmap 'bitmap'.
*
* \param entry Pointer to page table entry to map from.
* \param bitmap Bitmap to apply to page attributes.
static inline void paging_x86_32_modify_flags(union x86_32_ptable_entry * NONNULL entry,
paging_x86_32_flags_t bitmap)
{
- union x86_32_ptable_entry tmp;
+ union x86_32_ptable_entry tmp = *entry;
tmp.base.present = bitmap & X86_32_PTABLE_PRESENT ? 1 : 0;
tmp.base.read_write = bitmap & X86_32_PTABLE_READ_WRITE ? 1 : 0;
#endif
}
/*
 * Patch hunk: find_ptable() is removed entirely. Its former caller (the
 * do_single_* path below) now uses find_mapping(), which resolves both the
 * containing vnode and the mapped page, including large-page mappings that
 * terminate at the page directory rather than a leaf page table.
 */
-static struct vnode *find_ptable(struct pmap_x86 *pmap, genvaddr_t base)
-{
- struct vnode *pdir = find_pdir(pmap, base);
-
- if (pdir) {
- // PDIR mapping
- return find_vnode(pdir, X86_32_PDIR_BASE(base));
- } else {
- return NULL;
- }
-}
-
/*
 * Modify the flags of a single contiguous run of pages within one mapped
 * region, via an invocation on the mapping's frame cap.
 *
 * NOTE(review): despite the name 'do_single_map' and the frame/offset/
 * pte_count/vend parameters (unused in the visible body), this hunk's body
 * performs a modify-flags operation — presumably do_single_modify_flags
 * upstream; confirm the hunk header.
 */
static errval_t do_single_map(struct pmap_x86 *pmap, genvaddr_t vaddr,
genvaddr_t vend, struct capref frame,
size_t offset, size_t pte_count,
size_t pages, vregion_flags_t flags)
{
errval_t err = SYS_ERR_OK;
- struct vnode *ptable = find_ptable(pmap, vaddr);
+
+ struct vnode *pt = NULL, *page = NULL;
+
/* New lookup: resolves containing table and mapped page in one call, and
 * also handles large-page mappings (where 'pt' is the page directory). */
+ if (!find_mapping(pmap, vaddr, &pt, &page)) {
+ return LIB_ERR_PMAP_FIND_VNODE;
+ }
+
+ assert(pt && pt->is_vnode && page && !page->is_vnode);
+
uint16_t ptentry = X86_32_PTABLE_BASE(vaddr);
- if (ptable) {
- struct vnode *page = find_vnode(ptable, ptentry);
- if (page) {
- if (inside_region(ptable, ptentry, pages)) {
- // we're modifying part of a valid mapped region
- // arguments to invocation: invoke frame cap, first affected
- // page (as offset from first page in mapping), #affected
- // pages, new flags. Invocation should check compatibility of
- // new set of flags with cap permissions.
- size_t off = ptentry - page->entry;
- paging_x86_32_flags_t pmap_flags = vregion_to_pmap_flag(flags);
- err = invoke_frame_modify_flags(page->u.frame.cap, off, pages, pmap_flags);
- printf("invoke_frame_modify_flags returned error: %s (%"PRIuERRV")\n",
- err_getstring(err), err);
- return err;
- } else {
- // overlaps some region border
- return LIB_ERR_PMAP_EXISTING_MAPPING;
- }
+ size_t pagesize = BASE_PAGE_SIZE;
+ if (is_large_page(page)) {
+ //large 2M page
+ ptentry = X86_32_PDIR_BASE(vaddr);
+ pagesize = LARGE_PAGE_SIZE;
+ }
+
/* NOTE(review): when !inside_region the new code falls through to
 * 'return SYS_ERR_OK' below, whereas the removed code returned
 * LIB_ERR_PMAP_EXISTING_MAPPING ("overlaps some region border") —
 * confirm silently succeeding is intentional. */
+ if (inside_region(pt, ptentry, pages)) {
+ // we're modifying part of a valid mapped region
+ // arguments to invocation: invoke frame cap, first affected
+ // page (as offset from first page in mapping), #affected
+ // pages, new flags. Invocation should check compatibility of
+ // new set of flags with cap permissions.
+ size_t off = ptentry - page->entry;
+ paging_x86_32_flags_t pmap_flags = vregion_to_pmap_flag(flags);
+ // calculate TLB flushing hint
+ genvaddr_t va_hint = 0;
+ if (pages == 1) {
+ // do assisted selective flush for single page
+ va_hint = vaddr & ~X86_32_BASE_PAGE_MASK;
}
+ err = invoke_frame_modify_flags(page->u.frame.cap, off, pages, pmap_flags, va_hint);
/* NOTE(review): this printf fires even when err == SYS_ERR_OK — looks
 * like debug output left in; consider guarding with err_is_fail(err). */
+ printf("invoke_frame_modify_flags returned error: %s (%"PRIuERRV")\n",
+ err_getstring(err), err);
+ return err;
}
return SYS_ERR_OK;
}
+
/**
* \brief Modify page mapping
*
{
errval_t err;
struct pmap_x86 *x86 = (struct pmap_x86 *)pmap;
- size = ROUND_UP(size, X86_32_BASE_PAGE_SIZE);
- size_t pages = DIVIDE_ROUND_UP(size, X86_32_BASE_PAGE_SIZE);
+
+ //determine if we unmap a larger page
+ struct vnode* page = NULL;
+
+ if (!find_mapping(x86, vaddr, NULL, &page)) {
+ return LIB_ERR_PMAP_NOT_MAPPED;
+ }
+
+ assert(page && !page->is_vnode);
+
+ size_t page_size = X86_32_BASE_PAGE_SIZE;
+ size_t table_base = X86_32_PTABLE_BASE(vaddr);
+ uint8_t map_bits= X86_32_BASE_PAGE_BITS + X86_32_PTABLE_BITS;
+ if (is_large_page(page)) {
+ //large 2/4M page
+ page_size = X86_32_LARGE_PAGE_SIZE;
+ table_base = X86_32_PDIR_BASE(vaddr);
+ map_bits = X86_32_LARGE_PAGE_BITS + X86_32_PTABLE_BITS;
+ }
+
+ // TODO: match new policy of map when implemented
+ size = ROUND_UP(size, page_size);
genvaddr_t vend = vaddr + size;
- if (is_same_pdir(vaddr, vend)) {
+ size_t pages = size / page_size;
+
+ if (is_same_pdir(vaddr, vend) ||
+ (is_same_pdpt(vaddr, vend) && is_large_page(page)))
+ {
// fast path
err = do_single_modify_flags(x86, vaddr, pages, flags);
if (err_is_fail(err)) {
uint32_t c = X86_32_PTABLE_SIZE - X86_32_PTABLE_BASE(vaddr);
err = do_single_modify_flags(x86, vaddr, c, flags);
if (err_is_fail(err)) {
- return err_push(err, LIB_ERR_VNODE_UNMAP);
return err_push(err, LIB_ERR_PMAP_MODIFY_FLAGS);
}
// unmap full leaves
- vaddr += c * X86_32_BASE_PAGE_SIZE;
+ vaddr += c * page_size;
while (get_addr_prefix(vaddr) < get_addr_prefix(vend)) {
c = X86_32_PTABLE_SIZE;
err = do_single_modify_flags(x86, vaddr, X86_32_PTABLE_SIZE, flags);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_PMAP_MODIFY_FLAGS);
}
- vaddr += c * X86_32_BASE_PAGE_SIZE;
+ vaddr += c * page_size;
}
// unmap remaining part
struct vnode* page = NULL;
if (!find_mapping(x86, vaddr, NULL, &page)) {
- //TODO: better error --> LIB_ERR_PMAP_NOT_MAPPED
- return LIB_ERR_PMAP_UNMAP;
+ return LIB_ERR_PMAP_NOT_MAPPED;
}
assert(page && !page->is_vnode);
// vaddr and vend specify begin and end of the region (inside a mapping)
// that should receive the new set of flags
- //
if (is_same_pdir(vaddr, vend) ||
(is_same_pdpt(vaddr, vend) && is_large_page(page)) ||
(is_same_pml4(vaddr, vend) && is_huge_page(page))) {