* from the first page in the mapping identified by `frame`
* \param pages Number of pages that should get new set of flags
* \param flags New set of flags
+ * \param va_hint Virtual address hint for selective TLB flushing;
+ *        pass 0 to request a full TLB flush
*
* \return Error code
*/
static inline errval_t invoke_frame_modify_flags(struct capref frame,
size_t offset,
size_t pages,
- size_t flags)
+ size_t flags,
+ genvaddr_t va_hint)
{
- return cap_invoke4(frame, FrameCmd_ModifyFlags, offset, pages, flags).error;
+ return cap_invoke5(frame, FrameCmd_ModifyFlags, offset,
+ pages, flags, va_hint).error;
}
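
A caller that wants the cheap single-entry flush passes a page-aligned
address as the hint. A minimal sketch, assuming `frame`, `offset`, and
`vaddr` describe an existing single-page mapping and `KPI_PAGING_FLAGS_READ`
stands in for whatever flag constant the caller actually uses (neither is
part of this patch):

    // Make one page of the mapping read-only and hint the kernel to
    // flush only that page's TLB entry instead of the whole TLB.
    genvaddr_t va_hint = vaddr & ~(genvaddr_t)(BASE_PAGE_SIZE - 1);
    errval_t err = invoke_frame_modify_flags(frame, offset, 1,
                                             KPI_PAGING_FLAGS_READ, va_hint);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "invoke_frame_modify_flags");
    }
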
static inline errval_t invoke_iocap_in(struct capref iocap, enum io_cmd cmd,
* \arg offset the offset from the first page table entry in entries
* \arg pages the number of pages to modify
* \arg mflags the new flags
+ * \arg va_hint a user-supplied virtual address for hinting selective TLB
+ *      flushing: 0 requests a full flush, 1 requests a kernel-computed
+ *      selective flush, and an address above BASE_PAGE_SIZE is used as a
+ *      direct hint
*/
errval_t page_mappings_modify_flags(struct capability *frame, size_t offset,
- size_t pages, size_t mflags)
+ size_t pages, size_t mflags, genvaddr_t va_hint)
{
struct cte *mapping = cte_for_cap(frame);
struct mapping_info *info = &mapping->mapping_info;
lvaddr_t base = local_phys_to_mem(info->pte) +
offset * sizeof(union x86_64_ptable_entry);
+ size_t pagesize = BASE_PAGE_SIZE;
    // leaf_pt is the CTE for the leaf page table that holds this mapping,
    // looked up earlier in the function (elided in this excerpt); 'flags'
    // is the arch-level flag set derived from 'mflags' above
    switch(leaf_pt->cap.type) {
        case ObjType_VNode_x86_64_ptable :
            for (int i = 0; i < pages; i++) {
                union x86_64_ptable_entry *entry =
                    (union x86_64_ptable_entry *)base + i;
                paging_x86_64_modify_flags(entry, flags);
            }
            break;
        case ObjType_VNode_x86_64_pdir :
            for (int i = 0; i < pages; i++) {
                union x86_64_ptable_entry *entry =
                    (union x86_64_ptable_entry *)base + i;
                paging_x86_64_modify_flags_large(entry, flags);
            }
+           pagesize = LARGE_PAGE_SIZE;
            break;
        case ObjType_VNode_x86_64_pdpt :
            for (int i = 0; i < pages; i++) {
                union x86_64_ptable_entry *entry =
                    (union x86_64_ptable_entry *)base + i;
                paging_x86_64_modify_flags_huge(entry, flags);
            }
+           pagesize = HUGE_PAGE_SIZE;
            break;
default:
return SYS_ERR_WRONG_MAPPING;
}
- /* do full TLB flush */
- do_full_tlb_flush();
+ if (va_hint != 0) {
+ if (va_hint > BASE_PAGE_SIZE) {
+            // va_hint is a page-aligned address: use it as a direct
+            // hint and flush one TLB entry per modified page
+ for (int i = 0; i < pages; i++) {
+ // XXX: check proper instructions for large/huge pages
+ do_one_tlb_flush(va_hint + i * pagesize);
+ }
+ } else if (va_hint == 1) {
+ // XXX: remove this or cleanup interface, -SG, 2015-03-11
+ // do computed selective flush
+ return paging_tlb_flush_range(mapping, offset, pages);
+ }
+ } else {
+ /* do full TLB flush */
+ do_full_tlb_flush();
+ }
return SYS_ERR_OK;
}
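
The flush primitives themselves are unchanged by this patch. On x86-64 they
presumably come down to invlpg for a single entry and a CR3 reload for the
full flush; a minimal sketch of that assumption (one invlpg per page also
covers large/huge pages, since each such mapping occupies a single TLB
entry):

    static inline void do_one_tlb_flush(genvaddr_t vaddr)
    {
        // invalidate the TLB entry that translates vaddr
        __asm volatile("invlpg (%0)" :: "r" (vaddr) : "memory");
    }

    static inline void do_full_tlb_flush(void)
    {
        // reloading CR3 flushes all non-global TLB entries
        uint64_t cr3;
        __asm volatile("mov %%cr3, %0" : "=r" (cr3));
        __asm volatile("mov %0, %%cr3" :: "r" (cr3) : "memory");
    }
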
// page in mapped region
size_t pages = args[1]; // #pages to modify
size_t flags = args[2]; // new flags
+ genvaddr_t va = args[3]; // virtual addr hint
- errval_t err = page_mappings_modify_flags(to, offset, pages, flags);
+ errval_t err = page_mappings_modify_flags(to, offset, pages, flags, va);
return (struct sysret) {
.error = err,
errval_t page_mappings_unmap(struct capability *pgtable, struct cte *mapping,
size_t entry, size_t num_pages);
errval_t page_mappings_modify_flags(struct capability *mapping, size_t offset,
- size_t pages, size_t flags);
+ size_t pages, size_t mflags,
+ genvaddr_t va_hint);
errval_t paging_modify_flags(struct capability *frame, uintptr_t offset,
uintptr_t pages, uintptr_t kpi_paging_flags);
void paging_dump_tables(struct dcb *dispatcher);
// access permissions.
size_t off = ptentry - page->entry;
paging_x86_64_flags_t pmap_flags = vregion_to_pmap_flag(flags);
- err = invoke_frame_modify_flags(page->u.frame.cap, off, pages, pmap_flags);
- //printf("invoke_frame_modify_flags returned error: %s (%"PRIuERRV")\n",
- // err_getstring(err), err);
+ // calculate TLB flushing hint
+ genvaddr_t va_hint = 0;
+ if (pages == 1) {
+ // do assisted selective flush for single page
+ va_hint = vaddr & ~X86_64_BASE_PAGE_MASK;
+ }
+ err = invoke_frame_modify_flags(page->u.frame.cap, off, pages,
+ pmap_flags, va_hint);
return err;
} else {
// overlaps some region border
return LIB_ERR_PMAP_EXISTING_MAPPING;
}
-
return SYS_ERR_OK;
}
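
Taken together, the va_hint argument now encodes three flushing strategies:
0 means full flush, 1 means kernel-computed selective flush, and any address
above BASE_PAGE_SIZE is used verbatim. A hypothetical helper (illustration
only, not part of the patch) makes the encoding explicit at the pmap level:

    // Pick a TLB-flush hint for a flag change covering 'pages' pages
    // starting at 'vaddr' (hypothetical; mirrors the logic above).
    static genvaddr_t choose_va_hint(genvaddr_t vaddr, size_t pages)
    {
        if (pages == 1) {
            // direct hint: flush exactly the one affected entry
            return vaddr & ~(genvaddr_t)(BASE_PAGE_SIZE - 1);
        }
        return 0; // multi-page change: fall back to a full flush
    }
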