From da0abb2fc1d33c041fd6d79d5e733d6ad61accb5 Mon Sep 17 00:00:00 2001
From: Simon Gerber
Date: Thu, 30 Apr 2015 19:23:34 +0200
Subject: [PATCH] libbarrelfish: fix morecore heap with 1GB pages.

This fixes the vspace_mmu_aware vregion to actually allocate 1GB pages
if requested through morecore=0x40000000 on the command line.

Signed-off-by: Simon Gerber
---
 lib/barrelfish/morecore.c         |  3 +++
 lib/barrelfish/vspace/mmu_aware.c | 30 +++++++++++++++++++++++-------
 2 files changed, 26 insertions(+), 7 deletions(-)

diff --git a/lib/barrelfish/morecore.c b/lib/barrelfish/morecore.c
index dfb5bcd..e61c489 100644
--- a/lib/barrelfish/morecore.c
+++ b/lib/barrelfish/morecore.c
@@ -144,6 +144,9 @@ errval_t morecore_reinit(void)
     if (remapsize <= mapoffset) {
         // don't need to do anything if we only recreate the exact same
         // mapping
+        // XXX: do we need/want to recreate existing mappings with a larger
+        // page size here? If so, what is the implication on early boot
+        // domains that don't have access to mem_serv? -SG, 2015-04-30.
         return SYS_ERR_OK;
     }
     struct capref frame;
diff --git a/lib/barrelfish/vspace/mmu_aware.c b/lib/barrelfish/vspace/mmu_aware.c
index ea38369..073d7d6 100644
--- a/lib/barrelfish/vspace/mmu_aware.c
+++ b/lib/barrelfish/vspace/mmu_aware.c
@@ -97,24 +97,40 @@ errval_t vspace_mmu_aware_map(struct vspace_mmu_aware *state,
     size_t ret_size = 0;
     if (req_size > 0) {
-        if ((state->mapoffset & LARGE_PAGE_MASK) == 0 &&
-            state->alignment >= LARGE_PAGE_SIZE)
+#if __x86_64__
+        if ((state->vregion.flags & VREGION_FLAGS_HUGE) &&
+            (state->mapoffset & HUGE_PAGE_MASK) == 0)
         {
-            // this is an opportunity to switch to 2M pages
+            // this is an opportunity to switch to 1G pages if requested.
             // we know that we can use large pages without jumping through hoops
-            // if state->alignment is at least LARGE_PAGE_SIZE as we always create
-            // the vregion with VREGION_FLAGS_LARGE.
+            // if state->vregion.flags has VREGION_FLAGS_HUGE set and
+            // mapoffset is aligned to at least HUGE_PAGE_SIZE.
+            alloc_size = ROUND_UP(req_size, HUGE_PAGE_SIZE);
+
+            // goto allocation directly so we can avoid nasty code interaction
+            // between #if __x86_64__ and the size checks, we want to be able
+            // to use 2M pages on x86_64 also. -SG, 2015-04-30.
+            goto allocate;
+        }
+#endif
+        if ((state->vregion.flags & VREGION_FLAGS_LARGE) &&
+            (state->mapoffset & LARGE_PAGE_MASK) == 0)
+        {
+            // this is an opportunity to switch to 2M pages if requested.
+            // we know that we can use large pages without jumping through hoops
+            // if state->vregion.flags has VREGION_FLAGS_LARGE set and
+            // mapoffset is aligned to at least LARGE_PAGE_SIZE.
             alloc_size = ROUND_UP(req_size, LARGE_PAGE_SIZE);
         }
 
         // Create frame of appropriate size
-retry:
+allocate:
         err = frame_create(frame, alloc_size, &ret_size);
         if (err_is_fail(err)) {
             if (err_no(err) == LIB_ERR_RAM_ALLOC_MS_CONSTRAINTS) {
                 // we can only get 4k frames for now; retry with 4k
                 if (alloc_size > BASE_PAGE_SIZE && origsize < BASE_PAGE_SIZE) {
                     alloc_size = BASE_PAGE_SIZE;
-                    goto retry;
+                    goto allocate;
                 }
                 return err_push(err, LIB_ERR_FRAME_CREATE_MS_CONSTRAINTS);
             }
-- 
1.7.2.5
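
For readers who want the page-size policy above at a glance: after this patch,
vspace_mmu_aware_map() picks the allocation granularity from the vregion's
flags and the alignment of the current map offset, rather than from
state->alignment alone. The following is a minimal, self-contained C sketch of
that decision under stated assumptions: the constants, the FLAG_* values, and
pick_alloc_size() itself are illustrative stand-ins for Barrelfish's
VREGION_FLAGS_* and page-size macros, not code from the tree.

#include <stdint.h>
#include <stdio.h>

/* Stand-in x86_64 page-size constants; the real definitions live in
 * Barrelfish's architecture headers. */
#define BASE_PAGE_SIZE   (1ULL << 12)  /* 4 KiB */
#define LARGE_PAGE_SIZE  (1ULL << 21)  /* 2 MiB */
#define HUGE_PAGE_SIZE   (1ULL << 30)  /* 1 GiB */
#define LARGE_PAGE_MASK  (LARGE_PAGE_SIZE - 1)
#define HUGE_PAGE_MASK   (HUGE_PAGE_SIZE - 1)

/* Stand-ins for VREGION_FLAGS_LARGE / VREGION_FLAGS_HUGE. */
#define FLAG_LARGE (1u << 0)
#define FLAG_HUGE  (1u << 1)

#define ROUND_UP(x, sz) ((((x) + (sz) - 1) / (sz)) * (sz))

/* Mirrors the size selection in the patched vspace_mmu_aware_map():
 * use 1G pages when the vregion was created with the huge-page flag and
 * the current map offset is 1G-aligned; otherwise try 2M pages under the
 * analogous conditions; otherwise fall back to base (4K) granularity. */
static uint64_t pick_alloc_size(uint64_t req_size, uint64_t mapoffset,
                                unsigned flags)
{
    if ((flags & FLAG_HUGE) && (mapoffset & HUGE_PAGE_MASK) == 0) {
        return ROUND_UP(req_size, HUGE_PAGE_SIZE);
    }
    if ((flags & FLAG_LARGE) && (mapoffset & LARGE_PAGE_MASK) == 0) {
        return ROUND_UP(req_size, LARGE_PAGE_SIZE);
    }
    return ROUND_UP(req_size, BASE_PAGE_SIZE);
}

int main(void)
{
    /* E.g. a heap region requested with morecore=0x40000000 (1 GiB) on
     * the command line: the first map at offset 0 gets a full 1G frame. */
    printf("%llu\n",
           (unsigned long long)pick_alloc_size(0x40000000ULL, 0, FLAG_HUGE));
    return 0;
}

Two design choices are visible in the diff itself: the 1G branch is guarded by
#if __x86_64__ and jumps straight to the allocate label, so the 2M check stays
outside the conditional and 2M pages remain usable on x86_64 as well (per the
in-diff comment); and if frame_create() fails with
LIB_ERR_RAM_ALLOC_MS_CONSTRAINTS, the code retries once with a 4K
BASE_PAGE_SIZE frame before giving up.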