/// Amount of virtual space for malloc
#ifdef __x86_64__
-# define HEAP_REGION (2UL * 1024 * 1024 * 1024) /* 2GB */
+# define HEAP_REGION (3500UL * 1024 * 1024) /* 3500MB */
#else
# define HEAP_REGION (512UL * 1024 * 1024) /* 512MB */
#endif
return get_morecore_state()->header_freep;
}
-errval_t morecore_init(void)
+errval_t morecore_init(size_t alignment)
{
errval_t err;
struct morecore_state *state = get_morecore_state();
thread_mutex_init(&state->mutex);
+ // setup flags that match the alignment
+ vregion_flags_t morecore_flags = VREGION_FLAGS_READ_WRITE;
+#if __x86_64__
+ morecore_flags |= (alignment == HUGE_PAGE_SIZE ? VREGION_FLAGS_HUGE : 0);
+#endif
+ morecore_flags |= (alignment == LARGE_PAGE_SIZE ? VREGION_FLAGS_LARGE : 0);
+
err = vspace_mmu_aware_init_aligned(&state->mmu_state, HEAP_REGION,
- BASE_PAGE_SIZE, VREGION_FLAGS_READ_WRITE);
+ alignment, morecore_flags);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_VSPACE_MMU_AWARE_INIT);
}
size_t mapoffset = state->mmu_state.mapoffset;
size_t remapsize = ROUND_UP(mapoffset, state->mmu_state.alignment);
- if (remapsize == mapoffset) {
+ if (remapsize <= mapoffset) {
// don't need to do anything if we only recreate the exact same
// mapping
return SYS_ERR_OK;