/**
 * \file
 * \brief Morecore implementation for malloc
 */

/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2011, ETH Zurich.
 * Copyright (c) 2014, HP Labs.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
 */
#include <barrelfish/barrelfish.h>
#include <barrelfish/core_state.h>
#include <barrelfish/morecore.h>
21 /// Amount of virtual space for malloc
23 # define HEAP_REGION (2UL * 1024 * 1024 * 1024) /* 2GB */
25 # define HEAP_REGION (512UL * 1024 * 1024) /* 512MB */
28 typedef void *(*morecore_alloc_func_t)(size_t bytes, size_t *retbytes);
29 extern morecore_alloc_func_t sys_morecore_alloc;
31 typedef void (*morecore_free_func_t)(void *base, size_t bytes);
32 extern morecore_free_func_t sys_morecore_free;
35 * \brief Allocate some memory for malloc to use
37 * This function will keep trying with smaller and smaller frames till
38 * it finds a set of frames that satisfy the requirement. retbytes can
39 * be smaller than bytes if we were able to allocate a smaller memory
40 * region than requested for.
42 static void *morecore_alloc(size_t bytes, size_t *retbytes)
45 struct morecore_state *state = get_morecore_state();
50 while (mapped < bytes) {
52 err = slot_alloc(&cap);
53 if (err_is_fail(err)) {
54 USER_PANIC_ERR(err, "slot_alloc failed");
58 err = vspace_mmu_aware_map(&state->mmu_state, cap, step,
67 vspace_mmu_aware_map failed probably because we asked
68 for a very large frame, will try asking for smaller one.
70 if (err_no(err) == LIB_ERR_FRAME_CREATE_MS_CONSTRAINTS) {
72 if (err_is_fail(err)) {
73 debug_err(__FILE__, __func__, __LINE__, err,
77 if (step < BASE_PAGE_SIZE) {
78 // Return whatever we have allocated until now
84 debug_err(__FILE__, __func__, __LINE__, err,
85 "vspace_mmu_aware_map fail");
95 static void morecore_free(void *base, size_t bytes)
97 struct morecore_state *state = get_morecore_state();
98 errval_t err = vspace_mmu_aware_unmap(&state->mmu_state,
99 (lvaddr_t)base, bytes);
100 if(err_is_fail(err)) {
101 USER_PANIC_ERR(err, "vspace_mmu_aware_unmap");
105 Header *get_malloc_freep(void);
106 Header *get_malloc_freep(void)
108 return get_morecore_state()->header_freep;
111 errval_t morecore_init(size_t alignment)
114 struct morecore_state *state = get_morecore_state();
116 thread_mutex_init(&state->mutex);
118 // setup flags that match the alignment
119 vregion_flags_t morecore_flags = VREGION_FLAGS_READ_WRITE;
121 morecore_flags |= (alignment == HUGE_PAGE_SIZE ? VREGION_FLAGS_HUGE : 0);
123 morecore_flags |= (alignment == LARGE_PAGE_SIZE ? VREGION_FLAGS_LARGE : 0);
125 err = vspace_mmu_aware_init_aligned(&state->mmu_state, HEAP_REGION,
126 alignment, morecore_flags);
127 if (err_is_fail(err)) {
128 return err_push(err, LIB_ERR_VSPACE_MMU_AWARE_INIT);
131 sys_morecore_alloc = morecore_alloc;
132 sys_morecore_free = morecore_free;
137 errval_t morecore_reinit(void)
140 struct morecore_state *state = get_morecore_state();
142 size_t mapoffset = state->mmu_state.mapoffset;
143 size_t remapsize = ROUND_UP(mapoffset, state->mmu_state.alignment);
144 if (remapsize <= mapoffset) {
145 // don't need to do anything if we only recreate the exact same
151 err = frame_alloc(&frame, remapsize, &retsize);
152 if (err_is_fail(err)) {
155 return vspace_mmu_aware_reset(&state->mmu_state, frame, remapsize);