/**
 * \file
 * \brief Creates mappings based on the MMU type
 *
 * If the MMU supports translations, then the anonymous memobj type is used
 * (more efficient); otherwise non-contiguous memory is used.
 */
/*
 * Copyright (c) 2010, 2011, ETH Zurich.
 * Copyright (c) 2014, HP Labs.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
 */
#include <barrelfish/barrelfish.h>
#include <barrelfish/vspace_mmu_aware.h>
#include <barrelfish/core_state.h>
#include <string.h>

/// Minimum free memory before we return it to the memory server
#define MIN_MEM_FOR_FREE        (1 * 1024 * 1024)
/**
 * \brief Initialize a vspace_mmu_aware struct
 *
 * \param state The struct to initialize
 * \param size  The size of the anon memobj to create
 *
 * Initializes the struct according to the type of MMU.
 */
errval_t vspace_mmu_aware_init(struct vspace_mmu_aware *state, size_t size)
{
    return vspace_mmu_aware_init_aligned(state, size, 0,
                                         VREGION_FLAGS_READ_WRITE);
}
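
/*
 * Illustrative usage sketch (an assumption, not part of the original source):
 * reserving a virtual region that is then grown on demand through
 * vspace_mmu_aware_map(). The state object and the 512 MB size are
 * hypothetical.
 *
 *     static struct vspace_mmu_aware vma;
 *
 *     errval_t err = vspace_mmu_aware_init(&vma, 512UL * 1024 * 1024);
 *     if (err_is_fail(err)) {
 *         USER_PANIC_ERR(err, "vspace_mmu_aware_init failed");
 *     }
 */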

errval_t vspace_mmu_aware_init_aligned(struct vspace_mmu_aware *state,
        size_t size, size_t alignment, vregion_flags_t flags)
{
    errval_t err;

    state->size = size;
    state->consumed = 0;
    state->alignment = alignment;

    size = ROUND_UP(size, BASE_PAGE_SIZE);
    err = memobj_create_anon(&state->memobj, size, 0);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_CREATE_ANON);
    }

    err = vregion_map_aligned(&state->vregion, get_current_vspace(),
                              &state->memobj.m, 0, size,
                              flags, alignment);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VREGION_MAP);
    }
    state->offset = state->mapoffset = 0;

    return SYS_ERR_OK;
}
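
/*
 * Illustrative usage sketch (an assumption, not part of the original source):
 * asking for a region that may be backed by 2M pages, with matching
 * alignment. The flag combination and the 1 GB size are hypothetical.
 *
 *     err = vspace_mmu_aware_init_aligned(&vma, 1UL << 30, LARGE_PAGE_SIZE,
 *             VREGION_FLAGS_READ_WRITE | VREGION_FLAGS_LARGE);
 */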

/**
 * \brief Create mappings
 *
 * \param state    The object metadata
 * \param frame    An empty slot to place the frame capability in
 * \param req_size The required amount by the application
 * \param retbuf   Pointer to return the mapped buffer
 * \param retsize  The actual size returned
 *
 * This function returns a special error code if frame_create fails
 * due to the constraints of the memory server (amount of memory or
 * region of memory). This is to facilitate retrying with different
 * constraints.
 */
errval_t vspace_mmu_aware_map(struct vspace_mmu_aware *state,
                              struct capref frame, size_t req_size,
                              void **retbuf, size_t *retsize)
{
    errval_t err;

    // Calculate how much still to map in
    size_t origsize = req_size;
    assert(state->mapoffset >= state->offset);
    if(state->mapoffset - state->offset > req_size) {
        req_size = 0;
    } else {
        req_size -= state->mapoffset - state->offset;
    }
    size_t alloc_size = req_size;
    size_t ret_size = 0;

    if (req_size > 0) {
#if __x86_64__
        if ((state->vregion.flags & VREGION_FLAGS_HUGE) &&
            (state->mapoffset & HUGE_PAGE_MASK) == 0)
        {
            // this is an opportunity to switch to 1G pages if requested.
            // we know that we can use large pages without jumping through hoops
            // if state->vregion.flags has VREGION_FLAGS_HUGE set and
            // mapoffset is aligned to at least HUGE_PAGE_SIZE.
            alloc_size = ROUND_UP(req_size, HUGE_PAGE_SIZE);

            // go to the allocation directly so we can avoid nasty code
            // interaction between #if __x86_64__ and the size checks; we want
            // to be able to use 2M pages on x86_64 also. -SG, 2015-04-30.
            goto allocate;
        }
#endif
        if ((state->vregion.flags & VREGION_FLAGS_LARGE) &&
            (state->mapoffset & LARGE_PAGE_MASK) == 0)
        {
            // this is an opportunity to switch to 2M pages if requested.
            // we know that we can use large pages without jumping through hoops
            // if state->vregion.flags has VREGION_FLAGS_LARGE set and
            // mapoffset is aligned to at least LARGE_PAGE_SIZE.
            alloc_size = ROUND_UP(req_size, LARGE_PAGE_SIZE);
        }
        // Create frame of appropriate size
allocate:
        err = frame_create(frame, alloc_size, &ret_size);
        if (err_is_fail(err)) {
            if (err_no(err) == LIB_ERR_RAM_ALLOC_MS_CONSTRAINTS) {
                // we can only get 4k frames for now; retry with 4k
                if (alloc_size > BASE_PAGE_SIZE && origsize < BASE_PAGE_SIZE) {
                    alloc_size = BASE_PAGE_SIZE;
                    goto allocate;
                }
                return err_push(err, LIB_ERR_FRAME_CREATE_MS_CONSTRAINTS);
            }
            return err_push(err, LIB_ERR_FRAME_CREATE);
        }
        assert(ret_size >= req_size);
        origsize += ret_size - req_size;
        req_size = ret_size;

        if (state->consumed + req_size > state->size) {
            err = cap_delete(frame);
            if (err_is_fail(err)) {
                debug_err(__FILE__, __func__, __LINE__, err,
                          "cap_delete failed");
            }
            return LIB_ERR_VSPACE_MMU_AWARE_NO_SPACE;
        }

        // Map it in
        err = state->memobj.m.f.fill(&state->memobj.m, state->mapoffset, frame,
                                     req_size);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_FILL);
        }
        err = state->memobj.m.f.pagefault(&state->memobj.m, &state->vregion,
                                          state->mapoffset, 0);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
        }
    }

    // Return buffer
    genvaddr_t gvaddr = vregion_get_base_addr(&state->vregion) + state->offset;
    *retbuf = (void*)vspace_genvaddr_to_lvaddr(gvaddr);
    *retsize = origsize;
    state->mapoffset += req_size;
    state->offset += origsize;
    state->consumed += origsize;

    return SYS_ERR_OK;
}
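
/*
 * Illustrative usage sketch (an assumption, not part of the original source):
 * a caller such as a morecore-style heap allocator typically allocates a slot
 * for the frame capability itself and then asks vspace_mmu_aware_map() to
 * back more of the region, treating LIB_ERR_FRAME_CREATE_MS_CONSTRAINTS as a
 * hint to retry with different constraints. The variable names and the 64 kB
 * request below are hypothetical.
 *
 *     struct capref frame;
 *     void *buf;
 *     size_t mapped;
 *     errval_t err = slot_alloc(&frame);
 *     if (err_is_fail(err)) {
 *         return err_push(err, LIB_ERR_SLOT_ALLOC);
 *     }
 *     err = vspace_mmu_aware_map(&vma, frame, 64 * 1024, &buf, &mapped);
 *     if (err_is_fail(err)) {
 *         slot_free(frame);
 *         if (err_no(err) == LIB_ERR_FRAME_CREATE_MS_CONSTRAINTS) {
 *             // the memory server could not satisfy this request; the caller
 *             // may retry with a smaller size or a different region
 *         }
 *         return err;
 *     }
 */

/**
 * \brief Replace the backing of the region with a single new frame
 *
 * Temporarily maps a copy of the new frame, copies the currently mapped
 * contents into it, unmaps and destroys the old backing frames, and then
 * fills the memobj with the new frame at offset 0.
 */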
errval_t vspace_mmu_aware_reset(struct vspace_mmu_aware *state,
                                struct capref frame, size_t size)
{
    errval_t err;
    struct vregion *vregion;
    struct capref oldframe;
    void *vbuf;

    // create copy of new region
    err = slot_alloc(&oldframe);
    if (err_is_fail(err)) {
        return err;
    }
    err = cap_copy(oldframe, frame);
    if (err_is_fail(err)) {
        return err;
    }
    err = vspace_map_one_frame_attr_aligned(&vbuf, size, oldframe,
            VREGION_FLAGS_READ_WRITE | VREGION_FLAGS_LARGE, LARGE_PAGE_SIZE,
            NULL, &vregion);
    if (err_is_fail(err)) {
        return err;
    }

    // copy over data to new frame
    genvaddr_t gen_base = vregion_get_base_addr(&state->vregion);
    memcpy(vbuf, (void*)(lvaddr_t)gen_base, state->mapoffset);

    err = vregion_destroy(vregion);
    if (err_is_fail(err)) {
        return err;
    }

    genvaddr_t offset = 0;
    // Unmap backing frames for [0, size) in state.vregion
    do {
        err = state->memobj.m.f.unfill(&state->memobj.m, 0, &oldframe,
                                       &offset);
        if (err_is_fail(err) &&
            err_no(err) != LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET)
        {
            return err_push(err, LIB_ERR_MEMOBJ_UNMAP_REGION);
        }
        struct frame_identity fi;
        err = invoke_frame_identify(oldframe, &fi);
        if (err_is_fail(err)) {
            return err;
        }
        offset += (1UL<<fi.bits);
        err = cap_destroy(oldframe);
        if (err_is_fail(err)) {
            return err;
        }
    } while(offset < state->mapoffset);

    // Map in new frame
    err = state->memobj.m.f.fill(&state->memobj.m, 0, frame, size);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_FILL);
    }
    err = state->memobj.m.f.pagefault(&state->memobj.m, &state->vregion, 0, 0);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER);
    }

    state->mapoffset = size;
    return SYS_ERR_OK;
}
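
/**
 * \brief Return memory from the end of the region
 *
 * \param state The object metadata
 * \param base  Start of the range to return; the range must form the tail of
 *              the currently consumed part of the region
 * \param bytes Number of bytes to return
 *
 * Backing frames are only unmapped and destroyed once more than
 * MIN_MEM_FOR_FREE of mapped but unused memory has accumulated.
 */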
errval_t vspace_mmu_aware_unmap(struct vspace_mmu_aware *state,
                                lvaddr_t base, size_t bytes)
{
    errval_t err;
    struct capref frame;
    genvaddr_t gvaddr = vregion_get_base_addr(&state->vregion) + state->offset;
    lvaddr_t eaddr = vspace_genvaddr_to_lvaddr(gvaddr);
    genvaddr_t offset;
    genvaddr_t gen_base = vspace_lvaddr_to_genvaddr(base)
        - vregion_get_base_addr(&state->vregion);
    genvaddr_t min_offset = 0;
    bool success = false;

    assert(vspace_lvaddr_to_genvaddr(base) >= vregion_get_base_addr(&state->vregion));
    assert(base + bytes == (lvaddr_t)eaddr);
    assert(bytes <= state->consumed);
    assert(bytes <= state->offset);

    // Reduce offset
    state->offset -= bytes;
    state->consumed -= bytes;

    // Free only in bigger blocks
    if(state->mapoffset - state->offset > MIN_MEM_FOR_FREE) {
        do {
            // Unmap and return (via unfill) frames from base
            err = state->memobj.m.f.unfill(&state->memobj.m, gen_base,
                                           &frame, &offset);
            if(err_is_fail(err) && err_no(err) != LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET) {
                return err_push(err, LIB_ERR_MEMOBJ_UNMAP_REGION);
            }
            // Delete frame cap
            if (err_is_ok(err)) {
                success = true;
                if (min_offset == 0 || min_offset > offset) {
                    min_offset = offset;
                }
                err = cap_destroy(frame);
                if(err_is_fail(err)) {
                    return err;
                }
            }
        } while(err != LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET);

        // state->consumed -= bytes;
        if (success) {
            state->mapoffset = min_offset;
        }
    }

    return SYS_ERR_OK;
}
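
/*
 * Illustrative usage sketch (an assumption, not part of the original source):
 * returning the most recently mapped chunk, which by construction forms the
 * tail of the consumed range. `buf` and `mapped` are assumed to come from a
 * preceding vspace_mmu_aware_map() call.
 *
 *     err = vspace_mmu_aware_unmap(&vma, (lvaddr_t)buf, mapped);
 *     if (err_is_fail(err)) {
 *         USER_PANIC_ERR(err, "vspace_mmu_aware_unmap failed");
 *     }
 */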