/**
 * \file
 * \brief Memory object of anonymous type.
 *
 * The object maintains a list of frames and a list of vregions.
 * The lists are backed by slabs. The slabs may have to be grown,
 * in which case the object will use #vspace_pinned_alloc.
 *
 * morecore uses this memory object, so it cannot use malloc for its lists.
 * Therefore, this file uses slabs and grows them from the pinned memory.
 * (A usage sketch follows the #include directives below.)
 */
/*
 * Copyright (c) 2009, 2010, 2011, ETH Zurich.
 * Copyright (c) 2014, HP Labs.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
 */
#include <barrelfish/barrelfish.h>
#include "vspace_internal.h"
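
/*
 * Usage sketch (illustrative only, not part of the original code; frame_alloc,
 * get_current_vspace and vregion_map are the usual libbarrelfish entry points,
 * but their exact signatures may differ between releases):
 *
 *   errval_t err;
 *   struct memobj_anon memobj;
 *   struct vregion vregion;
 *   struct capref frame;
 *   size_t retsize;
 *
 *   err = memobj_create_anon(&memobj, BASE_PAGE_SIZE, 0);
 *   err = vregion_map(&vregion, get_current_vspace(), &memobj.m, 0,
 *                     BASE_PAGE_SIZE, VREGION_FLAGS_READ_WRITE);
 *   err = frame_alloc(&frame, BASE_PAGE_SIZE, &retsize);
 *   err = memobj.m.f.fill(&memobj.m, 0, frame, retsize);
 *
 *   // Pages are mapped lazily: the first access to the vregion faults and
 *   // pagefault() below maps the filled frame into the address space.
 */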
/**
 * \brief Map the memory object into a region
 *
 * \param memobj  The memory object
 * \param vregion The vregion to add
 */
static errval_t map_region(struct memobj *memobj, struct vregion *vregion)
{
    errval_t err;
    struct memobj_anon *anon = (struct memobj_anon*)memobj;

    // Allocate a list element, growing the slab from pinned memory if needed
    struct vregion_list *data = slab_alloc(&anon->vregion_slab);
    if (data == NULL) {
        void *buf;
        err = vspace_pinned_alloc(&buf, VREGION_LIST);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_VSPACE_PINNED_ALLOC);
        }
        slab_grow(&anon->vregion_slab, buf,
                  VSPACE_PINNED_UNIT * sizeof(struct vregion_list));
        data = slab_alloc(&anon->vregion_slab);
        if (data == NULL) {
            return LIB_ERR_SLAB_ALLOC_FAIL;
        }
    }
    data->region = vregion;

    // Insert at the head of the vregion list
    struct vregion_list *walk = anon->vregion_list;
    anon->vregion_list = data;
    data->next = walk;

    return SYS_ERR_OK;
}
/**
 * \brief Unmap the memory object from a region
 *
 * \param memobj  The memory object
 * \param vregion The vregion to remove
 */
static errval_t unmap_region(struct memobj *memobj, struct vregion *vregion)
{
    struct memobj_anon *anon = (struct memobj_anon*)memobj;
    errval_t err;

    /* Unmap the affected area in the pmap */
    struct vspace *vspace = vregion_get_vspace(vregion);
    struct pmap *pmap = vspace_get_pmap(vspace);
    genvaddr_t vregion_base = vregion_get_base_addr(vregion);
    genvaddr_t vregion_off = vregion_get_offset(vregion);
    size_t vregion_size = vregion_get_size(vregion);
    genvaddr_t vregion_end = vregion_off + vregion_size;

    // Unmap all affected frames; the frame list is ordered by offset
    struct memobj_frame_list *fwalk = anon->frame_list;
    struct memobj_frame_list *fprev = NULL;
    err = LIB_ERR_VSPACE_VREGION_NOT_FOUND;
    while (fwalk != NULL) {
        if (fwalk->offset < vregion_off) {
            // Frame lies before the affected range: skip it
            fprev = fwalk;
            fwalk = fwalk->next;
            continue;
        } else if (fwalk->offset < vregion_end) {
            // Frame lies inside the affected range: unmap it
            err = pmap->f.unmap(pmap, vregion_base + vregion_off, fwalk->size, NULL);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_UNMAP);
            }

            /* Remove the vregion from the list */
            struct vregion_list *prev = NULL;
            for (struct vregion_list *elt = anon->vregion_list; elt != NULL;
                 prev = elt, elt = elt->next) {
                if (elt->region == vregion) {
                    if (prev == NULL) {
                        assert(elt == anon->vregion_list);
                        anon->vregion_list = elt->next;
                    } else {
                        assert(prev->next == elt);
                        prev->next = elt->next;
                    }
                    slab_free(&anon->vregion_slab, elt);
                    break;
                }
            }

            vregion_off += fwalk->size;
            fprev = fwalk;
            fwalk = fwalk->next;
        } else {
            // Past the end of the affected range: done
            break;
        }
    }

    return err; // XXX: not quite the right error
}
/**
 * \brief Set the protection on a range
 *
 * \param memobj  The memory object
 * \param vregion The vregion to modify the mappings on
 * \param offset  Offset into the memory object
 * \param range   The size of the range to set the protection for
 * \param flags   The protection flags
 */
static errval_t protect(struct memobj *memobj, struct vregion *vregion,
                        genvaddr_t offset, size_t range, vs_prot_flags_t flags)
{
    struct vspace *vspace = vregion_get_vspace(vregion);
    struct pmap *pmap = vspace_get_pmap(vspace);
    genvaddr_t base = vregion_get_base_addr(vregion);
    genvaddr_t vregion_off = vregion_get_offset(vregion);
    errval_t err;

    err = pmap->f.modify_flags(pmap, base + vregion_off + offset, range,
                               flags, &range);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_PMAP_MODIFY_FLAGS);
    }

    return SYS_ERR_OK;
}
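
/*
 * Illustrative use of protect() through the function table set up in
 * memobj_create_anon() below (assumption, not from the original code;
 * VREGION_FLAGS_READ is assumed to denote a read-only mapping):
 * make the first page of a mapped region read-only.
 *
 *   err = memobj->f.protect(memobj, vregion, 0, BASE_PAGE_SIZE,
 *                           VREGION_FLAGS_READ);
 */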
/**
 * \brief Pin a range
 *
 * \param memobj  The memory object
 * \param vregion The vregion to modify the state on
 * \param offset  Offset into the memory object
 * \param range   The range of space to pin
 */
static errval_t pin(struct memobj *memobj, struct vregion *vregion,
                    genvaddr_t offset, size_t range)
{
    USER_PANIC("NYI");
}
/**
 * \brief Unpin a range
 *
 * \param memobj  The memory object
 * \param vregion The vregion to modify the state on
 * \param offset  Offset into the memory object
 * \param range   The range of space to unpin
 */
static errval_t unpin(struct memobj *memobj, struct vregion *vregion,
                      genvaddr_t offset, size_t range)
{
    USER_PANIC("NYI");
}
/**
 * \brief Set a frame for an offset into the memobj
 *
 * \param memobj  The memory object
 * \param offset  Offset into the memory object
 * \param frame   The frame cap for the offset
 * \param size    The size of the frame cap
 * \param foffset Offset into the frame cap
 *
 * The page-fault handler relies on frames being inserted in order.
 */
static errval_t fill_foff(struct memobj *memobj, genvaddr_t offset, struct capref frame,
                          size_t size, genpaddr_t foffset)
{
    errval_t err;
    struct memobj_anon *anon = (struct memobj_anon*)memobj;

    assert((offset & BASE_PAGE_MASK) == 0);
    // AB: allow frame to overlap end of memobj; that might have been the most
    // efficient allocation size (even if the end of the frame will be unusable)
    if (offset >= memobj->size) {
        return LIB_ERR_MEMOBJ_WRONG_OFFSET;
    }

    // Allocate a list element, growing the slab from pinned memory if needed
    struct memobj_frame_list *new = slab_alloc(&anon->frame_slab);
    if (new == NULL) {
        void *buf;
        err = vspace_pinned_alloc(&buf, FRAME_LIST);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_VSPACE_PINNED_ALLOC);
        }
        slab_grow(&anon->frame_slab, buf,
                  VSPACE_PINNED_UNIT * sizeof(struct memobj_frame_list));
        new = slab_alloc(&anon->frame_slab);
        if (new == NULL) {
            return LIB_ERR_SLAB_ALLOC_FAIL;
        }
    }
    new->offset  = offset;
    new->frame   = frame;
    new->size    = size;
    new->foffset = foffset;

    // Sanity check that the cap is a valid frame
    struct frame_identity id;
    err = invoke_frame_identify(frame, &id);
    assert(err_is_ok(err));

    // Insert in order of increasing offset; reject overlapping fills
    struct memobj_frame_list *walk = anon->frame_list;
    struct memobj_frame_list *prev = NULL;
    while (walk != NULL) {
        if (new->offset < walk->offset) {
            if ((prev != NULL && new->offset < prev->offset + prev->size)
                || new->offset + new->size > walk->offset) {
                slab_free(&anon->frame_slab, new);
                return LIB_ERR_MEMOBJ_DUPLICATE_FILL;
            }
            new->next = walk;
            if (prev != NULL) {
                prev->next = new;
            } else {
                assert(walk == anon->frame_list);
                anon->frame_list = new;
            }
            return SYS_ERR_OK;
        }
        prev = walk;
        walk = walk->next;
    }

    // Insert at the end of the list (or as the only element)
    new->next = NULL;
    if (prev != NULL) {
        if (new->offset < prev->offset + prev->size) {
            slab_free(&anon->frame_slab, new);
            return LIB_ERR_MEMOBJ_DUPLICATE_FILL;
        }
        prev->next = new;
    } else {
        assert(anon->frame_list == NULL);
        anon->frame_list = new;
    }
    return SYS_ERR_OK;
}
static errval_t fill(struct memobj *memobj, genvaddr_t offset, struct capref frame,
                     size_t size)
{
    return fill_foff(memobj, offset, frame, size, 0);
}
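
/*
 * Illustrative use of fill_foff() (assumption, not from the original code):
 * back memobj offset 0 with the second page of a larger frame by passing a
 * frame-internal offset (foffset) of BASE_PAGE_SIZE; pagefault() below later
 * maps the frame at exactly that intra-frame offset.
 *
 *   err = memobj->f.fill_foff(memobj, 0, frame, BASE_PAGE_SIZE, BASE_PAGE_SIZE);
 */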
/**
 * \brief Unmap/remove one frame from the end of the memobj
 *
 * \param memobj     The memory object
 * \param offset     The offset above which to remove a frame
 * \param ret_frame  Pointer to return the removed frame
 * \param ret_offset Pointer to return the offset of the removed frame
 *
 * This removes one frame at an offset at or above the one specified.
 * Call it repeatedly until it returns LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET
 * to retrieve all frames (see the sketch after this function).
 */
static errval_t unfill(struct memobj *memobj, genvaddr_t offset,
                       struct capref *ret_frame, genvaddr_t *ret_offset)
{
    errval_t err;
    struct memobj_anon *anon = (struct memobj_anon*)memobj;

    // Walk the ordered list of frames to find the right frame
    struct memobj_frame_list *fwalk = anon->frame_list;
    struct memobj_frame_list *fprev = NULL;
    while (fwalk != NULL && fwalk->offset < offset) {
        fprev = fwalk;
        fwalk = fwalk->next;
    }
    if (fwalk == NULL) {
        // The specified offset is too high.
        return LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET;
    }

    { // Unmap the frame from all vregions
        struct vregion_list *vwalk = anon->vregion_list;
        while (vwalk != NULL) {
            struct vspace *vspace = vregion_get_vspace(vwalk->region);
            struct pmap *pmap = vspace_get_pmap(vspace);
            genvaddr_t vregion_base = vregion_get_base_addr(vwalk->region);
            size_t retsize;
            assert((vregion_base + fwalk->offset) % BASE_PAGE_SIZE == 0);
            err = pmap->f.unmap(pmap, vregion_base + fwalk->offset, fwalk->size,
                                &retsize);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_UNMAP);
            }
            assert(retsize == fwalk->size);
            vwalk = vwalk->next;
        }
    }

    // Return the removed frame and its offset to the caller
    if (ret_offset != NULL) {
        *ret_offset = fwalk->offset;
    }
    if (ret_frame != NULL) {
        *ret_frame = fwalk->frame;
    }

    // Remove the frame from the list
    if (fprev != NULL) {
        fprev->next = fwalk->next;
    } else {
        anon->frame_list = fwalk->next;
    }
    slab_free(&anon->frame_slab, fwalk);

    return SYS_ERR_OK;
}
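
/*
 * Sketch of the repeated-call pattern described above (illustrative only):
 * drain every frame from the memobj, starting at offset 0, until unfill()
 * reports that no frame at or above the requested offset remains.
 *
 *   struct capref frame;
 *   genvaddr_t off;
 *   errval_t e;
 *   for (;;) {
 *       e = memobj->f.unfill(memobj, 0, &frame, &off);
 *       if (err_is_fail(e)) {
 *           // LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET means no frames remain
 *           break;
 *       }
 *       // caller now owns `frame`, which backed offset `off`
 *   }
 */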
/**
 * \brief Page fault handler
 *
 * \param memobj  The memory object
 * \param vregion The associated vregion
 * \param offset  Offset into the memory object of the page fault
 * \param type    The fault type
 *
 * Locates the frame for the offset and maps it in.
 * Relies on fill() inserting frames in order.
 */
static errval_t pagefault(struct memobj *memobj, struct vregion *vregion,
                          genvaddr_t offset, vm_fault_type_t type)
{
    errval_t err;
    struct memobj_anon *anon = (struct memobj_anon*)memobj;

    // Walk the ordered frame list for the frame covering the fault and map it
    struct memobj_frame_list *walk = anon->frame_list;
    while (walk != NULL) {
        if (offset >= walk->offset && offset < walk->offset + walk->size) {
            struct vspace *vspace = vregion_get_vspace(vregion);
            struct pmap *pmap = vspace_get_pmap(vspace);
            genvaddr_t base = vregion_get_base_addr(vregion);
            genvaddr_t vregion_off = vregion_get_offset(vregion);
            vregion_flags_t flags = vregion_get_flags(vregion);
            err = pmap->f.map(pmap, base + vregion_off + walk->offset,
                              walk->frame, walk->foffset, walk->size, flags,
                              NULL, NULL);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_MAP);
            }
            return SYS_ERR_OK;
        }
        walk = walk->next;
    }

    return LIB_ERR_MEMOBJ_WRONG_OFFSET;
}
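
/*
 * Note (assumption, not stated in the original code): besides being invoked
 * from the vspace page-fault path, this handler can be called directly after
 * fill() to map a range eagerly instead of waiting for the first access:
 *
 *   err = memobj->f.fill(memobj, offset, frame, size);
 *   err = memobj->f.pagefault(memobj, vregion, offset, 0);
 */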
/**
 * \brief Free up some pages by placing them in the backing storage
 *
 * \param memobj     The memory object
 * \param size       The amount of space to free up
 * \param frames     An array of frame caps in which to return the freed pages
 * \param num_frames The number of entries in the frames array
 *
 * This affects all the vregions that are associated with the object.
 */
static errval_t pager_free(struct memobj *memobj, size_t size,
                           struct capref *frames, size_t num_frames)
{
    USER_PANIC("NYI");
}
/**
 * \brief Initialize an anonymous memory object
 *
 * \param anon  The anonymous memory object
 * \param size  Size of the memory region
 * \param flags Memory object specific flags
 *
 * This object can be backed by multiple frames,
 * which are mapped in on demand.
 */
errval_t memobj_create_anon(struct memobj_anon *anon, size_t size,
                            memobj_flags_t flags)
{
    struct memobj *memobj = &anon->m;

    /* Generic portion */
    memobj->f.map_region   = map_region;
    memobj->f.unmap_region = unmap_region;
    memobj->f.protect      = protect;
    memobj->f.pin          = pin;
    memobj->f.unpin        = unpin;
    memobj->f.fill         = fill;
    memobj->f.fill_foff    = fill_foff;
    memobj->f.unfill       = unfill;
    memobj->f.pagefault    = pagefault;
    memobj->f.pager_free   = pager_free;

    memobj->size  = size;
    memobj->flags = flags;

    memobj->type = ANONYMOUS;

    /* anon specific portion */
    slab_init(&anon->vregion_slab, sizeof(struct vregion_list), NULL);
    slab_init(&anon->frame_slab, sizeof(struct memobj_frame_list), NULL);

    anon->vregion_list = NULL;
    anon->frame_list = NULL;

    return SYS_ERR_OK;
}
/**
 * \brief Destroy the object
 */
errval_t memobj_destroy_anon(struct memobj *memobj)
{
    struct memobj_anon *m = (struct memobj_anon *)memobj;

    errval_t err = SYS_ERR_OK;

    // Destroy all vregions backed by this object
    struct vregion_list *vwalk = m->vregion_list;
    while (vwalk != NULL) {
        err = vregion_destroy(vwalk->region);
        if (err_is_fail(err)) {
            return err;
        }
        struct vregion_list *old = vwalk;
        vwalk = vwalk->next;
        slab_free(&m->vregion_slab, old);
    }

    // Delete all frame caps held by this object
    struct memobj_frame_list *fwalk = m->frame_list;
    while (fwalk != NULL) {
        err = cap_delete(fwalk->frame);
        if (err_is_fail(err)) {
            return err;
        }
        struct memobj_frame_list *old = fwalk;
        fwalk = fwalk->next;
        slab_free(&m->frame_slab, old);
    }

    return err;
}