3 * \brief memory object of anonymous type.
4 * The object maintains a list of frames.
6 * The object maintains a list of frames and a list of vregions.
7 * The lists are backed by slabs.
8 * The slabs may have to be grown,
9 * in which case the object will use #vspace_pinned_alloc.
11 * morecore uses this memory object so it cannot use malloc for its lists.
12 * Therefore, this uses slabs and grows them using the pinned memory.
16 * Copyright (c) 2009, 2010, 2011, ETH Zurich.
17 * Copyright (c) 2014, HP Labs.
18 * All rights reserved.
20 * This file is distributed under the terms in the attached LICENSE file.
21 * If you do not find this file, copies can be found by writing to:
22 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
25 #include <barrelfish/barrelfish.h>
26 #include "vspace_internal.h"
29 * \brief Map the memory object into a region
31 * \param memobj The memory object
32 * \param vregion The vregion to add
/*
 * Register \p vregion with this anonymous memobj by prepending it to the
 * memobj's vregion list. List nodes come from anon->vregion_slab; when the
 * slab runs dry it is refilled from pinned memory (vspace_pinned_alloc),
 * because this code sits below malloc and must not call it.
 *
 * NOTE(review): this extract is lossy — some lines of the body (e.g. the
 * empty-slab check, closing braces, the final return) are not visible here.
 */
34 static errval_t map_region(struct memobj *memobj, struct vregion *vregion)
37 struct memobj_anon *anon = (struct memobj_anon*)memobj;
/* First attempt: take a list node straight from the slab allocator. */
40 struct vregion_list *data = slab_alloc(&anon->vregion_slab);
/* Slab exhausted: grow it from pinned memory, then retry the allocation. */
43 err = vspace_pinned_alloc(&buf, VREGION_LIST);
44 if (err_is_fail(err)) {
45 return err_push(err, LIB_ERR_VSPACE_PINNED_ALLOC);
47 slab_grow(&anon->vregion_slab, buf,
48 VSPACE_PINNED_UNIT * sizeof(struct vregion_list));
49 data = slab_alloc(&anon->vregion_slab);
/* Allocation failed even after growing the slab — give up. */
51 return LIB_ERR_SLAB_ALLOC_FAIL;
54 data->region = vregion;
56 // Insert into the list
/* Prepend: the new node becomes the head of anon->vregion_list. */
57 struct vregion_list *walk = anon->vregion_list;
58 anon->vregion_list = data;
65 * \brief Unmap the memory object from a region
67 * \param memobj The memory object
68 * \param vregion The vregion to remove
/*
 * Unmap \p vregion from this memobj: walk the ordered frame list, unmap every
 * frame that overlaps the vregion's [offset, offset+size) window via the
 * pmap, and remove the vregion node from anon->vregion_list (freeing it back
 * to the slab). Returns LIB_ERR_VSPACE_VREGION_NOT_FOUND if no overlapping
 * frame was found (see the XXX on the final return).
 *
 * NOTE(review): extraction is lossy — loop headers, else-branches and some
 * closing braces are not visible in this view.
 */
70 static errval_t unmap_region(struct memobj *memobj, struct vregion *vregion)
72 struct memobj_anon *anon = (struct memobj_anon*)memobj;
75 /* Unmap the affected area in the pmap */
76 struct vspace *vspace = vregion_get_vspace(vregion);
77 struct pmap *pmap = vspace_get_pmap(vspace);
78 genvaddr_t vregion_base = vregion_get_base_addr(vregion);
79 genvaddr_t vregion_off = vregion_get_offset(vregion);
80 size_t vregion_size = vregion_get_size(vregion);
/* End of the vregion's window expressed as a memobj offset. */
81 genvaddr_t vregion_end = vregion_off + vregion_size;
83 //printf("(%s:%d) unmap(0x%"PRIxGENVADDR", memobj->size = %zd) vregion size = %zd\n", __FILE__, __LINE__, vregion_base + vregion_off, memobj->size, vregion_size);
85 // unmap all affected frames
86 struct memobj_frame_list *fwalk = anon->frame_list;
87 struct memobj_frame_list *fprev = NULL;
88 //printf("vregion_off = 0x%"PRIxGENVADDR"\n", vregion_off);
89 //printf("vregion_end = 0x%"PRIxGENVADDR"\n", vregion_end);
/* Default result if no frame overlaps the vregion. */
90 err = LIB_ERR_VSPACE_VREGION_NOT_FOUND;
92 //printf("fwalk->offset = %zd\n", fwalk->offset);
93 //printf("fwalk->next = %p\n", fwalk->next);
/* Frame lies entirely before the vregion's window — skip it. */
94 if (fwalk->offset < vregion_off) {
/* Frame starts inside the window — unmap it from the pmap. */
99 else if (fwalk->offset < vregion_end) {
100 err = pmap->f.unmap(pmap, vregion_base + vregion_off, fwalk->size, NULL);
101 if (err_is_fail(err)) {
102 return err_push(err, LIB_ERR_PMAP_UNMAP);
105 /* Remove the vregion from the list */
106 struct vregion_list *prev = NULL;
107 for (struct vregion_list *elt = anon->vregion_list; elt != NULL;
109 if (elt->region == vregion) {
/* Head of the list: unlink by updating the list head. */
111 assert(elt == anon->vregion_list);
112 anon->vregion_list = elt->next;
/* Interior node: unlink via the predecessor. */
114 assert(prev->next == elt);
115 prev->next = elt->next;
/* Return the list node to the slab allocator. */
117 slab_free(&anon->vregion_slab, elt);
/* Advance the window past the frame just processed. */
121 vregion_off += fwalk->size;
127 return err; // XXX: not quite the right error
131 * \brief Set the protection on a range
133 * \param memobj The memory object
134 * \param vregion The vregion to modify the mappings on
135 * \param offset Offset into the memory object
136 * \param range The range of space to set the protection for
137 * \param flags The protection flags
/*
 * Change the protection flags on [offset, offset+range) of \p vregion.
 * Rejects ranges that extend past the vregion; otherwise translates the
 * memobj offset into the vregion's window and applies modify_flags through
 * the pmap — single call for ranges within one base page, otherwise
 * frame-by-frame along the ordered frame list.
 *
 * NOTE(review): extraction is lossy — loop body advancement, some returns
 * and closing braces are not visible in this view.
 */
139 static errval_t protect(struct memobj *memobj, struct vregion *vregion,
140 genvaddr_t offset, size_t range, vs_prot_flags_t flags)
142 struct memobj_anon *anon = (struct memobj_anon*)memobj;
145 /* protect the affected area in the pmap */
146 struct vspace *vspace = vregion_get_vspace(vregion);
147 struct pmap *pmap = vspace_get_pmap(vspace);
148 genvaddr_t vregion_base = vregion_get_base_addr(vregion);
149 genvaddr_t vregion_off = vregion_get_offset(vregion);
150 size_t vregion_size = vregion_get_size(vregion);
151 genvaddr_t vregion_end = vregion_off + vregion_size;
153 //printf("(%s:%d) protect(0x%"PRIxGENVADDR", memobj->size = %zd) vregion size = %zd offset=%zd range=%zd\n", __FILE__, __LINE__, vregion_base + vregion_off, memobj->size, vregion_size, offset, range);
/* Reject ranges that would run past the end of the vregion's window. */
155 if (offset + range > vregion_end) {
156 return LIB_ERR_MEMOBJ_WRONG_OFFSET;
/* Translate the memobj offset into the vregion's coordinate space. */
159 offset += vregion_off;
161 // Special handling if the range cannot span frames
162 if (range <= BASE_PAGE_SIZE) {
163 err = pmap->f.modify_flags(pmap, vregion_base + offset, range, flags, NULL);
164 if (err_is_fail(err)) {
165 return err_push(err, LIB_ERR_PMAP_MODIFY_FLAGS);
170 // protect all affected frames
171 struct memobj_frame_list *fwalk = anon->frame_list;
172 //printf("vregion_off = 0x%"PRIxGENVADDR"\n", vregion_off);
173 //printf("vregion_end = 0x%"PRIxGENVADDR"\n", vregion_end);
/* Walk frames until the whole requested range has been covered. */
174 while (fwalk && range) {
175 //printf("fwalk->offset = %zd\n", fwalk->offset);
176 //printf("fwalk->next = %p\n", fwalk->next);
/* Does the current offset fall inside this frame? */
177 if (offset >= fwalk->offset && offset < fwalk->offset + fwalk->size) {
/* Clamp this step to whichever is smaller: the rest of the frame
 * or the rest of the requested range. */
179 size_t range_in_frame = fwalk->offset + fwalk->size - offset;
180 size_t size = range_in_frame < range ? range_in_frame : range;
183 err = pmap->f.modify_flags(pmap, vregion_base + offset, size, flags, &retsize);
184 if (err_is_fail(err)) {
185 return err_push(err, LIB_ERR_PMAP_MODIFY_FLAGS);
/* Fell off the frame list without covering the range. */
193 return LIB_ERR_VSPACE_VREGION_NOT_FOUND;
202 * \param memobj The memory object
203 * \param vregion The vregion to modify the state on
204 * \param offset Offset into the memory object
205 * \param range The range of space to pin
/* Pin [offset, offset+range) of the memobj for \p vregion.
 * NOTE(review): the function body is not visible in this extract —
 * presumably unimplemented or trivial; confirm against the full source. */
207 static errval_t pin(struct memobj *memobj, struct vregion *vregion,
208 genvaddr_t offset, size_t range)
214 * \brief Unpin a range
216 * \param memobj The memory object
217 * \param vregion The vregion to modify the state on
218 * \param offset Offset into the memory object
219 * \param range The range of space to unpin
/* Unpin [offset, offset+range) of the memobj for \p vregion.
 * NOTE(review): the function body is not visible in this extract —
 * presumably unimplemented or trivial; confirm against the full source. */
221 static errval_t unpin(struct memobj *memobj, struct vregion *vregion,
222 genvaddr_t offset, size_t range)
228 * \brief Set a frame for an offset into the memobj
230 * \param memobj The memory object
231 * \param offset Offset into the memory object
232 * \param frame The frame cap for the offset
233 * \param size The size of frame cap
235 * Pagefault relies on frames inserted in order
/*
 * Insert \p frame (starting at \p foffset within the frame cap) at page-
 * aligned \p offset of the memobj. The frame list is kept sorted by offset
 * (pagefault() relies on this), and overlapping fills are rejected with
 * LIB_ERR_MEMOBJ_DUPLICATE_FILL. Frame-list slabs are refilled from pinned
 * memory before they run out, guarded by a static reentrancy flag because
 * vspace_pinned_alloc() can call back into fill().
 *
 * NOTE(review): extraction is lossy — branch/loop structure lines and some
 * assignments (e.g. new->frame/new->size) are not visible in this view.
 */
237 static errval_t fill_foff(struct memobj *memobj, genvaddr_t offset, struct capref frame,
238 size_t size, genpaddr_t foffset)
241 struct memobj_anon *anon = (struct memobj_anon*)memobj;
/* Offsets must be page-aligned. */
243 assert((offset & BASE_PAGE_MASK) == 0);
245 // AB: allow frame to overlap end of memobj; that might have been the most
246 // efficient allocation size (even if the end of the frame will be unusable)
247 if (offset >= memobj->size) {
248 return LIB_ERR_MEMOBJ_WRONG_OFFSET;
252 struct memobj_frame_list *new = slab_alloc(&anon->frame_slab);
253 // We have to grow our slab allocator when there's still one slab left as
254 // we otherwise might run out of slabs when calling memobj->fill() from
255 // vspace_pinned_alloc(). The is_refilling flag allows us to hand out the
256 // last slab when coming back here from vspace_pinned_alloc().
/* Static: shared across reentrant calls to detect an in-progress refill. */
258 static bool is_refilling = false;
/* Refill early (<= 1 slab left) unless we are already inside a refill. */
259 if (slab_freecount(&anon->frame_slab) <= 1 && !is_refilling) {
262 err = vspace_pinned_alloc(&buf, FRAME_LIST);
263 if (err_is_fail(err)) {
264 return err_push(err, LIB_ERR_VSPACE_PINNED_ALLOC);
266 slab_grow(&anon->frame_slab, buf,
267 VSPACE_PINNED_UNIT * sizeof(struct memobj_frame_list));
268 new = slab_alloc(&anon->frame_slab);
270 return LIB_ERR_SLAB_ALLOC_FAIL;
272 is_refilling = false;
274 new->offset = offset;
277 new->foffset = foffset;
/* Identify the frame cap; result is used to validate the fill. */
279 struct frame_identity fi;
280 err = frame_identify(frame, &fi);
281 if (err_is_fail(err)) {
282 return err_push(err, LIB_ERR_FRAME_IDENTIFY);
284 assert(err_is_ok(err));
/* Walk the sorted list to find the insertion point for the new entry. */
288 struct memobj_frame_list *walk = anon->frame_list;
289 struct memobj_frame_list *prev = NULL;
291 if (new->offset < walk->offset) {
/* Reject overlap with either the predecessor or the successor. */
292 if ((prev != NULL && new->offset < prev->offset + prev->size)
293 || new->offset + new->size > walk->offset) {
294 slab_free(&anon->frame_slab, new);
295 return LIB_ERR_MEMOBJ_DUPLICATE_FILL;
/* Insert at the head of the list. */
301 assert(walk == anon->frame_list);
302 anon->frame_list = new;
/* Appending at the tail: still must not overlap the last entry. */
310 if (new->offset < prev->offset + prev->size) {
311 slab_free(&anon->frame_slab, new);
312 return LIB_ERR_MEMOBJ_DUPLICATE_FILL;
/* Empty list: the new entry becomes the only element. */
317 assert(anon->frame_list == NULL);
318 anon->frame_list = new;
/* Convenience wrapper around fill_foff() with a frame-internal offset of 0. */
323 static errval_t fill(struct memobj *memobj, genvaddr_t offset, struct capref frame,
326 return fill_foff(memobj, offset, frame, size, 0);
330 * \brief Unmap/remove one frame from the end of the memobj
332 * \param memobj The memory object
333 * \param offset The offset from which to remove a frame from
334 * \param ret_frame Pointer to return the removed frame
336 * This will try to remove one frame at an offset greater than the one
337 * specified. Call this function again and again till it returns the
338 * LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET error to get all frames.
/*
 * Remove one frame at an offset >= \p offset from the memobj: unmap it from
 * every associated vregion, return its cap and offset through the out
 * parameters, unlink it from the frame list and free the list node. Returns
 * LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET when no such frame remains, so
 * callers can loop until all frames are drained (see doc comment above).
 *
 * NOTE(review): extraction is lossy — loop headers, vregion iteration
 * advancement and the final return are not visible in this view.
 */
340 static errval_t unfill(struct memobj *memobj, genvaddr_t offset,
341 struct capref *ret_frame, genvaddr_t *ret_offset)
344 struct memobj_anon *anon = (struct memobj_anon*)memobj;
346 // Walk the ordered list of frames to find the right frame
347 struct memobj_frame_list *fwalk = anon->frame_list;
348 struct memobj_frame_list *fprev = NULL;
/* Skip frames that lie below the requested offset. */
350 if (fwalk->offset < offset) {
358 // The specified offset is too high.
359 return LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET;
363 { // Unmap the frame from all vregions
364 struct vregion_list *vwalk = anon->vregion_list;
366 struct vspace *vspace = vregion_get_vspace(vwalk->region);
367 struct pmap *pmap = vspace_get_pmap(vspace);
368 genvaddr_t vregion_base = vregion_get_base_addr(vwalk->region);
/* The mapping address of the frame must be page-aligned. */
371 assert((vregion_base + fwalk->offset) % BASE_PAGE_SIZE == 0);
372 //printf("(%s:%d) unmap(0x%"PRIxGENVADDR", %zd)\n", __FILE__, __LINE__, vregion_base + fwalk->offset, fwalk->size);
373 err = pmap->f.unmap(pmap, vregion_base + fwalk->offset, fwalk->size,
375 if (err_is_fail(err)) {
376 return err_push(err, LIB_ERR_PMAP_UNMAP);
/* The pmap must have unmapped exactly the frame's size. */
378 assert(retsize == fwalk->size);
/* Hand the removed frame back to the caller. */
385 *ret_offset = fwalk->offset;
388 *ret_frame = fwalk->frame;
/* Unlink: interior node via predecessor, head node via the list head. */
391 fprev->next = fwalk->next;
393 anon->frame_list = fwalk->next;
395 slab_free(&anon->frame_slab, fwalk);
400 * \brief Page fault handler
402 * \param memobj The memory object
403 * \param vregion The associated vregion
404 * \param offset Offset into memory object of the page fault
405 * \param type The fault type
407 * Locates the frame for the offset and maps it in.
408 * Relies on fill inserting frames in order.
/*
 * Handle a page fault at memobj \p offset for \p vregion: find the frame
 * covering the offset in the ordered frame list and map the whole frame into
 * the vregion's address range via the pmap. Relies on fill() keeping the
 * frame list sorted. Returns LIB_ERR_MEMOBJ_WRONG_OFFSET when no filled
 * frame covers the faulting offset.
 *
 * NOTE(review): extraction is lossy — the list-walk loop header, the success
 * return and some closing braces are not visible in this view.
 */
410 static errval_t pagefault(struct memobj *memobj, struct vregion *vregion,
411 genvaddr_t offset, vm_fault_type_t type)
414 struct memobj_anon *anon = (struct memobj_anon*)memobj;
416 // Walk the ordered list for the frame and map it in
417 struct memobj_frame_list *walk = anon->frame_list;
/* Does this frame cover the faulting offset? */
419 if (offset >= walk->offset && offset < walk->offset + walk->size) {
420 struct vspace *vspace = vregion_get_vspace(vregion);
421 struct pmap *pmap = vspace_get_pmap(vspace);
422 genvaddr_t base = vregion_get_base_addr(vregion);
423 genvaddr_t vregion_off = vregion_get_offset(vregion);
424 vregion_flags_t flags = vregion_get_flags(vregion);
/* Map the entire frame (not just the faulting page) at its offset. */
425 err = pmap->f.map(pmap, base + vregion_off + walk->offset,
426 walk->frame, walk->foffset, walk->size, flags,
428 if (err_is_fail(err)) {
429 return err_push(err, LIB_ERR_PMAP_MAP);
/* No frame covers the faulting offset. */
436 return LIB_ERR_MEMOBJ_WRONG_OFFSET;
440 * \brief Free up some pages by placing them in the backing storage
442 * \param memobj The memory object
443 * \param size The amount of space to free up
444 * \param frames An array of capref frames to return the freed pages
445 * \param num_frames The number of frames returned
447 * This will affect all the vregions that are associated with the object
/* Free up pages into backing storage (see doc comment above).
 * NOTE(review): the function body is not visible in this extract —
 * presumably unimplemented; confirm against the full source. */
449 static errval_t pager_free(struct memobj *memobj, size_t size,
450 struct capref *frames, size_t num_frames)
458 * \param memobj The memory object
459 * \param size Size of the memory region
460 * \param flags Memory object specific flags
462 * This object handles multiple frames.
463 * The frames are mapped in on demand.
/*
 * Initialize \p anon as an anonymous memobj of the given size and flags:
 * wire up the generic function table, set the type to ANONYMOUS, and set up
 * the two slab allocators (vregion list, frame list) with empty lists.
 *
 * NOTE(review): extraction is lossy — a few assignments (e.g. f.pin,
 * memobj->size) and the final return are not visible in this view.
 */
465 errval_t memobj_create_anon(struct memobj_anon *anon, size_t size,
466 memobj_flags_t flags)
/* The generic memobj is embedded at the start of struct memobj_anon. */
468 struct memobj *memobj = &anon->m;
470 /* Generic portion */
471 memobj->f.map_region = map_region;
472 memobj->f.unmap_region = unmap_region;
473 memobj->f.protect = protect;
475 memobj->f.unpin = unpin;
476 memobj->f.fill = fill;
477 memobj->f.fill_foff = fill_foff;
478 memobj->f.unfill = unfill;
479 memobj->f.pagefault = pagefault;
480 memobj->f.pager_free = pager_free;
483 memobj->flags = flags;
485 memobj->type = ANONYMOUS;
487 /* anon specific portion */
/* Slabs start empty; they are grown on demand from pinned memory. */
488 slab_init(&anon->vregion_slab, sizeof(struct vregion_list), NULL);
489 slab_init(&anon->frame_slab, sizeof(struct memobj_frame_list), NULL);
491 anon->vregion_list = NULL;
492 anon->frame_list = NULL;
497 * \brief Destroy the object
500 errval_t memobj_destroy_anon(struct memobj *memobj, bool delete_caps)
502 struct memobj_anon *m = (struct memobj_anon *)memobj;
504 errval_t err = SYS_ERR_OK;
506 struct vregion_list *vwalk = m->vregion_list;
508 err = vregion_destroy(vwalk->region);
509 if (err_is_fail(err)) {
512 struct vregion_list *old = vwalk;
514 slab_free(&m->vregion_slab, old);
517 struct memobj_frame_list *fwalk = m->frame_list;
520 err = cap_delete(fwalk->frame);
521 if (err_is_fail(err)) {
525 struct memobj_frame_list *old = fwalk;
527 slab_free(&m->frame_slab, old);