3 * \brief memory object of anonymous type.
4 * The object maintains a list of frames.
6 * The object maintains a list of frames and a list of vregions.
7 * The lists are backed by slabs.
8 * The slabs may have to be grown,
9 * in which case the object will use #vspace_pinned_alloc.
11 * morecore uses this memory object so it cannot use malloc for its lists.
12 * Therefore, this uses slabs and grows them using the pinned memory.
16 * Copyright (c) 2009, 2010, 2011, ETH Zurich.
17 * Copyright (c) 2014, HP Labs.
18 * All rights reserved.
20 * This file is distributed under the terms in the attached LICENSE file.
21 * If you do not find this file, copies can be found by writing to:
22 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
25 #include <barrelfish/barrelfish.h>
26 #include "vspace_internal.h"
29 * \brief Map the memory object into a region
31 * \param memobj The memory object
32 * \param vregion The region to add
34 static errval_t map_region(struct memobj *memobj, struct vregion *vregion)
37 struct memobj_anon *anon = (struct memobj_anon*)memobj;
// Fast path: take a list node from the dedicated vregion slab.
40 struct vregion_list *data = slab_alloc(&anon->vregion_slab);
// Slab exhausted: refill from pinned memory. malloc cannot be used here,
// because morecore itself depends on this memobj (see file header).
43 err = vspace_pinned_alloc(&buf, VREGION_LIST);
44 if (err_is_fail(err)) {
45 return err_push(err, LIB_ERR_VSPACE_PINNED_ALLOC);
47 slab_grow(&anon->vregion_slab, buf,
48 VSPACE_PINNED_UNIT * sizeof(struct vregion_list));
// Retry the allocation now that the slab has been grown.
49 data = slab_alloc(&anon->vregion_slab);
// Still nothing even after growing the slab: give up.
51 return LIB_ERR_SLAB_ALLOC_FAIL;
54 data->region = vregion;
56 // Insert the new entry at the head of the vregion list
57 struct vregion_list *walk = anon->vregion_list;
58 anon->vregion_list = data;
65 * \brief Unmap the memory object from a region
67 * \param memobj The memory object
68 * \param vregion The region to remove
70 static errval_t unmap_region(struct memobj *memobj, struct vregion *vregion)
72 struct memobj_anon *anon = (struct memobj_anon*)memobj;
75 /* Unmap the affected area in the pmap */
76 struct vspace *vspace = vregion_get_vspace(vregion);
77 struct pmap *pmap = vspace_get_pmap(vspace);
78 genvaddr_t vregion_base = vregion_get_base_addr(vregion);
79 genvaddr_t vregion_off = vregion_get_offset(vregion);
80 size_t vregion_size = vregion_get_size(vregion);
// End of the region, expressed as an offset into the memobj.
81 genvaddr_t vregion_end = vregion_off + vregion_size;
83 //printf("(%s:%d) unmap(0x%"PRIxGENVADDR", memobj->size = %zd) vregion size = %zd\n", __FILE__, __LINE__, vregion_base + vregion_off, memobj->size, vregion_size);
85 // unmap all affected frames
86 struct memobj_frame_list *fwalk = anon->frame_list;
87 struct memobj_frame_list *fprev = NULL;
88 //printf("vregion_off = 0x%"PRIxGENVADDR"\n", vregion_off);
89 //printf("vregion_end = 0x%"PRIxGENVADDR"\n", vregion_end);
// Sentinel: if the walk below never touches a frame in range, this
// "not found" error is what gets returned at the bottom.
90 err = LIB_ERR_VSPACE_VREGION_NOT_FOUND;
92 //printf("fwalk->offset = %zd\n", fwalk->offset);
93 //printf("fwalk->next = %p\n", fwalk->next);
// Frame entirely before the region: skip forward.
94 if (fwalk->offset < vregion_off) {
// Frame overlaps the region: unmap its pages from the pmap.
99 else if (fwalk->offset < vregion_end) {
100 err = pmap->f.unmap(pmap, vregion_base + vregion_off, fwalk->size, NULL);
101 if (err_is_fail(err)) {
102 return err_push(err, LIB_ERR_PMAP_UNMAP);
105 /* Remove the vregion from the list */
106 struct vregion_list *prev = NULL;
107 for (struct vregion_list *elt = anon->vregion_list; elt != NULL;
109 if (elt->region == vregion) {
// Head of the list: unlink by moving the head pointer.
111 assert(elt == anon->vregion_list);
112 anon->vregion_list = elt->next;
// Interior node: splice it out via the predecessor.
114 assert(prev->next == elt);
115 prev->next = elt->next;
117 slab_free(&anon->vregion_slab, elt);
// Advance past the frame just unmapped.
121 vregion_off += fwalk->size;
127 return err; // XXX: not quite the right error
131 * \brief Set the protection on a range
133 * \param memobj The memory object
134 * \param vregion The vregion to modify the mappings on
135 * \param offset Offset into the memory object
136 * \param range The range of space to set the protection for
137 * \param flags The protection flags
139 static errval_t protect(struct memobj *memobj, struct vregion *vregion,
140 genvaddr_t offset, size_t range, vs_prot_flags_t flags)
142 struct memobj_anon *anon = (struct memobj_anon*)memobj;
145 /* protect the affected area in the pmap */
146 struct vspace *vspace = vregion_get_vspace(vregion);
147 struct pmap *pmap = vspace_get_pmap(vspace);
148 genvaddr_t vregion_base = vregion_get_base_addr(vregion);
149 genvaddr_t vregion_off = vregion_get_offset(vregion);
150 size_t vregion_size = vregion_get_size(vregion);
151 genvaddr_t vregion_end = vregion_off + vregion_size;
153 //printf("(%s:%d) protect(0x%"PRIxGENVADDR", memobj->size = %zd) vregion size = %zd offset=%zd range=%zd\n", __FILE__, __LINE__, vregion_base + vregion_off, memobj->size, vregion_size, offset, range);
// Reject requests extending beyond the end of the mapped region.
155 if (offset + range > vregion_end) {
156 return LIB_ERR_MEMOBJ_WRONG_OFFSET;
// From here on, offset is relative to the memobj, like frame offsets.
159 offset += vregion_off;
161 // Special handling if the range cannot span frames
162 if (range <= BASE_PAGE_SIZE) {
163 err = pmap->f.modify_flags(pmap, vregion_base + offset, range, flags, NULL);
164 if (err_is_fail(err)) {
165 return err_push(err, LIB_ERR_PMAP_MODIFY_FLAGS);
170 // protect all affected frames, one frame-sized chunk at a time
171 struct memobj_frame_list *fwalk = anon->frame_list;
172 //printf("vregion_off = 0x%"PRIxGENVADDR"\n", vregion_off);
173 //printf("vregion_end = 0x%"PRIxGENVADDR"\n", vregion_end);
174 while (fwalk && range) {
175 //printf("fwalk->offset = %zd\n", fwalk->offset);
176 //printf("fwalk->next = %p\n", fwalk->next);
// Does the requested offset fall inside this frame?
177 if (offset >= fwalk->offset && offset < fwalk->offset + fwalk->size) {
// Clamp the chunk to whichever is smaller: the rest of this
// frame, or the remaining requested range.
179 size_t range_in_frame = fwalk->offset + fwalk->size - offset;
180 size_t size = range_in_frame < range ? range_in_frame : range;
183 err = pmap->f.modify_flags(pmap, vregion_base + offset, size, flags, &retsize);
184 if (err_is_fail(err)) {
185 return err_push(err, LIB_ERR_PMAP_MODIFY_FLAGS);
// Ran off the end of the frame list without consuming the range.
193 return LIB_ERR_VSPACE_VREGION_NOT_FOUND;
* \brief Pin a range
202 * \param memobj The memory object
203 * \param vregion The vregion to modify the state on
204 * \param offset Offset into the memory object
205 * \param range The range of space to pin
207 static errval_t pin(struct memobj *memobj, struct vregion *vregion,
208 genvaddr_t offset, size_t range)
// NOTE(review): function body not visible in this excerpt — presumably
// unimplemented or a stub; confirm against the full source before relying
// on pin semantics.
214 * \brief Unpin a range
216 * \param memobj The memory object
217 * \param vregion The vregion to modify the state on
218 * \param offset Offset into the memory object
219 * \param range The range of space to unpin
221 static errval_t unpin(struct memobj *memobj, struct vregion *vregion,
222 genvaddr_t offset, size_t range)
// NOTE(review): function body not visible in this excerpt — presumably
// unimplemented or a stub; confirm against the full source.
228 * \brief Set a frame for an offset into the memobj
230 * \param memobj The memory object
231 * \param offset Offset into the memory object (must be page-aligned)
232 * \param frame The frame cap for the offset
233 * \param size The size of frame cap
235 * Pagefault relies on frames inserted in order
237 static errval_t fill_foff(struct memobj *memobj, genvaddr_t offset, struct capref frame,
238 size_t size, genpaddr_t foffset)
241 struct memobj_anon *anon = (struct memobj_anon*)memobj;
// Offsets must be page-aligned; unaligned fills are a caller bug.
243 assert((offset & BASE_PAGE_MASK) == 0);
245 // AB: allow frame to overlap end of memobj; that might have been the most
246 // efficient allocation size (even if the end of the frame will be unusable)
247 if (offset >= memobj->size) {
248 return LIB_ERR_MEMOBJ_WRONG_OFFSET;
252 struct memobj_frame_list *new = slab_alloc(&anon->frame_slab);
253 // We have to grow our slab allocator when there's still one slab left as
254 // we otherwise might run out of slabs when calling memobj->fill() from
255 // vspace_pinned_alloc(). The is_refilling flag allows us to hand out the
256 // last slab when coming back here from vspace_pinned_alloc().
// Refill proactively (<= 1 slab left); the flag guards against recursing
// into a second refill when vspace_pinned_alloc() re-enters fill().
258 if (slab_freecount(&anon->frame_slab) <= 1 && !anon->frame_slab_refilling) {
259 anon->frame_slab_refilling = true;
261 err = vspace_pinned_alloc(&buf, FRAME_LIST);
262 if (err_is_fail(err)) {
263 return err_push(err, LIB_ERR_VSPACE_PINNED_ALLOC);
265 slab_grow(&anon->frame_slab, buf,
266 VSPACE_PINNED_UNIT * sizeof(struct memobj_frame_list));
// Retry the allocation after the refill.
268 new = slab_alloc(&anon->frame_slab);
270 anon->frame_slab_refilling = false;
// Out of slabs and refill not possible/allowed: fail the fill.
273 return LIB_ERR_SLAB_ALLOC_FAIL;
276 new->offset = offset;
279 new->foffset = foffset;
// Identify the frame capability (validates it and yields its identity).
281 struct frame_identity fi;
282 err = frame_identify(frame, &fi);
283 if (err_is_fail(err)) {
284 return err_push(err, LIB_ERR_FRAME_IDENTIFY);
286 assert(err_is_ok(err));
// Insert into the frame list, keeping it sorted by offset (the
// pagefault handler relies on this ordering).
290 struct memobj_frame_list *walk = anon->frame_list;
291 struct memobj_frame_list *prev = NULL;
293 if (new->offset < walk->offset) {
// Reject overlap with the predecessor or with this node.
294 if ((prev != NULL && new->offset < prev->offset + prev->size)
295 || new->offset + new->size > walk->offset) {
296 slab_free(&anon->frame_slab, new);
297 return LIB_ERR_MEMOBJ_DUPLICATE_FILL;
// Insertion at the head of the list.
303 assert(walk == anon->frame_list);
304 anon->frame_list = new;
// Appending after the current tail: still must not overlap it.
312 if (new->offset < prev->offset + prev->size) {
313 slab_free(&anon->frame_slab, new);
314 return LIB_ERR_MEMOBJ_DUPLICATE_FILL;
// Empty list: the new entry becomes the whole list.
319 assert(anon->frame_list == NULL);
320 anon->frame_list = new;
// Convenience wrapper around fill_foff() with a frame-internal offset of 0,
// i.e. the frame is used from its beginning.
325 static errval_t fill(struct memobj *memobj, genvaddr_t offset, struct capref frame,
328 return fill_foff(memobj, offset, frame, size, 0);
332 * \brief Unmap/remove one frame from the end of the memobj
334 * \param memobj The memory object
335 * \param offset The offset from which to remove a frame from
336 * \param ret_frame Pointer to return the removed frame
338 * This will try to remove one frame at an offset greater than the one
339 * specified. Call this function again and again till it returns the
340 * LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET error to get all frames.
342 static errval_t unfill(struct memobj *memobj, genvaddr_t offset,
343 struct capref *ret_frame, genvaddr_t *ret_offset)
346 struct memobj_anon *anon = (struct memobj_anon*)memobj;
348 // Walk the ordered frame list to find the first frame at or above offset
349 struct memobj_frame_list *fwalk = anon->frame_list;
350 struct memobj_frame_list *fprev = NULL;
// Skip frames that lie entirely below the requested offset.
352 if (fwalk->offset < offset) {
360 // The specified offset is too high.
361 return LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET;
365 { // Unmap the frame from all vregions
366 struct vregion_list *vwalk = anon->vregion_list;
368 struct vspace *vspace = vregion_get_vspace(vwalk->region);
369 struct pmap *pmap = vspace_get_pmap(vspace);
370 genvaddr_t vregion_base = vregion_get_base_addr(vwalk->region);
// Mapping addresses are page-aligned by construction; verify.
373 assert((vregion_base + fwalk->offset) % BASE_PAGE_SIZE == 0);
374 //printf("(%s:%d) unmap(0x%"PRIxGENVADDR", %zd)\n", __FILE__, __LINE__, vregion_base + fwalk->offset, fwalk->size);
375 err = pmap->f.unmap(pmap, vregion_base + fwalk->offset, fwalk->size,
377 if (err_is_fail(err)) {
378 return err_push(err, LIB_ERR_PMAP_UNMAP);
// The pmap must have unmapped exactly the size we asked for.
380 assert(retsize == fwalk->size);
// Hand the frame (and its memobj offset) back to the caller.
387 *ret_offset = fwalk->offset;
390 *ret_frame = fwalk->frame;
// Unlink the entry: interior node via predecessor, else move the head.
393 fprev->next = fwalk->next;
395 anon->frame_list = fwalk->next;
397 slab_free(&anon->frame_slab, fwalk);
402 * \brief Page fault handler
404 * \param memobj The memory object
405 * \param vregion The associated vregion
406 * \param offset Offset into memory object of the page fault
407 * \param type The fault type
409 * Locates the frame for the offset and maps it in.
410 * Relies on fill inserting frames in order.
412 static errval_t pagefault(struct memobj *memobj, struct vregion *vregion,
413 genvaddr_t offset, vm_fault_type_t type)
416 struct memobj_anon *anon = (struct memobj_anon*)memobj;
418 // Walk the ordered list for the frame covering the faulting offset
419 struct memobj_frame_list *walk = anon->frame_list;
// Found the frame containing the fault: map the whole frame in.
421 if (offset >= walk->offset && offset < walk->offset + walk->size) {
422 struct vspace *vspace = vregion_get_vspace(vregion);
423 struct pmap *pmap = vspace_get_pmap(vspace);
424 genvaddr_t base = vregion_get_base_addr(vregion);
425 genvaddr_t vregion_off = vregion_get_offset(vregion);
426 vregion_flags_t flags = vregion_get_flags(vregion);
// Map at the region base plus the frame's offset within the memobj,
// honoring the frame-internal offset recorded by fill_foff().
427 err = pmap->f.map(pmap, base + vregion_off + walk->offset,
428 walk->frame, walk->foffset, walk->size, flags,
430 if (err_is_fail(err)) {
431 return err_push(err, LIB_ERR_PMAP_MAP);
// No frame has been filled for this offset: genuine wrong-offset fault.
438 return LIB_ERR_MEMOBJ_WRONG_OFFSET;
442 * \brief Free up some pages by placing them in the backing storage
444 * \param memobj The memory object
445 * \param size The amount of space to free up
446 * \param frames An array of capref frames to return the freed pages
447 * \param num_frames The number of frames returned
449 * This will affect all the vregions that are associated with the object
451 static errval_t pager_free(struct memobj *memobj, size_t size,
452 struct capref *frames, size_t num_frames)
// NOTE(review): function body not visible in this excerpt — presumably
// unimplemented or a stub; confirm against the full source.
* \brief Initialize an anonymous memory object
460 * \param anon The anonymous memory object to initialize
461 * \param size Size of the memory region
462 * \param flags Memory object specific flags
464 * This object handles multiple frames.
465 * The frames are mapped in on demand.
467 errval_t memobj_create_anon(struct memobj_anon *anon, size_t size,
468 memobj_flags_t flags)
470 struct memobj *memobj = &anon->m;
472 /* Generic portion: wire up the function table for this memobj type */
473 memobj->f.map_region = map_region;
474 memobj->f.unmap_region = unmap_region;
475 memobj->f.protect = protect;
477 memobj->f.unpin = unpin;
478 memobj->f.fill = fill;
479 memobj->f.fill_foff = fill_foff;
480 memobj->f.unfill = unfill;
481 memobj->f.pagefault = pagefault;
482 memobj->f.pager_free = pager_free;
485 memobj->flags = flags;
487 memobj->type = ANONYMOUS;
489 /* anon specific portion: empty slab-backed lists; slabs are grown
490 lazily from pinned memory on first use (see map_region/fill_foff) */
490 slab_init(&anon->vregion_slab, sizeof(struct vregion_list), NULL);
491 slab_init(&anon->frame_slab, sizeof(struct memobj_frame_list), NULL);
493 anon->frame_slab_refilling = false;
495 anon->vregion_list = NULL;
496 anon->frame_list = NULL;
501 * \brief Destroy the object
*
* Tears down all associated vregions, then releases the filled frames.
* \param delta_caps — NOTE(review): parameter is named delete_caps; when set,
* frame capabilities are deleted during teardown (cap_delete below).
504 errval_t memobj_destroy_anon(struct memobj *memobj, bool delete_caps)
506 struct memobj_anon *m = (struct memobj_anon *)memobj;
508 errval_t err = SYS_ERR_OK;
// First pass: destroy every vregion mapped over this memobj and free
// its list node back to the slab.
510 struct vregion_list *vwalk = m->vregion_list;
512 err = vregion_destroy(vwalk->region);
513 if (err_is_fail(err)) {
516 struct vregion_list *old = vwalk;
518 slab_free(&m->vregion_slab, old);
// Second pass: walk the frame list, deleting frame caps and freeing nodes.
521 struct memobj_frame_list *fwalk = m->frame_list;
524 err = cap_delete(fwalk->frame);
525 if (err_is_fail(err)) {
529 struct memobj_frame_list *old = fwalk;
531 slab_free(&m->frame_slab, old);
// NOTE(review): the function continues past the end of this excerpt
// (final cleanup / return not visible).