/**
 * \file
 * \brief Memory object of anonymous type.
 *
 * The object maintains a list of frames and a list of vregions.
 * The lists are backed by slabs. The slabs may have to be grown,
 * in which case the object will use #vspace_pinned_alloc.
 *
 * morecore uses this memory object, so it cannot use malloc for its
 * lists. Therefore, this uses slabs and grows them using pinned memory.
 */

/*
 * Copyright (c) 2009, 2010, 2011, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */
24 #include <barrelfish/barrelfish.h>
25 #include "vspace_internal.h"
28 * \brief Map the memory object into a region
30 * \param memobj The memory object
31 * \param region The region to add
33 static errval_t map_region(struct memobj *memobj, struct vregion *vregion)
36 struct memobj_anon *anon = (struct memobj_anon*)memobj;
39 struct vregion_list *data = slab_alloc(&anon->vregion_slab);
42 err = vspace_pinned_alloc(&buf, VREGION_LIST);
43 if (err_is_fail(err)) {
44 return err_push(err, LIB_ERR_VSPACE_PINNED_ALLOC);
46 slab_grow(&anon->vregion_slab, buf,
47 VSPACE_PINNED_UNIT * sizeof(struct vregion_list));
48 data = slab_alloc(&anon->vregion_slab);
50 return LIB_ERR_SLAB_ALLOC_FAIL;
53 data->region = vregion;
55 // Insert into the list
56 struct vregion_list *walk = anon->vregion_list;
57 anon->vregion_list = data;
64 * \brief Unmap the memory object from a region
66 * \param memobj The memory object
67 * \param region The region to remove
69 static errval_t unmap_region(struct memobj *memobj, struct vregion *vregion)
71 struct memobj_anon *anon = (struct memobj_anon*)memobj;
74 /* Unmap the affected area in the pmap */
75 struct vspace *vspace = vregion_get_vspace(vregion);
76 struct pmap *pmap = vspace_get_pmap(vspace);
77 genvaddr_t vregion_base = vregion_get_base_addr(vregion);
78 genvaddr_t vregion_off = vregion_get_offset(vregion);
79 size_t vregion_size = vregion_get_size(vregion);
80 genvaddr_t vregion_end = vregion_off + vregion_size;
82 //printf("(%s:%d) unmap(0x%"PRIxGENVADDR", memobj->size = %zd) vregion size = %zd\n", __FILE__, __LINE__, vregion_base + vregion_off, memobj->size, vregion_size);
84 // unmap all affected frames
85 struct memobj_frame_list *fwalk = anon->frame_list;
86 struct memobj_frame_list *fprev = NULL;
87 //printf("vregion_off = 0x%"PRIxGENVADDR"\n", vregion_off);
88 //printf("vregion_end = 0x%"PRIxGENVADDR"\n", vregion_end);
89 err = LIB_ERR_VSPACE_VREGION_NOT_FOUND;
91 //printf("fwalk->offset = %zd\n", fwalk->offset);
92 //printf("fwalk->next = %p\n", fwalk->next);
93 if (fwalk->offset < vregion_off) {
98 else if (fwalk->offset < vregion_end) {
99 err = pmap->f.unmap(pmap, vregion_base + vregion_off, fwalk->size, NULL);
100 if (err_is_fail(err)) {
101 return err_push(err, LIB_ERR_PMAP_UNMAP);
104 /* Remove the vregion from the list */
105 struct vregion_list *prev = NULL;
106 for (struct vregion_list *elt = anon->vregion_list; elt != NULL;
108 if (elt->region == vregion) {
110 assert(elt == anon->vregion_list);
111 anon->vregion_list = elt->next;
113 assert(prev->next == elt);
114 prev->next = elt->next;
116 slab_free(&anon->vregion_slab, elt);
120 vregion_off += fwalk->size;
126 return err; // XXX: not quite the right error
130 * \brief Set the protection on a range
132 * \param memobj The memory object
133 * \param region The vregion to modify the mappings on
134 * \param offset Offset into the memory object
135 * \param range The range of space to set the protection for
136 * \param flags The protection flags
138 static errval_t protect(struct memobj *memobj, struct vregion *vregion,
139 genvaddr_t offset, size_t range, vs_prot_flags_t flags)
141 struct vspace *vspace = vregion_get_vspace(vregion);
142 struct pmap *pmap = vspace_get_pmap(vspace);
143 genvaddr_t base = vregion_get_base_addr(vregion);
144 genvaddr_t vregion_off = vregion_get_offset(vregion);
147 err = pmap->f.modify_flags(pmap, base + vregion_off + offset, range,
149 if (err_is_fail(err)) {
150 return err_push(err, LIB_ERR_PMAP_MODIFY_FLAGS);
159 * \param memobj The memory object
160 * \param region The vregion to modify the state on
161 * \param offset Offset into the memory object
162 * \param range The range of space to pin
164 static errval_t pin(struct memobj *memobj, struct vregion *vregion,
165 genvaddr_t offset, size_t range)
171 * \brief Unpin a range
173 * \param memobj The memory object
174 * \param region The vregion to modify the state on
175 * \param offset Offset into the memory object
176 * \param range The range of space to unpin
178 static errval_t unpin(struct memobj *memobj, struct vregion *vregion,
179 genvaddr_t offset, size_t range)
185 * \brief Set a frame for an offset into the memobj
187 * \param memobj The memory object
188 * \param offset Offset into the memory object
189 * \param frame The frame cap for the offset
190 * \param size The size of frame cap
192 * Pagefault relies on frames inserted in order
194 static errval_t fill(struct memobj *memobj, genvaddr_t offset, struct capref frame,
198 struct memobj_anon *anon = (struct memobj_anon*)memobj;
200 assert((offset & BASE_PAGE_MASK) == 0);
202 // AB: allow frame to overlap end of memobj; that might have been the most
203 // efficient allocation size (even if the end of the frame will be unusable)
204 if (offset >= memobj->size) {
205 return LIB_ERR_MEMOBJ_WRONG_OFFSET;
209 struct memobj_frame_list *new = slab_alloc(&anon->frame_slab);
212 err = vspace_pinned_alloc(&buf, FRAME_LIST);
213 if (err_is_fail(err)) {
214 return err_push(err, LIB_ERR_VSPACE_PINNED_ALLOC);
216 slab_grow(&anon->frame_slab, buf,
217 VSPACE_PINNED_UNIT * sizeof(struct memobj_frame_list));
218 new = slab_alloc(&anon->frame_slab);
220 return LIB_ERR_SLAB_ALLOC_FAIL;
223 new->offset = offset;
228 struct memobj_frame_list *walk = anon->frame_list;
229 struct memobj_frame_list *prev = NULL;
231 if (new->offset < walk->offset) {
232 if ((prev != NULL && new->offset < prev->offset + prev->size)
233 || new->offset + new->size > walk->offset) {
234 slab_free(&anon->frame_slab, new);
235 return LIB_ERR_MEMOBJ_DUPLICATE_FILL;
241 assert(walk == anon->frame_list);
242 anon->frame_list = new;
250 if (new->offset < prev->offset + prev->size) {
251 slab_free(&anon->frame_slab, new);
252 return LIB_ERR_MEMOBJ_DUPLICATE_FILL;
257 assert(anon->frame_list == NULL);
258 anon->frame_list = new;
265 * \brief Unmap/remove one frame from the end of the memobj
267 * \param memobj The memory object
268 * \param offset The offset from which to remove a frame from
269 * \param ret_frame Pointer to return the removed frame
271 * This will try to remove one frame at an offset greater than the one
272 * specified. Call this function again and again till it returns the
273 * LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET error to get all frames.
275 static errval_t unfill(struct memobj *memobj, genvaddr_t offset,
276 struct capref *ret_frame, genvaddr_t *ret_offset)
279 struct memobj_anon *anon = (struct memobj_anon*)memobj;
281 // Walk the ordered list of frames to find one right frame
282 struct memobj_frame_list *fwalk = anon->frame_list;
283 struct memobj_frame_list *fprev = NULL;
285 if (fwalk->offset < offset) {
293 // The specified offset is too high.
294 return LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET;
298 { // Unmap the frame from all vregions
299 struct vregion_list *vwalk = anon->vregion_list;
301 struct vspace *vspace = vregion_get_vspace(vwalk->region);
302 struct pmap *pmap = vspace_get_pmap(vspace);
303 genvaddr_t vregion_base = vregion_get_base_addr(vwalk->region);
306 assert((vregion_base + fwalk->offset) % BASE_PAGE_SIZE == 0);
307 //printf("(%s:%d) unmap(0x%"PRIxGENVADDR", %zd)\n", __FILE__, __LINE__, vregion_base + fwalk->offset, fwalk->size);
308 err = pmap->f.unmap(pmap, vregion_base + fwalk->offset, fwalk->size,
310 if (err_is_fail(err)) {
311 return err_push(err, LIB_ERR_PMAP_UNMAP);
313 assert(retsize == fwalk->size);
319 *ret_offset = fwalk->offset;
320 *ret_frame = fwalk->frame;
322 fprev->next = fwalk->next;
324 anon->frame_list = fwalk->next;
326 slab_free(&anon->frame_slab, fwalk);
331 * \brief Page fault handler
333 * \param memobj The memory object
334 * \param region The associated vregion
335 * \param offset Offset into memory object of the page fault
336 * \param type The fault type
338 * Locates the frame for the offset and maps it in.
339 * Relies on fill inserting frames in order.
341 static errval_t pagefault(struct memobj *memobj, struct vregion *vregion,
342 genvaddr_t offset, vm_fault_type_t type)
345 struct memobj_anon *anon = (struct memobj_anon*)memobj;
347 // Walk the ordered list for the frame and map it in
348 struct memobj_frame_list *walk = anon->frame_list;
350 if (offset >= walk->offset && offset < walk->offset + walk->size) {
351 struct vspace *vspace = vregion_get_vspace(vregion);
352 struct pmap *pmap = vspace_get_pmap(vspace);
353 genvaddr_t base = vregion_get_base_addr(vregion);
354 genvaddr_t vregion_off = vregion_get_offset(vregion);
355 vregion_flags_t flags = vregion_get_flags(vregion);
356 err = pmap->f.map(pmap, base + vregion_off + walk->offset,
357 walk->frame, 0, walk->size, flags, NULL, NULL);
358 if (err_is_fail(err)) {
359 return err_push(err, LIB_ERR_PMAP_MAP);
366 return LIB_ERR_MEMOBJ_WRONG_OFFSET;
370 * \brief Free up some pages by placing them in the backing storage
372 * \param memobj The memory object
373 * \param size The amount of space to free up
374 * \param frames An array of capref frames to return the freed pages
375 * \param num_frames The number of frames returned
377 * This will affect all the vregions that are associated with the object
379 static errval_t pager_free(struct memobj *memobj, size_t size,
380 struct capref *frames, size_t num_frames)
388 * \param memobj The memory object
389 * \param size Size of the memory region
390 * \param flags Memory object specific flags
392 * This object handles multiple frames.
393 * The frames are mapped in on demand.
395 errval_t memobj_create_anon(struct memobj_anon *anon, size_t size,
396 memobj_flags_t flags)
398 struct memobj *memobj = &anon->m;
400 /* Generic portion */
401 memobj->f.map_region = map_region;
402 memobj->f.unmap_region = unmap_region;
403 memobj->f.protect = protect;
405 memobj->f.unpin = unpin;
406 memobj->f.fill = fill;
407 memobj->f.unfill = unfill;
408 memobj->f.pagefault = pagefault;
409 memobj->f.pager_free = pager_free;
412 memobj->flags = flags;
414 memobj->type = ANONYMOUS;
416 /* anon specific portion */
417 slab_init(&anon->vregion_slab, sizeof(struct vregion_list), NULL);
418 slab_init(&anon->frame_slab, sizeof(struct memobj_frame_list), NULL);
420 anon->vregion_list = NULL;
421 anon->frame_list = NULL;
426 * \brief Destroy the object
430 errval_t memobj_destroy_anon(struct memobj *memobj)