morecore: fix leaking slot allocator (closes T153)
author    Reto Achermann <reto.achermann@inf.ethz.ch>
          Thu, 6 Aug 2015 11:44:11 +0000 (13:44 +0200)
committer Reto Achermann <reto.achermann@inf.ethz.ch>
          Thu, 6 Aug 2015 11:46:10 +0000 (13:46 +0200)
Signed-off-by: Reto Achermann <reto.achermann@inf.ethz.ch>
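
vspace_mmu_aware_map() used to take a frame capability slot allocated by
the caller: every caller had to slot_alloc() before mapping and slot_free()
again on the retry and failure paths, and some of those paths leaked the
slot. The slot allocator is now stored in struct vspace_mmu_aware; callers
hand one to vspace_mmu_aware_init_aligned() (or NULL for the default), and
vspace_mmu_aware_map() allocates the slot itself and frees it again when
mapping fails. morecore, multi_slot_alloc and the thread slab refill are
updated accordingly.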

include/barrelfish/vspace_mmu_aware.h
lib/barrelfish/morecore.c
lib/barrelfish/slot_alloc/multi_slot_alloc.c
lib/barrelfish/threads.c
lib/barrelfish/vspace/mmu_aware.c

include/barrelfish/vspace_mmu_aware.h
index 064231b..9db6188 100644 (file)
@@ -31,6 +31,7 @@ struct vspace_mmu_aware {
     size_t size;
     size_t alignment;
     size_t consumed;
+    struct slot_allocator *slot_alloc; ///< allocator for frame cap slots
     struct vregion vregion;           ///< Needs just one vregion
     struct memobj_anon memobj;        ///< Needs just one memobj
     lvaddr_t offset;    ///< Offset of free space in anon
@@ -38,13 +39,15 @@ struct vspace_mmu_aware {
 };
 
 errval_t vspace_mmu_aware_init(struct vspace_mmu_aware *state, size_t size);
+void vspace_mmu_aware_set_slot_alloc(struct vspace_mmu_aware *state,
+                                     struct slot_allocator *slot_allocator);
 errval_t vspace_mmu_aware_init_aligned(struct vspace_mmu_aware *state,
+                                       struct slot_allocator *slot_alloc,
                                        size_t size, size_t alignment,
                                        vregion_flags_t flags);
 errval_t vspace_mmu_aware_reset(struct vspace_mmu_aware *state,
                                 struct capref frame, size_t size);
-errval_t vspace_mmu_aware_map(struct vspace_mmu_aware *state,
-                              struct capref frame, size_t req_size,
+errval_t vspace_mmu_aware_map(struct vspace_mmu_aware *state, size_t req_size,
                               void **retbuf, size_t *retsize);
 errval_t vspace_mmu_aware_unmap(struct vspace_mmu_aware *state,
                                 lvaddr_t base, size_t bytes);
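
The new calling convention, as a minimal sketch (hypothetical caller; the
region size, alignment and flags are illustrative, and error handling is
elided):

    struct vspace_mmu_aware state;

    /* NULL selects the default slot allocator (see mmu_aware.c below) */
    errval_t err = vspace_mmu_aware_init_aligned(&state, NULL, HEAP_REGION,
                                                 BASE_PAGE_SIZE,
                                                 VREGION_FLAGS_READ_WRITE);

    /* no caller-allocated frame slot is passed to the map call any more */
    void *buf;
    size_t retsize;
    err = vspace_mmu_aware_map(&state, BASE_PAGE_SIZE, &buf, &retsize);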
lib/barrelfish/morecore.c
index e61c489..1ba853c 100644 (file)
@@ -48,15 +48,8 @@ static void *morecore_alloc(size_t bytes, size_t *retbytes)
     size_t mapped = 0;
     size_t step = bytes;
     while (mapped < bytes) {
-        struct capref cap;
-        err = slot_alloc(&cap);
-        if (err_is_fail(err)) {
-            USER_PANIC_ERR(err, "slot_alloc failed");
-        }
-
         void *mid_buf = NULL;
-        err = vspace_mmu_aware_map(&state->mmu_state, cap, step,
-                                   &mid_buf, &step);
+        err = vspace_mmu_aware_map(&state->mmu_state, step, &mid_buf, &step);
         if (err_is_ok(err)) {
             if (buf == NULL) {
                 buf = mid_buf;
@@ -68,8 +61,3 @@ static void *morecore_alloc(size_t bytes, size_t *retbytes)
               for a very large frame, will try asking for smaller one.
              */
             if (err_no(err) == LIB_ERR_FRAME_CREATE_MS_CONSTRAINTS) {
-                err = slot_free(cap);
-                if (err_is_fail(err)) {
-                    debug_err(__FILE__, __func__, __LINE__, err,
-                              "slot_free failed");
-                }
@@ -122,8 +114,8 @@ errval_t morecore_init(size_t alignment)
 #endif
     morecore_flags |= (alignment == LARGE_PAGE_SIZE ? VREGION_FLAGS_LARGE : 0);
 
-    err = vspace_mmu_aware_init_aligned(&state->mmu_state, HEAP_REGION,
-            alignment, morecore_flags);
+    err = vspace_mmu_aware_init_aligned(&state->mmu_state, NULL, HEAP_REGION,
+                                        alignment, morecore_flags);
     if (err_is_fail(err)) {
         return err_push(err, LIB_ERR_VSPACE_MMU_AWARE_INIT);
     }
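
Note that morecore passes NULL here: the heap region falls back to the
default slot allocator (see mmu_aware.c below), and the
LIB_ERR_FRAME_CREATE_MS_CONSTRAINTS retry path above no longer has a
caller-held slot to return before retrying with a smaller request.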
lib/barrelfish/slot_alloc/multi_slot_alloc.c
index cb91e6e..e790e0f 100644 (file)
@@ -94,19 +94,15 @@ errval_t multi_alloc(struct slot_allocator *ca, struct capref *ret)
         if (!buf) { /* Grow slab */
             // Allocate slot out of the list
             mca->a.space--;
-            struct capref frame;
-            err = mca->head->a.a.alloc(&mca->head->a.a, &frame);
-            if (err_is_fail(err)) {
-                thread_mutex_unlock(&ca->mutex);
-                return err_push(err, LIB_ERR_SLOT_ALLOC);
-            }
+            // make sure the map below takes its slot from the current head
+            vspace_mmu_aware_set_slot_alloc(&mca->mmu_state, &mca->head->a.a);
 
             thread_mutex_unlock(&ca->mutex); // following functions may call
                                              // slot_alloc
             void *slab_buf;
             size_t size;
-            err = vspace_mmu_aware_map(&mca->mmu_state, frame,
-                                       mca->slab.blocksize, &slab_buf, &size);
+            err = vspace_mmu_aware_map(&mca->mmu_state, mca->slab.blocksize,
+                                       &slab_buf, &size);
             if (err_is_fail(err)) {
                 return err_push(err, LIB_ERR_VSPACE_MMU_AWARE_MAP);
             }
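
Re-installing &mca->head->a.a right before the map matters because
mca->head changes as new slot allocators are prepended while the allocator
grows: vspace_mmu_aware_map() takes the frame's slot from state->slot_alloc
(see mmu_aware.c below), so mmu_state must point at the current head rather
than whichever allocator an earlier refill installed. The mca->a.space--
above still accounts for the one slot that map will consume.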
lib/barrelfish/threads.c
index cdac9f1..1bac6da 100644 (file)
@@ -212,18 +212,12 @@ static errval_t refill_thread_slabs(struct slab_allocator *slabs)
 {
     assert(slabs == &thread_slabs);
 
-    struct capref frame;
     size_t size;
     void *buf;
     errval_t err;
 
-    err = slot_alloc(&frame);
-    if (err_is_fail(err)) {
-        return err_push(err, LIB_ERR_SLOT_ALLOC);
-    }
-
     size_t blocksize = sizeof(struct thread) + tls_block_total_len;
-    err = vspace_mmu_aware_map(&thread_slabs_vm, frame, blocksize, &buf, &size);
+    err = vspace_mmu_aware_map(&thread_slabs_vm, blocksize, &buf, &size);
     if (err_is_fail(err)) {
         return err_push(err, LIB_ERR_VSPACE_MMU_AWARE_MAP);
     }
@@ -1165,13 +1159,7 @@ void threads_prepare_to_span(dispatcher_handle_t newdh)
             void *buf;
             errval_t err;
 
-            err = slot_alloc(&frame);
-            if (err_is_fail(err)) {
-                USER_PANIC_ERR(err, "in slot_alloc while prefilling thread slabs\n");
-            }
-
             size_t blocksize = sizeof(struct thread) + tls_block_total_len;
-            err = vspace_mmu_aware_map(&thread_slabs_vm, frame, blocksize,
+            err = vspace_mmu_aware_map(&thread_slabs_vm, blocksize,
                                        &buf, &size);
             if (err_is_fail(err)) {
-                slot_free(frame);
lib/barrelfish/vspace/mmu_aware.c
index 0d10eac..87ddf99 100644 (file)
 /// Minimum free memory before we return it to memory server
 #define MIN_MEM_FOR_FREE        (1 * 1024 * 1024)
 
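+/**
+ * \brief Set the slot allocator used for frame capability slots.
+ *
+ * \param state          vspace_mmu_aware state to update
+ * \param slot_allocator allocator to use, or NULL to select the default
+ */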
+void vspace_mmu_aware_set_slot_alloc(struct vspace_mmu_aware *state,
+                                     struct slot_allocator *slot_allocator)
+{
+    if (slot_allocator) {
+        state->slot_alloc = slot_allocator;
+    } else {
+        state->slot_alloc = get_default_slot_allocator();
+    }
+}
+
 /**
  * \brief Initialize vspace_mmu_aware struct
  *
  */
 errval_t vspace_mmu_aware_init(struct vspace_mmu_aware *state, size_t size)
 {
-    return vspace_mmu_aware_init_aligned(state, size, 0,
+    return vspace_mmu_aware_init_aligned(state, NULL, size, 0,
             VREGION_FLAGS_READ_WRITE);
 }
 
 errval_t vspace_mmu_aware_init_aligned(struct vspace_mmu_aware *state,
-        size_t size, size_t alignment, vregion_flags_t flags)
+                                       struct slot_allocator *slot_allocator,
+                                       size_t size, size_t alignment,
+                                       vregion_flags_t flags)
 {
     state->size = size;
     state->consumed = 0;
     state->alignment = alignment;
 
+    if (slot_allocator) {
+        state->slot_alloc = slot_allocator;
+    } else {
+        state->slot_alloc = get_default_slot_allocator();
+    }
+
     errval_t err;
 
     size = ROUND_UP(size, BASE_PAGE_SIZE);
@@ -79,12 +97,13 @@ errval_t vspace_mmu_aware_init_aligned(struct vspace_mmu_aware *state,
  * or region of memory). This is to facilitate retrying with different
  * constraints.
  */
-errval_t vspace_mmu_aware_map(struct vspace_mmu_aware *state,
-                              struct capref frame, size_t req_size,
+errval_t vspace_mmu_aware_map(struct vspace_mmu_aware *state, size_t req_size,
                               void **retbuf, size_t *retsize)
 {
     errval_t err;
 
+    struct capref frame;
+
     // Calculate how much still to map in
     size_t origsize = req_size;
     assert(state->mapoffset >= state->offset);
@@ -124,6 +143,11 @@ errval_t vspace_mmu_aware_map(struct vspace_mmu_aware *state,
         }
         // Create frame of appropriate size
 allocate:
+        err = state->slot_alloc->alloc(state->slot_alloc, &frame);
+        if (err_is_fail(err)) {
+            return err_push(err, LIB_ERR_SLOT_ALLOC_NO_SPACE);
+        }
+
         err = frame_create(frame, alloc_size, &ret_size);
         if (err_is_fail(err)) {
             if (err_no(err) == LIB_ERR_RAM_ALLOC_MS_CONSTRAINTS) {
@@ -146,6 +170,7 @@ allocate:
                 debug_err(__FILE__, __func__, __LINE__, err,
                           "cap_delete failed");
             }
+            state->slot_alloc->free(state->slot_alloc, frame);
             return LIB_ERR_VSPACE_MMU_AWARE_NO_SPACE;
         }
 
@@ -180,6 +205,7 @@ errval_t vspace_mmu_aware_reset(struct vspace_mmu_aware *state,
     struct vregion *vregion;
     struct capref oldframe;
     void *vbuf;
+
     // create copy of new region
     err = slot_alloc(&oldframe);
     if (err_is_fail(err)) {
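
Taken together, the frame-slot lifecycle now lives entirely inside
mmu_aware.c. A minimal sketch of the resulting client pattern, reusing the
multi_slot_alloc names from the hunks above (error handling elided):

    /* choose which allocator supplies frame slots; NULL would select
       get_default_slot_allocator() */
    vspace_mmu_aware_set_slot_alloc(&mca->mmu_state, &mca->head->a.a);

    /* the map allocates the slot, creates and maps the frame, and frees
       the slot again if mapping fails, so there is nothing left for the
       caller to leak */
    void *slab_buf;
    size_t size;
    err = vspace_mmu_aware_map(&mca->mmu_state, mca->slab.blocksize,
                               &slab_buf, &size);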