/**
 * \file
 * \brief Slot allocator for a single cnode
 *
 * Allocators should be created with the worst-case slab requirement.
 * The worst-case requirement is the number of slots in the cnode / 2.
 */
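/*
 * Why nslots / 2: free slots are tracked as a list of contiguous regions,
 * one struct cnode_meta per region. Free regions are separated by at least
 * one allocated slot, so a cnode with nslots slots can split into at most
 * roughly nslots / 2 free regions, and the slab backing the list never needs
 * more entries than that.
 */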

/*
 * Copyright (c) 2010, ETH Zurich.
 * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <barrelfish/barrelfish.h>
#include <barrelfish/caddr.h>
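
/**
 * \brief Allocate one slot from the front of the first free region.
 *
 * Hands out the lowest free slot tracked by the allocator and shrinks the
 * head region accordingly.
 */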
static errval_t salloc(struct slot_allocator *ca, struct capref *ret)
{
    struct single_slot_allocator *sca = (struct single_slot_allocator*)ca;

    if (sca->a.space == 0) {
        return LIB_ERR_SLOT_ALLOC_NO_SPACE;
    }

    thread_mutex_lock(&ca->mutex);

    // Slot to return
    ret->cnode = sca->cnode;
    ret->slot  = sca->head->slot;

#if 0 // debug output, disabled by default
    char buf[256];
    debug_print_capref(buf, 256, *ret);
    debug_printf("%p->salloc: ret = %.*s\n", ca, 256, buf);
#endif

    // Hand out the first slot of the head region
    sca->head->space--;
    sca->head->slot++;
    sca->a.space--;

    // Drop the head meta-data entry if its region is now empty
    if (sca->head->space == 0) {
        struct cnode_meta *walk = sca->head;
        sca->head = walk->next;
        slab_free(&sca->slab, walk);
    }

    thread_mutex_unlock(&ca->mutex);
    return SYS_ERR_OK;
}
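
/**
 * \brief Return 'count' slots starting at 'slot' to the free list.
 *
 * The free list is kept sorted by slot number; freed ranges are merged with
 * adjacent free regions where possible. Returns LIB_ERR_SLOT_UNALLOCATED if
 * the start of the range falls inside a region that is already free.
 */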
static errval_t free_slots(struct single_slot_allocator *sca, cslot_t slot,
                           cslot_t count, struct thread_mutex *mutex)
{
    thread_mutex_lock(mutex);

    errval_t err = SYS_ERR_OK;

    struct cnode_meta *walk = sca->head;
    struct cnode_meta *prev = NULL;
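
    // Invariant: the list of free regions is sorted by slot number and
    // regions never overlap; the cases below preserve this by merging or
    // inserting at the correct position.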

    // Entire cnode was allocated: start a new free list
    if (sca->head == NULL) {
        sca->head = slab_alloc(&sca->slab);
        sca->head->slot = slot;
        sca->head->space = count;
        sca->head->next = NULL;
        goto finish;
    }

    // Freeing a range that ends right where head begins: extend head downwards
    if (slot + count == sca->head->slot) {
        sca->head->slot = slot;
        sca->head->space += count;
        goto finish;
    }

    // Freeing before head without touching it: prepend a new region
    if (slot < sca->head->slot) {
        struct cnode_meta *new = slab_alloc(&sca->slab);
        new->slot  = slot;
        new->space = count;
        new->next  = sca->head;
        sca->head  = new;
        goto finish;
    }

    while (walk != NULL) {
        // Freeing at the upper edge of walk: extend walk upwards
        if (slot == walk->slot + walk->space) {
            walk->space += count;

            // Check whether walk now touches its successor and can be merged
            struct cnode_meta *next = walk->next;
            if (next && next->slot == walk->slot + walk->space) {
                walk->space += next->space;
                walk->next = next->next;
                slab_free(&sca->slab, next);
            }

            goto finish;
        }
        else if (slot < walk->slot + walk->space) {
            // The slot lies inside a region that is already free
            err = LIB_ERR_SLOT_UNALLOCATED;
            goto unlock;
        }

        // Freeing just before walk->next: extend walk->next downwards
        if (walk->next && slot + count == walk->next->slot) {
            walk->next->slot = slot;
            walk->next->space += count;
            goto finish;
        }

        // Freeing strictly between walk and walk->next: insert a new region
        if (walk->next && slot < walk->next->slot) {
            struct cnode_meta *new = walk->next;
            walk->next = slab_alloc(&sca->slab);
            walk->next->slot = slot;
            walk->next->space = count;
            walk->next->next = new;
            goto finish;
        }

        prev = walk;
        walk = walk->next;
    }

    // Freeing after the end of the list: append a new region
    prev->next = slab_alloc(&sca->slab);
    prev->next->slot = slot;
    prev->next->space = count;
    prev->next->next = NULL;

finish:
    sca->a.space += count;

unlock:
    thread_mutex_unlock(mutex);
    return err;
}
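
/**
 * \brief Free a single slot previously handed out by this allocator.
 *
 * Fails with LIB_ERR_SLOT_ALLOC_WRONG_CNODE if the capref does not belong to
 * the cnode this allocator manages.
 */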
static errval_t sfree(struct slot_allocator *ca, struct capref cap)
{
    struct single_slot_allocator *sca = (struct single_slot_allocator*)ca;
    if (!cnodecmp(cap.cnode, sca->cnode)) {
        return LIB_ERR_SLOT_ALLOC_WRONG_CNODE;
    }

    return free_slots(sca, cap.slot, 1, &ca->mutex);
}
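
/**
 * \brief Count the slots that are currently free in this allocator.
 */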
cslot_t single_slot_alloc_freecount(struct single_slot_allocator *this)
{
    cslot_t freecount = 0;
    for (struct cnode_meta *walk = this->head; walk; walk = walk->next) {
        freecount += walk->space;
    }
    return freecount;
}
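
/**
 * \brief Extend the allocator to cover 'newslotcount' slots.
 *
 * Only the allocator's book-keeping is updated; the underlying cnode must
 * already have been grown by the caller, and newslotcount must be larger
 * than the current slot count.
 */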
errval_t single_slot_alloc_resize(struct single_slot_allocator *this,
                                  cslot_t newslotcount)
{
    errval_t err;

    cslot_t grow = newslotcount - this->a.nslots;

    // Refill the slab allocator so it can track the additional free regions
    size_t bufgrow = SINGLE_SLOT_ALLOC_BUFLEN(grow);
    void *buf = malloc(bufgrow);
    if (!buf) {
        return LIB_ERR_MALLOC_FAIL;
    }
    slab_grow(&this->slab, buf, bufgrow);

    // Update free slot metadata: register the new slots as free
    err = free_slots(this, this->a.nslots, grow, &this->a.mutex);
    if (err_is_fail(err)) {
        return err;
    }

    // Update generic metadata
    this->a.nslots = newslotcount;

    return SYS_ERR_OK;
}
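
/**
 * \brief Initialise an allocator over an existing cnode.
 *
 * 'buf'/'buflen' provide backing memory for the slab allocator that stores
 * the free-region meta-data; SINGLE_SLOT_ALLOC_BUFLEN(nslots) gives the
 * expected size.
 */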
errval_t single_slot_alloc_init_raw(struct single_slot_allocator *ret,
                                    struct capref cap, struct cnoderef cnode,
                                    cslot_t nslots, void *buf, size_t buflen)
{
    /* Generic part */
    ret->a.alloc = salloc;
    ret->a.free  = sfree;
    ret->a.space = nslots;
    ret->a.nslots = nslots;
    thread_mutex_init(&ret->a.mutex);

    /* Specific part */
    ret->cap   = cap;
    ret->cnode = cnode;
    slab_init(&ret->slab, sizeof(struct cnode_meta), NULL);

    // Check for callers that do not provide enough buffer space
    if (buflen > 0) {
        // On ARM, __builtin_return_address does not accept arguments != 0
#if !defined(__arm__) && !defined(__aarch64__)
        size_t buflen_proper = SINGLE_SLOT_ALLOC_BUFLEN(nslots);
        if (buflen != buflen_proper) {
            debug_printf("******* FIXME: %s buflen=%zu != buflen_proper=%zu\n"
                         "call stack: %p %p\n",
                         __FUNCTION__, buflen, buflen_proper,
                         __builtin_return_address(0),
                         __builtin_return_address(1));
        }
#endif
        slab_grow(&ret->slab, buf, buflen);
    }

    // Initially the whole cnode is one free region starting at slot 0
    ret->head = slab_alloc(&ret->slab);
    assert(ret->head != NULL);
    ret->head->slot = 0;
    ret->head->space = nslots;
    ret->head->next = NULL;

    return SYS_ERR_OK;
}
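
/**
 * \brief Convenience initialiser: creates a fresh cnode with (at least)
 * 'nslots' slots and allocates the slab buffer with malloc.
 *
 * The number of slots actually created is returned in 'retslots' if non-NULL.
 */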
errval_t single_slot_alloc_init(struct single_slot_allocator *ret,
                                cslot_t nslots, cslot_t *retslots)
{
    errval_t err;

    struct capref cap;
    struct cnoderef cnode;
    err = cnode_create(&cap, &cnode, nslots, &nslots);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_CNODE_CREATE);
    }
    size_t buflen = SINGLE_SLOT_ALLOC_BUFLEN(nslots);
    void *buf = malloc(buflen);
    if (!buf) {
        return LIB_ERR_MALLOC_FAIL;
    }

    err = single_slot_alloc_init_raw(ret, cap, cnode, nslots, buf, buflen);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SINGLE_SLOT_ALLOC_INIT_RAW);
    }

    // Report the slot count actually created, which may differ from the request
    if (retslots) {
        *retslots = nslots;
    }
    return SYS_ERR_OK;
}
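
/*
 * Usage sketch (illustrative only, not part of the library): how a caller
 * might create an allocator and allocate/free a slot. The slot count 256 is
 * arbitrary, and error handling is reduced to asserts.
 *
 *   struct single_slot_allocator sa;
 *   cslot_t nslots;
 *   errval_t err = single_slot_alloc_init(&sa, 256, &nslots);
 *   assert(err_is_ok(err));
 *
 *   struct capref slot;
 *   err = sa.a.alloc(&sa.a, &slot);   // take a slot from the cnode
 *   assert(err_is_ok(err));
 *
 *   // ... copy or retype a capability into 'slot' and use it ...
 *
 *   err = sa.a.free(&sa.a, slot);     // give the slot back when unused
 *   assert(err_is_ok(err));
 */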