failure MULTI_SLOT_ALLOC_INIT "Failure in multi_slot_alloc_init()",
failure MULTI_SLOT_ALLOC_INIT_RAW "Failure in multi_slot_alloc_init_raw()",
failure SINGLE_SLOT_ALLOC "Failure in single_slot_alloc()",
+ failure RANGE_ALLOC_NOT_HEAD "Function called on non-head range allocator",
failure SLOT_ALLOC "Failure in slot_alloc()",
failure SLOT_FREE "Failure in slot_free()",
failure SLOT_UNALLOCATED "slot_free() was called on an unallocated slot",
+ failure SLOT_ALLOC_REFILL "Failure in slot_alloc_refill()",
// vspace
failure VSPACE_CURRENT_INIT "Failure in vspace_current_init()",
failure SLOT_ALLOC_INIT "Failure initialising slot allocator",
failure MM_INIT "Failure in mm_init()",
failure MM_ADD "Failure in mm_add()",
+ failure MM_ADD_MULTI "Failure in mm_add_multi()",
failure MM_FREE "Failure in mm_free()",
failure NEW_NODE "Failed allocating new node from slot allocator",
failure OUT_OF_BOUNDS "Given memory base address / size exceeds bounds of allocator",
struct cnoderef cnode; ///< cnoderef for the cnode to allocate from
struct cnode_meta *meta; ///< Linked list of meta data
struct slab_allocator slab; ///< Slab allocation
- struct thread_mutex mutex; ///< Mutex for thread safety
+ struct thread_mutex mutex; ///< Mutex for thread safety (used when is_head == true)
+ struct range_slot_allocator *next; ///< Next slot allocator
+ bool is_head; ///< Is this instance head of a chain
};
// single_slot_alloc_init_raw() requires a specific buflen
cslot_t nslots);
errval_t range_slot_alloc_init(struct range_slot_allocator *ret,
cslot_t nslots, cslot_t *retslots);
+size_t range_slot_alloc_freecount(struct range_slot_allocator *alloc);
+errval_t range_slot_alloc_refill(struct range_slot_allocator *alloc, cslot_t slots);
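+/*
+ * Usage sketch (illustrative only, not part of this header): a caller keeps a
+ * single head allocator and grows the chain on demand. The slot counts and
+ * refill threshold below are made up for the example.
+ *
+ *   struct range_slot_allocator ra;
+ *   cslot_t got;
+ *   errval_t err = range_slot_alloc_init(&ra, 256, &got);   // ra becomes the chain head
+ *
+ *   if (range_slot_alloc_freecount(&ra) < 32) {
+ *       err = range_slot_alloc_refill(&ra, 256);            // append a new cnode to the chain
+ *   }
+ *
+ *   struct capref slots;
+ *   err = range_slot_alloc(&ra, 4, &slots);                 // 4 contiguous slots from any element
+ *   ...
+ *   err = range_slot_free(&ra, slots, 4);                   // return them to the owning element
+ */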
__END_DECLS
struct mem_region {
genpaddr_t mr_base;///< Address of the start of the region
enum region_type mr_type;///< Type of region
- uint8_t mr_bits;///< Size as a power of two shift (not module type)
+ gensize_t mr_bytes;///< Size of region in bytes (not module type)
bool mr_consumed;///< Flag for user code to mark region consumed
size_t mrmod_size;///< Size in bytes (module type only)
ptrdiff_t mrmod_data;///< Offset of module string (module type only)
void mm_destroy(struct mm *mm);
errval_t mm_add(struct mm *mm, struct capref cap, uint8_t sizebits,
genpaddr_t base);
+errval_t mm_add_multi(struct mm *mm, struct capref cap, gensize_t size,
+ genpaddr_t base);
errval_t mm_alloc(struct mm *mm, uint8_t sizebits, struct capref *retcap,
genpaddr_t *retbase);
errval_t mm_alloc_range(struct mm *mm, uint8_t sizebits, genpaddr_t minbase,
// non-overlapping child
if (do_range_check) {
int find_range_result = 0;
- struct cte *found_cte;
+ struct cte *found_cte = NULL;
err = mdb_find_range(get_type_root(src_cap->type), base, objsize * count,
MDB_RANGE_FOUND_SURROUNDING, &found_cte, &find_range_result);
// this should never return an error unless we mess up the
// return REVOKE_FIRST, if we found a cap inside the region
// (FOUND_INNER == 2) or overlapping the region (FOUND_PARTIAL == 3)
if (find_range_result >= MDB_RANGE_FOUND_INNER) {
+ printf("found existing region inside, or overlapping requested region\n");
return SYS_ERR_REVOKE_FIRST;
}
// return REVOKE_FIRST, if we found a cap that isn't our source
else if (find_range_result == MDB_RANGE_FOUND_SURROUNDING &&
!is_copy(&found_cte->cap, src_cap))
{
+ printf("found non source region fully covering requested region");
return SYS_ERR_REVOKE_FIRST;
}
}
assert(regions != NULL);
regions[*regions_index].mr_base = base_addr;
regions[*regions_index].mr_type = type;
- regions[*regions_index].mr_bits = size;
+ regions[*regions_index].mr_bytes = size;
regions[*regions_index].mr_consumed = false;
regions[*regions_index].mrmod_size = 0;
regions[*regions_index].mrmod_data = 0;
errval_t range_slot_alloc(struct range_slot_allocator *alloc, cslot_t nslots,
struct capref *ret)
{
- thread_mutex_lock(&alloc->mutex);
+ assert(alloc);
+ if (!alloc->is_head) {
+ return LIB_ERR_RANGE_ALLOC_NOT_HEAD;
+ }
+ struct range_slot_allocator *head = alloc;
+ thread_mutex_lock(&head->mutex);
- struct cnode_meta *prev = NULL;
- struct cnode_meta *walk = alloc->meta;
+ struct cnode_meta *prev = NULL, *walk = NULL;
- /* Look for large enough space */
- while(walk != NULL) {
- if (walk->space >= nslots) {
+ /* Look for large enough space in whole chain */
+ while (alloc) {
+ walk = alloc->meta;
+ prev = NULL;
+ while(walk != NULL) {
+ if (walk->space >= nslots) {
+ break;
+ }
+ prev = walk;
+ walk = walk->next;
+ }
+
+ /* Found space in this chain element */
+ if (walk != NULL) {
break;
}
- prev = walk;
- walk = walk->next;
+
+ alloc = alloc->next;
}
- /* Space not found */
- if (walk == NULL) {
- thread_mutex_unlock(&alloc->mutex);
+ if (alloc == NULL) {
+ thread_mutex_unlock(&head->mutex);
return LIB_ERR_SLOT_ALLOC_NO_SPACE;
}
slab_free(&alloc->slab, walk);
}
- thread_mutex_unlock(&alloc->mutex);
+ thread_mutex_unlock(&head->mutex);
return SYS_ERR_OK;
}
errval_t range_slot_free(struct range_slot_allocator *alloc, struct capref cap,
cslot_t nslots)
{
+ if (!alloc->is_head) {
+ return LIB_ERR_RANGE_ALLOC_NOT_HEAD;
+ }
+
errval_t err;
- thread_mutex_lock(&alloc->mutex);
+ struct range_slot_allocator *head = alloc;
+ thread_mutex_lock(&head->mutex);
+
+ // find the chain element that owns this cap's cnode
+ while (alloc && !cnodecmp(cap.cnode, alloc->cnode)) {
+ alloc = alloc->next;
+ }
+ if (!alloc) {
+ thread_mutex_unlock(&head->mutex);
+ return LIB_ERR_SLOT_ALLOC_WRONG_CNODE;
+ }
+ // alloc now points at the chain element owning this cnode
struct cnode_meta *prev = NULL;
struct cnode_meta *walk = alloc->meta;
while(walk != NULL) {
if ((cap.slot > walk->slot) && (walk->next == NULL)) {
err = insert_after(alloc, nslots, cap.slot, walk);
- thread_mutex_unlock(&alloc->mutex);
+ thread_mutex_unlock(&head->mutex);
return err;
}
if (cap.slot < walk->slot) {
err = insert_before(alloc, nslots, cap.slot, prev, walk);
- thread_mutex_unlock(&alloc->mutex);
+ thread_mutex_unlock(&head->mutex);
return err;
}
prev = walk;
assert(alloc->meta == NULL);
alloc->meta = slab_alloc(&alloc->slab);
if (alloc->meta == NULL) {
- thread_mutex_unlock(&alloc->mutex);
+ thread_mutex_unlock(&head->mutex);
return LIB_ERR_SLAB_ALLOC_FAIL;
}
alloc->meta->slot = cap.slot;
alloc->meta->space = nslots;
alloc->meta->next = NULL;
- thread_mutex_unlock(&alloc->mutex);
+ thread_mutex_unlock(&head->mutex);
return SYS_ERR_OK;
}
ret->meta->space = nslots;
ret->meta->next = NULL;
+ // mark this instance as a chain head; range_slot_alloc_refill() clears the flag on elements it appends
+ ret->is_head = true;
+
+ return SYS_ERR_OK;
+}
+
+size_t range_slot_alloc_freecount(struct range_slot_allocator *alloc)
+{
+ size_t count = 0;
+ // must be called on the chain head; since the return type is size_t,
+ // report 0 rather than an error code
+ if (!alloc->is_head) {
+ return 0;
+ }
+ struct range_slot_allocator *head = alloc;
+ thread_mutex_lock(&head->mutex);
+
+ struct range_slot_allocator *alloc_w = alloc;
+
+ while (alloc_w) {
+ struct cnode_meta *walk = alloc_w->meta;
+ while(walk != NULL) {
+ count += walk->space;
+ walk = walk->next;
+ }
+ alloc_w = alloc_w->next;
+ }
+
+ thread_mutex_unlock(&head->mutex);
+ return count;
+}
+
+errval_t range_slot_alloc_refill(struct range_slot_allocator *alloc, cslot_t slots)
+{
+ if (!alloc->is_head) {
+ return LIB_ERR_RANGE_ALLOC_NOT_HEAD;
+ }
+
+ struct range_slot_allocator *head = alloc;
+ thread_mutex_lock(&head->mutex);
+ // find last allocator in chain
+ while(alloc->next) {
+ alloc = alloc->next;
+ }
+ // allocate new chain element
+ alloc->next = malloc(sizeof(struct range_slot_allocator));
+ if (alloc->next == NULL) {
+ thread_mutex_unlock(&head->mutex);
+ return LIB_ERR_MALLOC_FAIL;
+ }
+
+ // initialize new instance
+ struct range_slot_allocator *n = alloc->next;
+ n->next = NULL;
+ cslot_t retslots;
+ errval_t err = range_slot_alloc_init(n, slots, &retslots);
+ if (err_is_fail(err)) {
+ alloc->next = NULL;
+ free(n);
+ thread_mutex_unlock(&head->mutex);
+ return err_push(err, LIB_ERR_SLOT_ALLOC_INIT);
+ }
+ assert(retslots >= slots);
+
+ n->is_head = false;
+
+ thread_mutex_unlock(&head->mutex);
return SYS_ERR_OK;
}
#define UNBITS_GENPA(bits) (((genpaddr_t)1) << (bits))
#define FLAGBITS ((uint8_t)-1)
+/// Compute the log2 of the largest naturally-aligned power-of-two block that
+/// starts at base_addr and fits within a region of n bytes.
+static inline int bitaddralign(size_t n, lpaddr_t base_addr)
+{
+ int exponent = sizeof(size_t) * NBBY - 1;
+
+ if(n == 0) {
+ return 0;
+ }
+
+ while ((exponent > 0) && ((base_addr % (1UL << exponent)) != 0)){
+ exponent--;
+ }
+ return((1UL << exponent) > n ? log2floor(n) : exponent);
+}
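+/*
+ * Example (illustrative values): for n = 0x7000 bytes at base_addr = 0x9000,
+ * the base is only 4 KiB aligned, so bitaddralign() returns 12; after
+ * consuming that 4 KiB block the next call (n = 0x6000, base = 0xa000)
+ * returns 13, and the final call (n = 0x4000, base = 0xc000) returns 14.
+ */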
+
/// Allocate a new node of given type/size. Does NOT initialise children pointers.
static struct mmnode *new_node(struct mm *mm, enum nodetype type,
uint8_t childbits)
}
// retype node into 2^(maxchildbits) smaller nodes
+ DEBUG("retype: current size: %zu, child size: %zu, count: %u\n",
+ 1UL << *nodesizebits, 1UL << (*nodesizebits - childbits), UNBITS_CA(childbits));
err = cap_retype2(cap, node->cap, 0, mm->objtype,
1UL << (*nodesizebits - childbits),
UNBITS_CA(childbits));
/* init fields */
assert(mm != NULL);
mm->objtype = objtype;
- assert((base & (UNBITS_GENPA(sizebits) - 1)) == 0);
+ // We do not care about alignment anymore?!
+ //assert((base & (UNBITS_GENPA(sizebits) - 1)) == 0);
mm->base = base;
mm->sizebits = sizebits;
assert(maxchildbits > 0 && maxchildbits != FLAGBITS);
}
- /* check that base is properly aligned to size */
- assert((base & (UNBITS_GENPA(sizebits) - 1)) == 0);
+ /* base no longer needs to be aligned to size, so no alignment check here */
/* construct root node if we need one */
if (mm->root == NULL) {
}
/**
+ * \brief Add a new region to the memory manager. The region does not need to
+ * be power-of-two sized or aligned.
+ *
+ * It is an error if any part of the region has already been added, or the
+ * region doesn't fit within the base and size specified for the allocator.
+ *
+ * \param mm Memory manager instance
+ * \param cap Capability to newly-added region
+ * \param size Size of region
+ * \param base Physical base address of region
+ */
+errval_t mm_add_multi(struct mm *mm, struct capref cap, gensize_t size, genpaddr_t base)
+{
+ DEBUG("%s: mm=%p, base=%#"PRIxGENPADDR", bytes=%zu\n", __FUNCTION__, mm, base, size);
+ gensize_t offset = 0;
+ errval_t err;
+ size_t rcount = 0;
+ // if we got aligned block; skip retype
+ if (1UL << bitaddralign(size, base) == size) {
+ DEBUG("%s: aligned region: adding original cap\n", __FUNCTION__);
+ return mm_add(mm, cap, log2ceil(size), base);
+ }
+
+ while (size > 0) {
+ uint8_t blockbits = bitaddralign(size, base);
+ gensize_t blockbytes = 1UL << blockbits;
+
+ /* get dest slot for retype */
+ struct capref temp;
+ err = mm->slot_alloc(mm->slot_alloc_inst, 1, &temp);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "Allocating slot");
+ return err_push(err, MM_ERR_SLOT_NOSLOTS);
+ }
+
+ err = cap_retype2(temp, cap, offset, mm->objtype, 1UL << blockbits, 1);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "Retyping region");
+ return err_push(err, MM_ERR_MM_ADD_MULTI);
+ }
+
+ err = mm_add(mm, temp, blockbits, base);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "Adding region to allocator");
+ return err_push(err, MM_ERR_MM_ADD_MULTI);
+ }
+ DEBUG("Added block %#"PRIxGENPADDR"--%#"PRIxGENPADDR", %u bits\n",
+ base, base+blockbytes, blockbits);
+
+ // advance block pointers
+ base += blockbytes;
+ offset += blockbytes;
+ size -= blockbytes;
+ rcount ++;
+ }
+
+ DEBUG("%s: done. cap was split into %zu blocks\n", __FUNCTION__, rcount);
+
+ return SYS_ERR_OK;
+}
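+/*
+ * Example (illustrative, reusing the bitaddralign() values above): adding a
+ * 0x7000-byte region at 0x9000, e.g.
+ *
+ *   err = mm_add_multi(&mm_ram, ramcap, 0x7000, 0x9000);
+ *
+ * retypes the cap into three power-of-two blocks and adds each with mm_add():
+ *
+ *   0x9000--0xa000   (4 KiB,  blockbits 12)
+ *   0xa000--0xc000   (8 KiB,  blockbits 13)
+ *   0xc000--0x10000  (16 KiB, blockbits 14)
+ */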
+
+/**
* \brief Allocate an arbitrary memory region of a given size
*
* \param mm Memory manager instance
}
- /* check that base is properly aligned to size */
- assert((base & (UNBITS_GENPA(sizebits) - 1)) == 0);
+ /* base no longer needs to be aligned to size, so no alignment check here */
if (mm->root == NULL) {
return MM_ERR_NOT_FOUND; // nothing added
vregion_destroy(origbios_vregion);
vregion_destroy(newbios_vregion);
- // TODO: Implement mm_free()
+ err = mm_free(&pci_mm_physaddr, bioscap, 0, BIOS_BITS);
+ assert(err_is_ok(err));
return err;
}
else {
skb_add_fact("memory_region(16'%" PRIxGENPADDR ",%u,%zu,%u,%tu).",
mrp->mr_base,
- mrp->mr_bits,
- ((size_t)1) << mrp->mr_bits,
+ 0,
+ mrp->mr_bytes,
mrp->mr_type,
mrp->mrmod_data);
}
mrp->mr_type == RegionType_PlatformData) {
ACPI_DEBUG("Region %d: %"PRIxGENPADDR" - %"PRIxGENPADDR" %s\n",
i, mrp->mr_base,
- mrp->mr_base + (((size_t)1)<<mrp->mr_bits),
+ mrp->mr_base + mrp->mr_bytes,
mrp->mr_type == RegionType_PhyAddr ?
"physical address" : "platform data");
- err = cap_retype2(devframe, phys_cap, 0, ObjType_DevFrame, 1UL << mrp->mr_bits, 1);
+ err = cap_retype2(devframe, phys_cap, 0, ObjType_DevFrame, mrp->mr_bytes, 1);
if (err_no(err) == SYS_ERR_REVOKE_FIRST) {
printf("cannot retype region %d: need to revoke first; ignoring it\n", i);
} else {
assert(err_is_ok(err));
- err = mm_add(&pci_mm_physaddr, devframe,
- mrp->mr_bits, mrp->mr_base);
+ err = mm_add_multi(&pci_mm_physaddr, devframe, mrp->mr_bytes,
+ mrp->mr_base);
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "adding region %d FAILED\n", i);
}
/* parameters for local memory allocator used until we spawn mem_serv */
#define MM_REQUIREDBITS 24 ///< Required size of memory to boot (16MB)
+#define MM_REQUIREDBYTES (1UL << MM_REQUIREDBITS) ///< Required size of memory to boot, in bytes
#define MM_MAXSIZEBITS (MM_REQUIREDBITS + 3) ///< Max size of memory in allocator
#define MM_MINSIZEBITS BASE_PAGE_BITS ///< Min size of allocation
#define MM_MAXCHILDBITS 1 ///< Max branching factor of BTree nodes
{
errval_t err;
+ /* init slot allocator */
+ static struct slot_alloc_basecn init_slot_alloc;
+ err = slot_alloc_basecn_init(&init_slot_alloc);
+ if (err_is_fail(err)) {
+ return err_push(err, MM_ERR_SLOT_ALLOC_INIT);
+ }
+
- /* walk bootinfo looking for suitable RAM cap to use
- * we pick the first cap equal to MM_REQUIREDBITS,
- * or else the next closest less than MM_MAXSIZEBITS */
+ /* walk bootinfo looking for a suitable RAM cap to use:
+ * pick the first empty region of at least MM_REQUIREDBYTES and, if it is
+ * larger, retype a MM_REQUIREDBYTES chunk off its end */
- int mem_region = -1, mem_slot = 0;
+ int mem_slot = 0;
struct capref mem_cap = {
.cnode = cnode_super,
.slot = 0,
};
+ /* get destination slot for retype */
+ genpaddr_t region_base = 0;
+ struct capref region_for_init;
+ err = slot_alloc_basecn(&init_slot_alloc, 1, &region_for_init);
+ if (err_is_fail(err)) {
+ return err_push(err, MM_ERR_SLOT_NOSLOTS);
+ }
+
assert(bi != NULL);
for (int i = 0; i < bi->regions_length; i++) {
assert(!bi->regions[i].mr_consumed);
if (bi->regions[i].mr_type == RegionType_Empty) {
- if (bi->regions[i].mr_bits >= MM_REQUIREDBITS &&
- bi->regions[i].mr_bits <= MM_MAXSIZEBITS &&
- (mem_region == -1 || bi->regions[i].mr_bits < bi->regions[mem_region].mr_bits)) {
- mem_region = i;
+ if (bi->regions[i].mr_bytes >= MM_REQUIREDBYTES) {
mem_cap.slot = mem_slot;
- if (bi->regions[i].mr_bits == MM_REQUIREDBITS) {
+ if (bi->regions[i].mr_bytes == MM_REQUIREDBYTES) {
+ bi->regions[i].mr_consumed = true;
+ region_base = bi->regions[i].mr_base;
break;
}
+
+ /* found cap bigger than required; cut off end */
+ bi->regions[i].mr_bytes -= MM_REQUIREDBYTES;
+ // can use mr_bytes as offset here
+ err = cap_retype2(region_for_init, mem_cap,
+ bi->regions[i].mr_bytes, ObjType_RAM,
+ MM_REQUIREDBYTES, 1);
+ if (err_is_fail(err)) {
+ return err_push(err, MM_ERR_CHUNK_NODE);
+ }
+ mem_cap = region_for_init;
+ region_base = bi->regions[i].mr_base + bi->regions[i].mr_bytes;
+ break;
}
mem_slot++;
}
}
- if (mem_region < 0) {
- printf("Error: no RAM capability found in the size range "
- "2^%d to 2^%d bytes\n", MM_REQUIREDBITS, MM_MAXSIZEBITS);
- return INIT_ERR_NO_MATCHING_RAM_CAP;
- }
- bi->regions[mem_region].mr_consumed = true;
- /* init slot allocator */
- static struct slot_alloc_basecn init_slot_alloc;
- err = slot_alloc_basecn_init(&init_slot_alloc);
- if (err_is_fail(err)) {
- return err_push(err, MM_ERR_SLOT_ALLOC_INIT);
+ if (region_base == 0) {
+ printf("Error: no RAM capability of at least %zu MB found\n",
+ (size_t)(MM_REQUIREDBYTES / 1024 / 1024));
+ return INIT_ERR_NO_MATCHING_RAM_CAP;
}
/* init MM allocator */
- assert(bi->regions[mem_region].mr_type != RegionType_Module);
- err = mm_init(&mymm, ObjType_RAM, bi->regions[mem_region].mr_base,
- bi->regions[mem_region].mr_bits, MM_MAXCHILDBITS, NULL,
+ err = mm_init(&mymm, ObjType_RAM, region_base,
+ MM_REQUIREDBITS, MM_MAXCHILDBITS, NULL,
slot_alloc_basecn, &init_slot_alloc, true);
if (err_is_fail(err)) {
return err_push(err, MM_ERR_MM_INIT);
slab_grow(&mymm.slabs, nodebuf, sizeof(nodebuf));
/* add single RAM cap to allocator */
- err = mm_add(&mymm, mem_cap, bi->regions[mem_region].mr_bits,
- bi->regions[mem_region].mr_base);
+ err = mm_add(&mymm, mem_cap, MM_REQUIREDBITS, region_base);
if (err_is_fail(err)) {
return err_push(err, MM_ERR_MM_ADD);
}
if (bi->regions[i].mr_type == RegionType_Empty) {
dump_ram_region(i, bi->regions + i);
- mem_total += ((size_t)1) << bi->regions[i].mr_bits;
+ mem_total += bi->regions[i].mr_bytes;
if (bi->regions[i].mr_consumed) {
// region consumed by init, skipped
continue;
}
- err = mm_add(&mm_ram, mem_cap, bi->regions[i].mr_bits,
- bi->regions[i].mr_base);
+ err = mm_add_multi(&mm_ram, mem_cap, bi->regions[i].mr_bytes,
+ bi->regions[i].mr_base);
if (err_is_ok(err)) {
- mem_avail += ((size_t)1) << bi->regions[i].mr_bits;
+ mem_avail += bi->regions[i].mr_bytes;
} else {
- DEBUG_ERR(err, "Warning: adding RAM region %d (%p/%d) FAILED",
- i, bi->regions[i].mr_base, bi->regions[i].mr_bits);
+ DEBUG_ERR(err, "Warning: adding RAM region %d (%p/%zu) FAILED",
+ i, bi->regions[i].mr_base, bi->regions[i].mr_bytes);
}
/* try to refill slot allocator (may fail if the mem allocator is empty) */
err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
if (err_is_fail(err) && err_no(err) != MM_ERR_SLOT_MM_ALLOC) {
DEBUG_ERR(err, "in slot_prealloc_refill() while initialising"
- " memory allocator");
+ " memory allocator");
abort();
}
/* refill slab allocator if needed and possible */
if (slab_freecount(&mm_ram.slabs) <= MINSPARENODES
- && mem_avail > (1UL << (CNODE_BITS + OBJBITS_CTE)) * 2
- + 10 * BASE_PAGE_SIZE) {
+ && mem_avail > (1UL << (CNODE_BITS + OBJBITS_CTE)) * 2
+ + 10 * BASE_PAGE_SIZE) {
slab_default_refill(&mm_ram.slabs); // may fail
}
+
mem_cap.slot++;
}
}
return 1;
}
// retype to selected type
- err = cap_retype2(caps[i], mem, 0, types[i], BASE_PAGE_BYTES, 1);
+ err = cap_retype2(caps[i], mem, 0, types[i], BASE_PAGE_SIZE, 1);
if (err_is_fail(err)) {
debug_printf("cap_retype: %s (%ld)\n", err_getstring(err), err);
return 1;
printf("slot_alloc: %s (%"PRIuERRV")\n", err_getstring(err), err);
return 1;
}
- err = cap_retype2(frame, mem, 0, ObjType_Frame, BASE_PAGE_BYTES, 1);
+ err = cap_retype2(frame, mem, 0, ObjType_Frame, BASE_PAGE_SIZE, 1);
if (err_is_fail(err)) {
printf("cap_retype: %s (%"PRIuERRV")\n", err_getstring(err), err);
return 1;