size_t size;
size_t alignment;
size_t consumed;
+    struct slot_allocator *slot_alloc; ///< Slot allocator used for frame capability slots
struct vregion vregion; ///< Needs just one vregion
struct memobj_anon memobj; ///< Needs just one memobj
lvaddr_t offset; ///< Offset of free space in anon
};
errval_t vspace_mmu_aware_init(struct vspace_mmu_aware *state, size_t size);
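+/// Select the slot allocator used by vspace_mmu_aware_map() for frame
+/// capability slots; a NULL argument selects the default slot allocator.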
+void vspace_mmu_aware_set_slot_alloc(struct vspace_mmu_aware *state,
+ struct slot_allocator *slot_allocator);
errval_t vspace_mmu_aware_init_aligned(struct vspace_mmu_aware *state,
+ struct slot_allocator *slot_alloc,
size_t size, size_t alignment,
vregion_flags_t flags);
errval_t vspace_mmu_aware_reset(struct vspace_mmu_aware *state,
struct capref frame, size_t size);
-errval_t vspace_mmu_aware_map(struct vspace_mmu_aware *state,
- struct capref frame, size_t req_size,
+errval_t vspace_mmu_aware_map(struct vspace_mmu_aware *state, size_t req_size,
void **retbuf, size_t *retsize);
errval_t vspace_mmu_aware_unmap(struct vspace_mmu_aware *state,
lvaddr_t base, size_t bytes);
size_t mapped = 0;
size_t step = bytes;
while (mapped < bytes) {
- struct capref cap;
- err = slot_alloc(&cap);
- if (err_is_fail(err)) {
- USER_PANIC_ERR(err, "slot_alloc failed");
- }
-
void *mid_buf = NULL;
- err = vspace_mmu_aware_map(&state->mmu_state, cap, step,
- &mid_buf, &step);
+ err = vspace_mmu_aware_map(&state->mmu_state, step, &mid_buf, &step);
if (err_is_ok(err)) {
if (buf == NULL) {
buf = mid_buf;
for a very large frame, will try asking for smaller one.
*/
if (err_no(err) == LIB_ERR_FRAME_CREATE_MS_CONSTRAINTS) {
- err = slot_free(cap);
-            if (err_is_fail(err)) {
-                debug_err(__FILE__, __func__, __LINE__, err,
-                          "slot_free failed");
#endif
morecore_flags |= (alignment == LARGE_PAGE_SIZE ? VREGION_FLAGS_LARGE : 0);
- err = vspace_mmu_aware_init_aligned(&state->mmu_state, HEAP_REGION,
- alignment, morecore_flags);
+ err = vspace_mmu_aware_init_aligned(&state->mmu_state, NULL, HEAP_REGION,
+ alignment, morecore_flags);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_VSPACE_MMU_AWARE_INIT);
}
if (!buf) { /* Grow slab */
// Allocate slot out of the list
mca->a.space--;
- struct capref frame;
- err = mca->head->a.a.alloc(&mca->head->a.a, &frame);
- if (err_is_fail(err)) {
- thread_mutex_unlock(&ca->mutex);
- return err_push(err, LIB_ERR_SLOT_ALLOC);
- }
+    // ensure vspace_mmu_aware_map allocates its frame slot from this allocator's head
+ vspace_mmu_aware_set_slot_alloc(&mca->mmu_state, &mca->head->a.a);
thread_mutex_unlock(&ca->mutex); // following functions may call
// slot_alloc
void *slab_buf;
size_t size;
- err = vspace_mmu_aware_map(&mca->mmu_state, frame,
- mca->slab.blocksize, &slab_buf, &size);
+ err = vspace_mmu_aware_map(&mca->mmu_state, mca->slab.blocksize,
+ &slab_buf, &size);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_VSPACE_MMU_AWARE_MAP);
}
{
assert(slabs == &thread_slabs);
- struct capref frame;
size_t size;
void *buf;
errval_t err;
- err = slot_alloc(&frame);
- if (err_is_fail(err)) {
- return err_push(err, LIB_ERR_SLOT_ALLOC);
- }
-
size_t blocksize = sizeof(struct thread) + tls_block_total_len;
- err = vspace_mmu_aware_map(&thread_slabs_vm, frame, blocksize, &buf, &size);
+ err = vspace_mmu_aware_map(&thread_slabs_vm, blocksize, &buf, &size);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_VSPACE_MMU_AWARE_MAP);
}
void *buf;
errval_t err;
- err = slot_alloc(&frame);
- if (err_is_fail(err)) {
- USER_PANIC_ERR(err, "in slot_alloc while prefilling thread slabs\n");
- }
-
size_t blocksize = sizeof(struct thread) + tls_block_total_len;
- err = vspace_mmu_aware_map(&thread_slabs_vm, frame, blocksize,
+ err = vspace_mmu_aware_map(&thread_slabs_vm, blocksize,
&buf, &size);
if (err_is_fail(err)) {
-        slot_free(frame);
/// Minimum free memory before we return it to memory server
#define MIN_MEM_FOR_FREE (1 * 1024 * 1024)
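+/**
+ * \brief Set the slot allocator used when mapping new frames
+ *
+ * If \p slot_allocator is NULL, the domain's default slot allocator is used.
+ */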
+void vspace_mmu_aware_set_slot_alloc(struct vspace_mmu_aware *state,
+ struct slot_allocator *slot_allocator)
+{
+ if (slot_allocator) {
+ state->slot_alloc = slot_allocator;
+ } else {
+ state->slot_alloc = get_default_slot_allocator();
+ }
+}
+
/**
* \brief Initialize vspace_mmu_aware struct
*
*/
errval_t vspace_mmu_aware_init(struct vspace_mmu_aware *state, size_t size)
{
- return vspace_mmu_aware_init_aligned(state, size, 0,
+ return vspace_mmu_aware_init_aligned(state, NULL, size, 0,
VREGION_FLAGS_READ_WRITE);
}
errval_t vspace_mmu_aware_init_aligned(struct vspace_mmu_aware *state,
- size_t size, size_t alignment, vregion_flags_t flags)
+ struct slot_allocator *slot_allocator,
+ size_t size, size_t alignment,
+ vregion_flags_t flags)
{
state->size = size;
state->consumed = 0;
state->alignment = alignment;
+    vspace_mmu_aware_set_slot_alloc(state, slot_allocator);
+
errval_t err;
size = ROUND_UP(size, BASE_PAGE_SIZE);
* or region of memory). This is to facilitate retrying with different
* constraints.
*/
-errval_t vspace_mmu_aware_map(struct vspace_mmu_aware *state,
- struct capref frame, size_t req_size,
+errval_t vspace_mmu_aware_map(struct vspace_mmu_aware *state, size_t req_size,
void **retbuf, size_t *retsize)
{
errval_t err;
+ struct capref frame;
+
// Calculate how much still to map in
size_t origsize = req_size;
assert(state->mapoffset >= state->offset);
}
// Create frame of appropriate size
allocate:
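+    // Allocate a slot for the new frame capability from the configured allocator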
+ err = state->slot_alloc->alloc(state->slot_alloc, &frame);
+ if (err_is_fail(err)) {
+ return err_push(err, LIB_ERR_SLOT_ALLOC_NO_SPACE);
+ }
+
err = frame_create(frame, alloc_size, &ret_size);
if (err_is_fail(err)) {
if (err_no(err) == LIB_ERR_RAM_ALLOC_MS_CONSTRAINTS) {
debug_err(__FILE__, __func__, __LINE__, err,
"cap_delete failed");
}
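+        // The frame cap was deleted above; hand the now-empty slot back to its allocator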
+ state->slot_alloc->free(state->slot_alloc, frame);
return LIB_ERR_VSPACE_MMU_AWARE_NO_SPACE;
}
struct vregion *vregion;
struct capref oldframe;
void *vbuf;
+
// create copy of new region
err = slot_alloc(&oldframe);
if (err_is_fail(err)) {