struct memobj_pinned memobj;
struct vregion vregion;
lvaddr_t offset;
- struct slab_alloc vregion_list_slab;
- struct slab_alloc frame_list_slab;
+ struct slab_allocator vregion_list_slab;
+ struct slab_allocator frame_list_slab;
};
struct core_state_arch {
struct memobj_pinned memobj;
struct vregion vregion;
lvaddr_t offset;
- struct slab_alloc vregion_list_slab;
- struct slab_alloc frame_list_slab;
+ struct slab_allocator vregion_list_slab;
+ struct slab_allocator frame_list_slab;
};
struct core_state_arch {
struct memobj_anon {
struct memobj m;
struct vregion_list *vregion_list; ///< List of vregions mapped into the obj
- struct slab_alloc vregion_slab; ///< Slab to back the vregion list
+ struct slab_allocator vregion_slab; ///< Slab to back the vregion list
struct memobj_frame_list *frame_list; ///< List of frames tracked by the obj
- struct slab_alloc frame_slab; ///< Slab to back the frame list
+ struct slab_allocator frame_slab; ///< Slab to back the frame list
};
/**
__BEGIN_DECLS
// forward declarations
-struct slab_alloc;
+struct slab_allocator;
struct block_head;
-typedef errval_t (*slab_refill_func_t)(struct slab_alloc *slabs);
+typedef errval_t (*slab_refill_func_t)(struct slab_allocator *slabs);
struct slab_head {
struct slab_head *next; ///< Next slab in the allocator
struct slot_allocator;
-struct slab_alloc {
+struct slab_allocator {
struct slab_head *slabs; ///< Pointer to list of slabs
size_t blocksize; ///< Size of blocks managed by this allocator
slab_refill_func_t refill_func; ///< Refill function
};
-void slab_init(struct slab_alloc *slabs, size_t blocksize,
+void slab_init(struct slab_allocator *slabs, size_t blocksize,
slab_refill_func_t refill_func);
-void slab_grow(struct slab_alloc *slabs, void *buf, size_t buflen);
-void *slab_alloc(struct slab_alloc *slabs);
-void slab_free(struct slab_alloc *slabs, void *block);
-size_t slab_freecount(struct slab_alloc *slabs);
-errval_t slab_default_refill(struct slab_alloc *slabs);
+void slab_grow(struct slab_allocator *slabs, void *buf, size_t buflen);
+void *slab_alloc(struct slab_allocator *slabs);
+void slab_free(struct slab_allocator *slabs, void *block);
+size_t slab_freecount(struct slab_allocator *slabs);
+errval_t slab_default_refill(struct slab_allocator *slabs);
// size of block header
#define SLAB_BLOCK_HDRSIZE (sizeof(void *))
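For orientation, a minimal usage sketch of the renamed interface (illustrative only, not part of this patch; the header path, `my_node`, and the buffer size are assumptions):

#include <stdint.h>
#include <barrelfish/slab.h>        // assumed public header declaring the API above

struct my_node {                    // hypothetical object type backed by the pool
    struct my_node *next;
    int value;
};

static struct slab_allocator my_slabs;
static uint8_t my_buf[4096];        // static backing store, handed over once

static void my_pool_init(void)
{
    // NULL refill function: slab_alloc() returns NULL once my_buf is exhausted.
    slab_init(&my_slabs, sizeof(struct my_node), NULL);
    slab_grow(&my_slabs, my_buf, sizeof(my_buf));
}

static struct my_node *my_pool_get(void)
{
    return slab_alloc(&my_slabs);
}

static void my_pool_put(struct my_node *n)
{
    slab_free(&my_slabs, n);        // slab_free() tolerates NULL
}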
struct capref cap; ///< Cap of the cnode the allocator is tracking
struct cnoderef cnode; ///< Cnode the allocator is tracking
struct cnode_meta *head; ///< Linked list of free slots
- struct slab_alloc slab; ///< Slab for backing the list
+ struct slab_allocator slab; ///< Slab for backing the list
};
struct slot_allocator_list {
struct slot_allocator_list *head; ///< List of single slot allocators
struct slot_allocator_list *reserve; ///< One single allocator in reserve
- struct slab_alloc slab; ///< Slab backing the slot_allocator_list
+ struct slab_allocator slab; ///< Slab backing the slot_allocator_list
struct vspace_mmu_aware mmu_state;
};
struct capref cnode_cap; ///< capref for the cnode
struct cnoderef cnode; ///< cnoderef for the cnode to allocate from
struct cnode_meta *meta; ///< Linked list of meta data
- struct slab_alloc slab; ///< Slab allocation
+ struct slab_allocator slab; ///< Slab allocation
struct thread_mutex mutex; ///< Mutex for thread safety
};
* them to allocate its memory, we declare it in the public header.
*/
struct mm {
- struct slab_alloc slabs;///< Slab allocator used for allocating nodes
+ struct slab_allocator slabs; ///< Slab allocator used for allocating nodes
slot_alloc_t slot_alloc;///< Slot allocator for allocating cspace
void *slot_alloc_inst; ///< Opaque instance pointer for slot allocator
struct mmnode *root; ///< Root node
struct vregion vregion; ///< Vregion used to reserve virtual address for metadata
genvaddr_t vregion_offset; ///< Offset into amount of reserved virtual address used
struct vnode root; ///< Root of the vnode tree
- struct slab_alloc slab; ///< Slab allocator for the vnode lists
+ struct slab_allocator slab; ///< Slab allocator for the vnode lists
uint8_t slab_buffer[512]; ///< Initial buffer to back the allocator
};
genvaddr_t vregion_offset; ///< Offset into amount of reserved virtual address used
struct vnode root; ///< Root of the vnode tree
errval_t (*refill_slabs)(struct pmap_x86 *); ///< Function to refill slabs
- struct slab_alloc slab; ///< Slab allocator for the vnode lists
+ struct slab_allocator slab; ///< Slab allocator for the vnode lists
genvaddr_t min_mappable_va; ///< Minimum mappable virtual address
genvaddr_t max_mappable_va; ///< Maximum mappable virtual address
uint8_t slab_buffer[512]; ///< Initial buffer to back the allocator
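Both pmap variants keep a small in-struct buffer so the embedded allocator can hand out its first vnodes before any mapping machinery is available. A sketch of the presumed setup pattern (the helper `pmap_prime_slabs` is hypothetical and not part of this patch):

// Illustrative only: back the embedded allocator with its in-struct buffer.
static void pmap_prime_slabs(struct pmap_x86 *pmap)
{
    slab_init(&pmap->slab, sizeof(struct vnode), NULL);
    slab_grow(&pmap->slab, pmap->slab_buffer, sizeof(pmap->slab_buffer));
    // Once these 512 bytes run low, refill_slabs() is expected to map fresh
    // memory and feed it back via another slab_grow() call.
}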
* \param blocksize Size of blocks to be allocated by this allocator
* \param refill_func Pointer to function to call when out of memory (or NULL)
*/
-void slab_init(struct slab_alloc *slabs, size_t blocksize,
+void slab_init(struct slab_allocator *slabs, size_t blocksize,
slab_refill_func_t refill_func)
{
slabs->slabs = NULL;
* \param buf Pointer to start of memory region
* \param buflen Size of memory region (in bytes)
*/
-void slab_grow(struct slab_alloc *slabs, void *buf, size_t buflen)
+void slab_grow(struct slab_allocator *slabs, void *buf, size_t buflen)
{
/* setup slab_head structure at top of buffer */
assert(buflen > sizeof(struct slab_head));
*
* \returns Pointer to block on success, NULL on error (out of memory)
*/
-void *slab_alloc(struct slab_alloc *slabs)
+void *slab_alloc(struct slab_allocator *slabs)
{
errval_t err;
/* find a slab with free blocks */
* \param slabs Pointer to slab allocator instance
* \param block Pointer to block previously returned by #slab_alloc
*/
-void slab_free(struct slab_alloc *slabs, void *block)
+void slab_free(struct slab_allocator *slabs, void *block)
{
if (block == NULL) {
return;
*
* \returns Free block count
*/
-size_t slab_freecount(struct slab_alloc *slabs)
+size_t slab_freecount(struct slab_allocator *slabs)
{
size_t ret = 0;
* \param slabs Pointer to slab allocator instance
* \param bytes (Minimum) amount of memory to map
*/
-static errval_t slab_refill_pages(struct slab_alloc *slabs, size_t bytes)
+static errval_t slab_refill_pages(struct slab_allocator *slabs, size_t bytes)
{
errval_t err;
struct capref frame_cap;
*
* \param slabs Pointer to slab allocator instance
*/
-errval_t slab_default_refill(struct slab_alloc *slabs)
+errval_t slab_default_refill(struct slab_allocator *slabs)
{
return slab_refill_pages(slabs, BASE_PAGE_SIZE);
}
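Code that can safely map fresh memory at allocation time can pass this function as the refill hook instead of relying on a static buffer. A sketch under that assumption (`meta_slabs` and `meta_init` are hypothetical names, not part of this patch):

static struct slab_allocator meta_slabs;

static void meta_init(size_t blocksize)
{
    slab_init(&meta_slabs, blocksize, slab_default_refill);
    // Optionally prime the pool up front so the first allocations cannot fail;
    // slab_default_refill() maps one BASE_PAGE_SIZE frame per call.
    errval_t err = slab_default_refill(&meta_slabs);
    assert(err_is_ok(err));
}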
static struct thread_mutex staticthread_lock = THREAD_MUTEX_INITIALIZER;
/// Storage metadata for thread structures (and TLS data)
-static struct slab_alloc thread_slabs;
+static struct slab_allocator thread_slabs;
static struct vspace_mmu_aware thread_slabs_vm;
// XXX: mutex and spinlock protecting thread slabs in spanned domains
}
/// Refill backing storage for thread region
-static errval_t refill_thread_slabs(struct slab_alloc *slabs)
+static errval_t refill_thread_slabs(struct slab_allocator *slabs)
{
assert(slabs == &thread_slabs);
struct pinned_state *state = get_current_pinned_state();
// Select slab type
- struct slab_alloc *slab;
+ struct slab_allocator *slab;
switch(slab_type) {
case VREGION_LIST:
slab = &state->vregion_list_slab;
}
#endif // 0
-errval_t slab_refill(struct slab_alloc *slabs)
+errval_t slab_refill(struct slab_allocator *slabs)
{
errval_t err;
/// Monitor's binding to this mem_serv
extern struct mem_binding *monitor_mem_binding;
-errval_t slab_refill(struct slab_alloc *slabs);
+errval_t slab_refill(struct slab_allocator *slabs);
errval_t percore_free_handler_common(struct capref ramcap, genpaddr_t base,
uint8_t bits);