/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2011, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */
#include <stdlib.h>
#include <stdio.h>
#include <inttypes.h>
#include <barrelfish/barrelfish.h>
#include <barrelfish/dispatch.h>
#include <mm/mm.h>
#include <trace/trace.h>
#include <trace_definitions/trace_defs.h>
#include <barrelfish/morecore.h>
#include <barrelfish/monitor_client.h>

#include <if/mem_defs.h>
#include <if/monitor_defs.h>
size_t mem_total = 0, mem_avail = 0;
/* parameters for size of supported RAM and thus required storage */

// XXX: Even though we could manage an arbitrary amount of RAM on any
// architecture, we use paddr_t as the type to represent region
// limits, which caps the size we can manage.
#if defined(__x86_64__)
#       define MAXSIZEBITS     40      ///< Max size of memory in allocator
#elif defined(__i386__)
#       define MAXSIZEBITS     32
#elif defined(__arm__)
/* XXX This is better if < 32! - but there were no compile-time warnings! */
#       define MAXSIZEBITS     31
#else
#       error Unknown architecture
#endif
#define MINSIZEBITS     OBJBITS_DISPATCHER      ///< Min size of each allocation
#define MAXCHILDBITS    4                       ///< Max branching of BTree nodes

/// Maximum depth of the BTree, assuming only branching by two at each level
#define MAXDEPTH        (MAXSIZEBITS - MINSIZEBITS + 1)
/// Maximum number of BTree nodes
#define NNODES          ((1UL << MAXDEPTH) - 1)
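
/*
 * Worked example (an illustration; assumes OBJBITS_DISPATCHER == 10, which
 * is an assumption about the kernel headers, not stated here): on x86_64,
 * MAXDEPTH = 40 - 10 + 1 = 31, so NNODES = 2^31 - 1, i.e. the node storage
 * must be able to describe on the order of two billion BTree nodes in the
 * worst case.
 */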
/* Parameters for per-core memserv */
#define PERCORE_BITS    24
#define PERCORE_MEM     (1UL << PERCORE_BITS)  ///< How much memory per-core
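// For concreteness: PERCORE_BITS == 24 means 1 << 24 bytes, i.e. 16 MB,
// is set aside for each per-core memserv.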
static struct multi_slot_allocator msa;
static struct bootinfo *bi;
/**
 * \brief Size of CNodes to be created by slot allocator.
 *
 * Must satisfy both:
 *    #CNODE_BITS >= MAXCHILDBITS           (CNode big enough for max branching factor)
 *    (1UL << #CNODE_BITS) ** 2 >= #NNODES  (total number of slots is enough)
 */
#define NCNODES         (1UL << CNODE_BITS)     ///< Maximum number of CNodes
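
/*
 * The two constraints above could be checked mechanically as well; a minimal
 * sketch, assuming a C11 compiler (_Static_assert). Left disabled: it mirrors
 * the documented invariant rather than anything the build enforces.
 */
#if 0
_Static_assert(CNODE_BITS >= MAXCHILDBITS,
               "CNode too small for the maximum branching factor");
_Static_assert((1ULL << CNODE_BITS) * (1ULL << CNODE_BITS) >= NNODES,
               "two levels of CNode slots cannot cover all BTree nodes");
#endif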
/// Watermark at which we must refill the slab allocator used for nodes
#define MINSPARENODES   (MAXDEPTH * 8) // XXX: FIXME: experimentally determined!

/// MM allocator instance data
static struct mm mm_ram;

/// Slot allocator for MM
static struct slot_prealloc ram_slot_alloc;
static errval_t mymm_alloc(struct capref *ret, uint8_t bits, genpaddr_t minbase,
                           genpaddr_t maxlimit)
{
    errval_t err;

    assert(bits >= MINSIZEBITS);

    // no range constraint given: take any region of the right size
    if (maxlimit == 0) {
        err = mm_alloc(&mm_ram, bits, ret, NULL);
    } else {
        err = mm_alloc_range(&mm_ram, bits, minbase, maxlimit, ret, NULL);
    }

    return err;
}
static errval_t mymm_free(struct capref ramcap, genpaddr_t base, uint8_t bits)
{
    errval_t ret;
    genpaddr_t mem_to_add;

    mem_to_add = (genpaddr_t)1 << bits;

    ret = mm_free(&mm_ram, ramcap, base, bits);
    if (err_is_fail(ret)) {
        if (err_no(ret) == MM_ERR_NOT_FOUND) {
            // memory wasn't there initially, add it
            ret = mm_add(&mm_ram, ramcap, bits, base);
            if (err_is_fail(ret)) {
                /* DEBUG_ERR(ret, "failed to add RAM to allocator"); */
                return ret;
            }
            mem_total += mem_to_add;
        } else {
            /* DEBUG_ERR(ret, "failed to free RAM in allocator"); */
            return ret;
        }
    }

    mem_avail += mem_to_add;

    return SYS_ERR_OK;
}
/// state for a pending reply
// Because we send only one kind of message to a client, and at most one can
// be outstanding per binding (this is an RPC interface), a single record per
// binding is safe.
struct pending_reply {
    struct mem_binding *b;
    errval_t err;
    struct capref *cap;
};
static void retry_free_reply(void *arg)
{
    struct pending_reply *r = arg;
    assert(r != NULL);
    struct mem_binding *b = r->b;
    errval_t err;

    err = b->tx_vtbl.free_monitor_response(b, NOP_CONT, r->err);
    if (err_is_ok(err)) {
        free(r);
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        // channel still busy: re-register to retry once it drains
        err = b->register_send(b, get_default_waitset(),
                               MKCONT(retry_free_reply, r));
    }

    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to reply to free request");
        free(r);
    }
}
static void allocate_response_done(void *arg)
{
    struct capref *cap = arg;

    if (!capref_is_null(*cap)) {
        // the RAM cap has been transmitted to the client; delete our copy
        errval_t err = cap_delete(*cap);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "cap_delete after send. This memory will leak.");
        }
    }

    free(cap);
}
static void retry_reply(void *arg)
{
    struct pending_reply *r = arg;
    assert(r != NULL);
    struct mem_binding *b = r->b;
    errval_t err;

    err = b->tx_vtbl.allocate_response(b, MKCONT(allocate_response_done, r->cap),
                                       r->err, *r->cap);
    if (err_is_ok(err)) {
        free(r);
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        err = b->register_send(b, get_default_waitset(), MKCONT(retry_reply, r));
        assert(err_is_ok(err));
    } else {
        DEBUG_ERR(err, "failed to reply to memory request");
        allocate_response_done(r->cap);
        free(r);
    }
}
static void mem_free_handler(struct mem_binding *b,
                             struct capref ramcap, genpaddr_t base,
                             uint8_t bits)
{
    errval_t ret;
    errval_t err;

    ret = mymm_free(ramcap, base, bits);

    err = b->tx_vtbl.free_monitor_response(b, NOP_CONT, ret);
    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct pending_reply *r = malloc(sizeof(struct pending_reply));
            assert(r != NULL);
            r->b = b;
            r->err = ret;
            err = b->register_send(b, get_default_waitset(),
                                   MKCONT(retry_free_reply, r));
            assert(err_is_ok(err));
        } else {
            DEBUG_ERR(err, "failed to reply to free request");
        }
    }
}
static void mem_available_handler(struct mem_binding *b)
{
    errval_t err;

    err = b->tx_vtbl.available_response(b, NOP_CONT, mem_avail, mem_total);
    if (err_is_fail(err)) {
        // FIXME: handle FLOUNDER_ERR_TX_BUSY
        DEBUG_ERR(err, "failed to reply to memory request");
    }
}
// FIXME: error handling (not asserts) needed in this function
static void mem_allocate_handler(struct mem_binding *b, uint8_t bits,
                                 genpaddr_t minbase, genpaddr_t maxlimit)
{
    struct capref *cap = malloc(sizeof(struct capref));
    errval_t err, ret;

    trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_ALLOC, bits);

    /* refill slot allocator if needed */
    err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
    assert(err_is_ok(err));

    /* refill slab allocator if needed */
    while (slab_freecount(&mm_ram.slabs) <= MINSPARENODES) {
        struct capref frame;
        err = msa.a.alloc(&msa.a, &frame);
        assert(err_is_ok(err));
        err = frame_create(frame, BASE_PAGE_SIZE * 8, NULL);
        assert(err_is_ok(err));
        void *buf;
        err = vspace_map_one_frame(&buf, BASE_PAGE_SIZE * 8, frame, NULL, NULL);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "vspace_map_one_frame failed");
            assert(buf);
        }
        slab_grow(&mm_ram.slabs, buf, BASE_PAGE_SIZE * 8);
    }

    ret = mymm_alloc(cap, bits, minbase, maxlimit);
    if (err_is_ok(ret)) {
        mem_avail -= 1UL << bits;
    } else {
        // DEBUG_ERR(ret, "allocation of %d bits in %" PRIxGENPADDR "-%" PRIxGENPADDR " failed",
        //           bits, minbase, maxlimit);
        *cap = NULL_CAP;
    }

    /* send reply */
    err = b->tx_vtbl.allocate_response(b, MKCONT(allocate_response_done, cap),
                                       ret, *cap);
    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct pending_reply *r = malloc(sizeof(struct pending_reply));
            assert(r != NULL);
            r->b = b;
            r->err = ret;
            r->cap = cap;
            err = b->register_send(b, get_default_waitset(), MKCONT(retry_reply, r));
            assert(err_is_ok(err));
        } else {
            DEBUG_ERR(err, "failed to reply to memory request");
            allocate_response_done(cap);
        }
    }
}
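
/*
 * For orientation: the client side of this RPC normally lives in
 * libbarrelfish, where ram_alloc() ends up issuing the allocate call. A
 * hypothetical caller would look roughly like the sketch below; the exact
 * generated signature of vtbl.allocate is an assumption based on the mem
 * interface, so treat this as illustration only (left disabled).
 */
#if 0
static errval_t example_remote_alloc(struct capref *ret_cap, uint8_t bits)
{
    errval_t err, result;
    struct mem_rpc_client *mc = get_mem_client();
    // allocate(bits, minbase, maxlimit) -> (result, cap); 0/0 means "any range"
    err = mc->vtbl.allocate(mc, bits, 0, 0, &result, ret_cap);
    if (err_is_fail(err)) {
        return err;    // transport-level failure
    }
    return result;     // allocation result reported by the server
}
#endif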
static void dump_ram_region(int idx, struct mem_region* m)
{
    uintptr_t start, limit;

    start = (uintptr_t)m->mr_base;
    limit = start + (1UL << m->mr_bits);

    char prefix = ' ';
    size_t quantity = 1UL << m->mr_bits;

    if (m->mr_bits >= 30) {
        quantity >>= 30;
        prefix = 'G';
    }
    else if (m->mr_bits >= 20) {
        quantity >>= 20;
        prefix = 'M';
    }
    else if (m->mr_bits >= 10) {
        quantity >>= 10;
        prefix = 'K';
    }

    printf("RAM region %d: 0x%" PRIxPTR
           " - 0x%" PRIxPTR " (%zu %cB, %u bits)\n",
           idx, start, limit, quantity, prefix, m->mr_bits);
}
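
// Example output for a hypothetical 1 GB region at 0x80000000:
//   RAM region 0: 0x80000000 - 0xc0000000 (1 GB, 30 bits)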
static genpaddr_t find_smallest_address(void)
{
    bool isFirst = true;
    genpaddr_t smallest_addr = 0;

    for (int i = 0; i < bi->regions_length; i++) {
        if (bi->regions[i].mr_type != RegionType_Empty) {
            continue;
        }

        if (bi->regions[i].mr_consumed) {
            continue;
        }

        // first usable region seen so far: start from its base address
        if (isFirst) {
            smallest_addr = bi->regions[i].mr_base;
            isFirst = false;
            continue;
        }

        if (smallest_addr > bi->regions[i].mr_base) {
            smallest_addr = bi->regions[i].mr_base;
        }
    } // end for: for every record
    return smallest_addr;
} // end function: find_smallest_address
static genpaddr_t guess_physical_addr_start(void)
{
    genpaddr_t start_physical = find_smallest_address();

    if (start_physical > 0x80000000) {
        // This is most probably a pandaboard!
        start_physical = 0x80000000;
    } else {
        // This is gem5 or some other architecture
        start_physical = 0;
    }

    return start_physical;
} // end function: guess_physical_addr_start
// FIXME: error handling (not asserts) needed in this function
// XXX: workaround for inline bug of arm-gcc 4.6.1 and lower
#if defined(__ARM_ARCH_7A__) && defined(__GNUC__) \
    && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 && __GNUC_PATCHLEVEL__ <= 1
static __attribute__((noinline)) errval_t
#else
static errval_t
#endif
initialize_ram_alloc(void)
{
    errval_t err;

    /* Initialize slot allocator by passing a cnode cap for it to start with */
    struct capref cnode_cap;
    err = slot_alloc(&cnode_cap);
    assert(err_is_ok(err));
    struct capref cnode_start_cap = { .slot = 0 };

    struct capref ram;
    err = ram_alloc_fixed(&ram, BASE_PAGE_BITS, 0, 0);
    assert(err_is_ok(err));
    err = cnode_create_from_mem(cnode_cap, ram, &cnode_start_cap.cnode,
                                DEFAULT_CNODE_BITS);
    assert(err_is_ok(err));

    /* location where slot allocator will place its top-level cnode */
    struct capref top_slot_cap = {
        .cnode = cnode_root,
        .slot = ROOTCN_SLOT_MODULECN, // XXX: we don't have the module CNode
    };

    /* init slot allocator */
    err = slot_prealloc_init(&ram_slot_alloc, top_slot_cap, MAXCHILDBITS,
                             CNODE_BITS, cnode_start_cap,
                             1UL << DEFAULT_CNODE_BITS, &mm_ram);
    assert(err_is_ok(err));

    err = mm_init(&mm_ram, ObjType_RAM, guess_physical_addr_start(),
                  MAXSIZEBITS, MAXCHILDBITS, NULL,
                  slot_alloc_prealloc, &ram_slot_alloc, true);
    assert(err_is_ok(err));

    /* give MM allocator static storage to get it started */
    static char nodebuf[SLAB_STATIC_SIZE(MINSPARENODES, MM_NODE_SIZE(MAXCHILDBITS))];
    slab_grow(&mm_ram.slabs, nodebuf, sizeof(nodebuf));

    /* walk bootinfo and add all unused RAM caps to allocator */
    struct capref mem_cap = {
        .cnode = cnode_super0,
        .slot = 0,
    };

    for (int i = 0; i < bi->regions_length; i++) {
        if (bi->regions[i].mr_type == RegionType_Empty) {
            dump_ram_region(i, bi->regions + i);

            /*
             * we may have more memory regions than we have space in a single
             * CNode, thus we switch to the second.
             */
            if (mem_cap.slot >= (1UL << mem_cap.cnode.size_bits)) {
                mem_cap.slot = 0;
                mem_cap.cnode = cnode_super1;
            }

            mem_total += ((size_t)1) << bi->regions[i].mr_bits;

            if (bi->regions[i].mr_consumed) {
                // region consumed by init: skip it, but move to the next slot
                mem_cap.slot++;
                continue;
            }

            err = mm_add(&mm_ram, mem_cap, bi->regions[i].mr_bits,
                         bi->regions[i].mr_base);
            if (err_is_ok(err)) {
                mem_avail += ((size_t)1) << bi->regions[i].mr_bits;
            } else {
                DEBUG_ERR(err, "Warning: adding RAM region %d (%p/%d) FAILED",
                          i, bi->regions[i].mr_base, bi->regions[i].mr_bits);
            }

            /* try to refill slot allocator (may fail if the mem allocator is empty) */
            err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
            if (err_is_fail(err) && err_no(err) != MM_ERR_SLOT_MM_ALLOC) {
                DEBUG_ERR(err, "in slot_prealloc_refill() while initialising"
                          " memory allocator");
            }

            /* refill slab allocator if needed and possible */
            if (slab_freecount(&mm_ram.slabs) <= MINSPARENODES
                && mem_avail > (1UL << (CNODE_BITS + OBJBITS_CTE)) * 2
                               + 10 * BASE_PAGE_SIZE) {
                slab_default_refill(&mm_ram.slabs); // may fail
            }

            mem_cap.slot++;
        }
    }

    err = slot_prealloc_refill(mm_ram.slot_alloc_inst);
    if (err_is_fail(err)) {
        printf("Fatal internal error in RAM allocator: failed to initialise "
               "slot allocator\n");
        DEBUG_ERR(err, "failed to init slot allocator");
        abort();
    }

    printf("RAM allocator initialised, %zu MB (of %zu MB) available\n",
           mem_avail / 1024 / 1024, mem_total / 1024 / 1024);

    return SYS_ERR_OK;
}
static void export_callback(void *st, errval_t err, iref_t iref)
{
    assert(err_is_ok(err));
    // register this iref with the monitor so clients can find the mem server
    struct monitor_binding *mb = get_monitor_binding();
    err = mb->tx_vtbl.set_mem_iref_request(mb, NOP_CONT, iref);
    assert(err_is_ok(err));
}

static struct mem_rx_vtbl rx_vtbl = {
    .allocate_call = mem_allocate_handler,
    .available_call = mem_available_handler,
    .free_monitor_call = mem_free_handler,
};

static errval_t connect_callback(void *st, struct mem_binding *b)
{
    b->rx_vtbl = rx_vtbl;
    // TODO: set error handler
    return SYS_ERR_OK;
}
int main(int argc, char **argv)
{
    errval_t err;
    struct waitset *ws = get_default_waitset();

    if (argc < 2) {
        fprintf(stderr, "Usage: %s <bootinfo_location>\n", argv[0]);
        return EXIT_FAILURE;
    }

    // First argument contains the bootinfo location
    bi = (struct bootinfo*)strtol(argv[1], NULL, 10);

    /* construct special-case LMP connection to monitor */
    static struct monitor_lmp_binding mcb;
    set_monitor_binding(&mcb.b);

    err = monitor_client_lmp_accept(&mcb, ws, DEFAULT_LMP_BUF_WORDS);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "monitor_client_lmp_accept");
    }

    idc_init();

    /* Send the cap for this endpoint to init, who will pass it to
       the monitor */
    err = lmp_ep_send0(cap_initep, 0, mcb.chan.local_cap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "lmp_ep_send0");
    }

    // XXX: handle messages (i.e. block) until the monitor binding is ready
    while (capref_is_null(mcb.chan.remote_cap)) {
        err = event_dispatch(ws);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "in event_dispatch while waiting for monitor");
            return EXIT_FAILURE;
        }
    }

    /* Initialize our own memory allocator */
    err = ram_alloc_set(mymm_alloc);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "ram_alloc_set");
    }

    err = initialize_ram_alloc();
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "initialize_ram_alloc");
    }

    /* Initialize self slot_allocator */
    err = multi_slot_alloc_init(&msa, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "multi_slot_alloc_init");
    }

    err = mem_export(NULL, export_callback, connect_callback, ws,
                     IDC_EXPORT_FLAGS_DEFAULT);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "mem_export");
    }

    /* initialise tracing */
#if defined(TRACING_EXISTS) && defined(CONFIG_TRACE)
    err = trace_my_setup();
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "initialising tracing");
        // return EXIT_FAILURE;
    }
#endif

    // handle messages on this thread
    while (true) {
        err = event_dispatch(ws);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "in main event_dispatch loop");
            return EXIT_FAILURE;
        }
    }
}