 * Copyright (c) 2007, 2008, 2009, 2010, 2011, ETH Zurich.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */
#include <stdlib.h>
#include <stdio.h>
#include <inttypes.h>

#include <barrelfish/barrelfish.h>
#include <barrelfish/dispatch.h>
#include <barrelfish/morecore.h>
#include <barrelfish/monitor_client.h>
#include <mm/mm.h>
#include <mm/slot_alloc.h>
#include <trace/trace.h>
#include <trace_definitions/trace_defs.h>

#include <if/mem_defs.h>
#include <if/monitor_defs.h>
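
// Running totals for the RAM under management, in bytes: mem_total counts
// everything ever handed to the allocator, mem_avail what is currently free.
// Updated by initialize_ram_alloc(), mymm_free() and mem_allocate_handler().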
size_t mem_total = 0, mem_avail = 0;
/* parameters for size of supported RAM and thus required storage */
// Since the supported physical address range depends on the target
// architecture, we use paddr_t as the type to represent region
// limits, which bounds the maximum size we can manage.
#if defined(__x86_64__)
// x86_64 usually supports 48 bits of physical address space, maybe figure
// this out dynamically? -SG,2014-04-30
# define MAXSIZEBITS    48  ///< Max size of memory in allocator
#elif defined(__i386__)
# define MAXSIZEBITS    32
#elif defined(__arm__)
/* XXX This should be < 32 - but there were no compile-time warnings when it was not! */
# define MAXSIZEBITS    31
#elif defined(__aarch64__)
# define MAXSIZEBITS    31
#else
# error Unknown architecture
#endif
#define MINSIZEBITS     OBJBITS_DISPATCHER  ///< Min size of each allocation
#define MAXCHILDBITS    4                   ///< Max branching of BTree nodes

/// Maximum depth of the BTree, assuming only branching by two at each level
#define MAXDEPTH        (MAXSIZEBITS - MINSIZEBITS + 1)
/// Maximum number of BTree nodes
#define NNODES          ((1UL << MAXDEPTH) - 1)
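// Worked example (illustrative only; OBJBITS_DISPATCHER is assumed to be 10
// here): with MAXSIZEBITS == 32, MAXDEPTH = 32 - 10 + 1 = 23 and
// NNODES = 2^23 - 1, i.e. roughly 8.4 million nodes.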
/* Parameters for per-core memserv */
#define PERCORE_BITS    24
#define PERCORE_MEM     (1UL << PERCORE_BITS)  ///< How much memory per-core
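// With PERCORE_BITS == 24 this is 16 MiB of RAM per core.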
//static struct multi_slot_allocator msa;
static struct bootinfo *bi;
/**
 * \brief Size of CNodes to be created by slot allocator.
 *
 * Must satisfy:
 *   #CNODE_BITS >= MAXCHILDBITS            (cnode enough for max branching factor)
 *   (1UL << #CNODE_BITS) ** 2 >= #NNODES   (total number of slots is enough)
 */
#define NCNODES         (1UL << CNODE_BITS)    ///< Maximum number of CNodes

/// Watermark at which we must refill the slab allocator used for nodes
#define MINSPARENODES   (MAXDEPTH * 8)  // XXX: FIXME: experimentally determined!

/// MM allocator instance data
static struct mm mm_ram;

/// Slot allocator for MM
static struct slot_prealloc_2 ram_slot_alloc;
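
/**
 * \brief RAM allocation callback, installed via ram_alloc_set() in main().
 *
 * Hands out a RAM capability of 2^bits bytes from mm_ram; a non-zero maxlimit
 * constrains the allocation to the range [minbase, maxlimit] via
 * mm_alloc_range(). Illustrative use (assuming BASE_PAGE_BITS satisfies the
 * MINSIZEBITS assertion below):
 *
 *   struct capref cap;
 *   errval_t err = mymm_alloc(&cap, BASE_PAGE_BITS, 0, 0); // any page-sized RAM cap
 */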
static errval_t mymm_alloc(struct capref *ret, uint8_t bits, genpaddr_t minbase,
                           genpaddr_t maxlimit)
{
    errval_t err;

    assert(bits >= MINSIZEBITS);

    if (maxlimit == 0) {
        err = mm_alloc(&mm_ram, bits, ret, NULL);
    } else {
        err = mm_alloc_range(&mm_ram, bits, minbase, maxlimit, ret, NULL);
    }

    return err;
}
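
/**
 * \brief Return a RAM capability to the allocator.
 *
 * If the region was never under mm_ram's control (mm_free() reports
 * MM_ERR_NOT_FOUND), it is added with mm_add() instead and mem_total grows
 * accordingly; in either case mem_avail is increased on success.
 */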
static errval_t mymm_free(struct capref ramcap, genpaddr_t base, uint8_t bits)
{
    errval_t ret;
    genpaddr_t mem_to_add;

    mem_to_add = (genpaddr_t)1 << bits;

    ret = mm_free(&mm_ram, ramcap, base, bits);
    if (err_is_fail(ret)) {
        if (err_no(ret) == MM_ERR_NOT_FOUND) {
            // memory wasn't there initially, add it
            ret = mm_add(&mm_ram, ramcap, bits, base);
            if (err_is_fail(ret)) {
                /* DEBUG_ERR(ret, "failed to add RAM to allocator"); */
                return ret;
            }
            mem_total += mem_to_add;
        } else {
            /* DEBUG_ERR(ret, "failed to free RAM in allocator"); */
            return ret;
        }
    }

    mem_avail += mem_to_add;

    return SYS_ERR_OK;
}
/// state for a pending reply
// because we have only one message that we send to a client, and there can only
// be one outstanding per binding (because this is an RPC interface) this is
// sufficient
struct pending_reply {
    struct mem_binding *b;
    errval_t err;
    struct capref *cap;
};
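
/*
 * Reply-retry idiom used by the handlers below: if sending a response fails
 * with FLOUNDER_ERR_TX_BUSY, the outcome is parked in a heap-allocated
 * pending_reply and register_send() arranges for the matching retry_*
 * continuation to run once the binding can transmit again.
 */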
static void retry_free_reply(void *arg)
{
    struct pending_reply *r = arg;
    assert(r != NULL);
    struct mem_binding *b = r->b;
    errval_t err;

    err = b->tx_vtbl.free_monitor_response(b, NOP_CONT, r->err);
    if (err_is_ok(err)) {
        free(r);
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        err = b->register_send(b, get_default_waitset(),
                               MKCONT(retry_free_reply,r));
    }

    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to reply to free request");
        free(r);
    }
}
static void allocate_response_done(void *arg)
{
    struct capref *cap = arg;

    if(!capref_is_null(*cap)) {
        errval_t err = cap_delete(*cap);
        if(err_is_fail(err) && err_no(err) != SYS_ERR_CAP_NOT_FOUND) {
            DEBUG_ERR(err, "cap_delete after send. This memory will leak.");
        }
    }

    free(cap);
}
static void retry_reply(void *arg)
{
    struct pending_reply *r = arg;
    assert(r != NULL);
    struct mem_binding *b = r->b;
    errval_t err;

    err = b->tx_vtbl.allocate_response(b, MKCONT(allocate_response_done, r->cap),
                                       r->err, *r->cap);
    if (err_is_ok(err)) {
        free(r);
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        err = b->register_send(b, get_default_waitset(), MKCONT(retry_reply,r));
        assert(err_is_ok(err));
    } else {
        DEBUG_ERR(err, "failed to reply to memory request");
        allocate_response_done(r->cap);
        free(r);
    }
}
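
/**
 * \brief Handler for the mem interface's free_monitor call.
 *
 * Returns the given RAM cap to the allocator via mymm_free() and replies with
 * the resulting error code, falling back to the retry idiom above if the
 * binding is currently busy.
 */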
static void mem_free_handler(struct mem_binding *b,
                             struct capref ramcap, genpaddr_t base,
                             uint8_t bits)
{
    errval_t ret;
    errval_t err;

    ret = mymm_free(ramcap, base, bits);

    err = b->tx_vtbl.free_monitor_response(b, NOP_CONT, ret);
    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct pending_reply *r = malloc(sizeof(struct pending_reply));
            assert(r != NULL);
            r->b = b;
            r->err = ret;
            err = b->register_send(b, get_default_waitset(),
                                   MKCONT(retry_free_reply,r));
            assert(err_is_ok(err));
        } else {
            DEBUG_ERR(err, "failed to reply to free request");
        }
    }
}
static void mem_available_handler(struct mem_binding *b)
{
    errval_t err;

    err = b->tx_vtbl.available_response(b, NOP_CONT, mem_avail, mem_total);
    if (err_is_fail(err)) {
        // FIXME: handle FLOUNDER_ERR_TX_BUSY
        DEBUG_ERR(err, "failed to reply to memory request");
    }
}
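
/**
 * \brief Handler for the mem interface's allocate call.
 *
 * Tops up the slot and slab allocators if necessary, allocates 2^bits bytes
 * (optionally within [minbase, maxlimit]) and sends the resulting capability
 * to the client; allocate_response_done() deletes our copy of the cap once
 * the reply has been transmitted.
 */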
// FIXME: error handling (not asserts) needed in this function
static void mem_allocate_handler(struct mem_binding *b, uint8_t bits,
                                 genpaddr_t minbase, genpaddr_t maxlimit)
{
    struct capref *cap = malloc(sizeof(struct capref));
    errval_t err, ret;

    // TODO: do this properly and inform caller, -SG 2016-04-20
    // XXX: Do we even want to have this restriction here? It's not necessary
    // for types that are not mappable (e.g. Dispatcher)
    //if (bits < BASE_PAGE_BITS) {
    //    bits = BASE_PAGE_BITS;
    //}
    //if (bits < BASE_PAGE_BITS) {
    //    debug_printf("WARNING: ALLOCATING RAM CAP WITH %u BITS\n", bits);
    //}

    trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_ALLOC, bits);
    /* refill slot allocator if needed */
    err = slot_prealloc_refill_2(mm_ram.slot_alloc_inst);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "slot_prealloc_refill in mem_allocate_handler");
    }
    assert(err_is_ok(err));

    /* refill slab allocator if needed */
    while (slab_freecount(&mm_ram.slabs) <= MINSPARENODES) {
        struct capref frame;
        err = slot_alloc(&frame);
        assert(err_is_ok(err));
        err = frame_create(frame, BASE_PAGE_SIZE * 8, NULL);
        assert(err_is_ok(err));
        void *buf;
        err = vspace_map_one_frame(&buf, BASE_PAGE_SIZE * 8, frame, NULL, NULL);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "vspace_map_one_frame failed");
            assert(buf);
        }
        slab_grow(&mm_ram.slabs, buf, BASE_PAGE_SIZE * 8);
    }
    ret = mymm_alloc(cap, bits, minbase, maxlimit);
    if (err_is_ok(ret)) {
        mem_avail -= 1UL << bits;
    } else {
        // DEBUG_ERR(ret, "allocation of %d bits in %" PRIxGENPADDR "-%" PRIxGENPADDR " failed",
        //           bits, minbase, maxlimit);
        *cap = NULL_CAP;
    }

    /* Reply */
    err = b->tx_vtbl.allocate_response(b, MKCONT(allocate_response_done, cap),
                                       ret, *cap);
    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct pending_reply *r = malloc(sizeof(struct pending_reply));
            assert(r != NULL);
            r->b = b;
            r->err = ret;
            r->cap = cap;
            err = b->register_send(b, get_default_waitset(), MKCONT(retry_reply,r));
            assert(err_is_ok(err));
        } else {
            DEBUG_ERR(err, "failed to reply to memory request");
            allocate_response_done(cap);
        }
    }
}
static void dump_ram_region(int idx, struct mem_region* m)
{
    uintptr_t start, limit;

    start = (uintptr_t)m->mr_base;
    limit = start + (1UL << m->mr_bits);

    char prefix = ' ';
    size_t quantity = 1UL << m->mr_bits;

    if (m->mr_bits >= 30) {
        prefix = 'G';
        quantity >>= 30;
    }
    else if (m->mr_bits >= 20) {
        prefix = 'M';
        quantity >>= 20;
    }
    else if (m->mr_bits >= 10) {
        prefix = 'K';
        quantity >>= 10;
    }

    printf("RAM region %d: 0x%" PRIxPTR
           " - 0x%" PRIxPTR " (%zu %cB, %u bits)\n",
           idx, start, limit, quantity, prefix, m->mr_bits);
}
static genpaddr_t find_smallest_address(void)
{
    bool isFirst = true;
    genpaddr_t smallest_addr = 0;

    for (int i = 0; i < bi->regions_length; i++) {
        if (bi->regions[i].mr_type != RegionType_Empty) {
            continue;
        }

        if (bi->regions[i].mr_consumed) {
            continue;
        }

        if (isFirst) {
            smallest_addr = bi->regions[i].mr_base;
            isFirst = false;
            continue;
        }

        if (smallest_addr > bi->regions[i].mr_base) {
            smallest_addr = bi->regions[i].mr_base;
        }
    } // end for: for every record
    return smallest_addr;
} // end function: find_smallest_address
static genpaddr_t guess_physical_addr_start(void)
{
    genpaddr_t start_physical = find_smallest_address();
#if defined(__arm__) || defined(__aarch64__)
    if (start_physical > 0x80000000) {
        // This is most probably a pandaboard!
        start_physical = 0x80000000;
    } else {
        // This is gem5 or some other architecture
        start_physical = 0x0;
    }
#endif
    return start_physical;
} // end function: guess_physical_addr_start
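
/**
 * \brief Set up the MM RAM allocator and seed it from bootinfo.
 *
 * Initialises the preallocating slot allocator and mm_ram, then walks the
 * bootinfo memory map and hands every unconsumed RegionType_Empty region to
 * the allocator, refilling the slot and slab allocators along the way.
 */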
// FIXME: error handling (not asserts) needed in this function
//XXX: workaround for inline bug of arm-gcc 4.6.1 and lower
#if defined(__ARM_ARCH_7A__) && defined(__GNUC__) \
    && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 && __GNUC_PATCHLEVEL__ <= 1
static __attribute__((noinline)) errval_t
#else
static errval_t
#endif
initialize_ram_alloc(void)
{
    errval_t err;

    /* Initialize slot allocator by passing an L2 cnode cap for it to start with */
    // Use ROOTCN_SLOT_SLOT_ALLOC0 as initial cnode for mm slot allocator
    struct capref cnode_start_cap = {
        .cnode = {
            .croot = CPTR_ROOTCN,
            .cnode = ROOTCN_SLOT_ADDR(ROOTCN_SLOT_SLOT_ALLOC0),
            .level = CNODE_TYPE_OTHER,
        },
        .slot  = 0,
    };
    /* init slot allocator */
    err = slot_prealloc_init_2(&ram_slot_alloc, MAXCHILDBITS,
                               cnode_start_cap, L2_CNODE_SLOTS,
                               &mm_ram);
    assert(err_is_ok(err));
    err = mm_init(&mm_ram, ObjType_RAM, guess_physical_addr_start(),
                  MAXSIZEBITS, MAXCHILDBITS, NULL,
                  slot_alloc_prealloc_2, NULL, &ram_slot_alloc, true);
    assert(err_is_ok(err));

    /* give MM allocator static storage to get it started */
    static char nodebuf[SLAB_STATIC_SIZE(MINSPARENODES, MM_NODE_SIZE(MAXCHILDBITS))];
    slab_grow(&mm_ram.slabs, nodebuf, sizeof(nodebuf));
    /* walk bootinfo and add all unused RAM caps to allocator */
    struct capref mem_cap = {
        .cnode = cnode_super,
        .slot  = 0,
    };

    for (int i = 0; i < bi->regions_length; i++) {
        if (bi->regions[i].mr_type == RegionType_Empty) {
            dump_ram_region(i, bi->regions + i);

            mem_total += bi->regions[i].mr_bytes;

            if (bi->regions[i].mr_consumed) {
                // region consumed by init, skipped
                mem_cap.slot++;
                continue;
            }

            err = mm_add_multi(&mm_ram, mem_cap, bi->regions[i].mr_bytes,
                               bi->regions[i].mr_base);
            if (err_is_ok(err)) {
                mem_avail += bi->regions[i].mr_bytes;
            } else {
                DEBUG_ERR(err, "Warning: adding RAM region %d (%p/%zu) FAILED",
                          i, bi->regions[i].mr_base, bi->regions[i].mr_bytes);
            }

            /* try to refill slot allocator (may fail if the mem allocator is empty) */
            err = slot_prealloc_refill_2(mm_ram.slot_alloc_inst);
            if (err_is_fail(err) && err_no(err) != MM_ERR_SLOT_MM_ALLOC) {
                DEBUG_ERR(err, "in slot_prealloc_refill_2() while initialising"
                          " memory allocator");
            }

            /* refill slab allocator if needed and possible */
            if (slab_freecount(&mm_ram.slabs) <= MINSPARENODES
                && mem_avail > (1UL << (CNODE_BITS + OBJBITS_CTE)) * 2
                               + 10 * BASE_PAGE_SIZE) {
                slab_default_refill(&mm_ram.slabs); // may fail
            }

            mem_cap.slot++;
        }
    }
    err = slot_prealloc_refill_2(mm_ram.slot_alloc_inst);
    if (err_is_fail(err)) {
        printf("Fatal internal error in RAM allocator: failed to initialise "
               "slot allocator\n");
        DEBUG_ERR(err, "failed to init slot allocator");
        abort();
    }

    printf("RAM allocator initialised, %zu MB (of %zu MB) available\n",
           mem_avail / 1024 / 1024, mem_total / 1024 / 1024);

    return SYS_ERR_OK;
}
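
/*
 * Service export: once mem_export() completes, export_callback() hands the
 * resulting iref to the monitor (set_mem_iref_request) so other domains can
 * find the memory service; connect_callback() installs the handler table for
 * each new client binding.
 */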
static void export_callback(void *st, errval_t err, iref_t iref)
{
    assert(err_is_ok(err));
    struct monitor_binding *mb = get_monitor_binding();
    err = mb->tx_vtbl.set_mem_iref_request(mb, NOP_CONT, iref);
    assert(err_is_ok(err));
}

static struct mem_rx_vtbl rx_vtbl = {
    .allocate_call = mem_allocate_handler,
    .available_call = mem_available_handler,
    .free_monitor_call = mem_free_handler,
};
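
// Checked in main()'s dispatch loop: once set, the loop lazily creates the
// monitor's blocking RPC client via monitor_client_blocking_rpc_init().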
static bool do_rpc_init = false;
static errval_t connect_callback(void *st, struct mem_binding *b)
{
    b->rx_vtbl = rx_vtbl;
    // TODO: set error handler
    return SYS_ERR_OK;
}
int main(int argc, char ** argv)
{
    errval_t err;
    struct waitset *ws = get_default_waitset();

    if (argc < 2) {
        fprintf(stderr, "Usage: %s <bootinfo_location>\n", argv[0]);
        return EXIT_FAILURE;
    }

    // First argument contains the bootinfo location
    bi = (struct bootinfo*)strtol(argv[1], NULL, 10);

    /* construct special-case LMP connection to monitor */
    static struct monitor_lmp_binding mcb;
    set_monitor_binding(&mcb.b);

    err = monitor_client_lmp_accept(&mcb, ws, DEFAULT_LMP_BUF_WORDS);
    if(err_is_fail(err)) {
        USER_PANIC_ERR(err, "monitor_client_lmp_accept");
    }
    /* Send the cap for this endpoint to init, who will pass it to
       the monitor */
    err = lmp_ep_send0(cap_initep, 0, mcb.chan.local_cap);
    if(err_is_fail(err)) {
        USER_PANIC_ERR(err, "lmp_ep_send0");
    }

    // XXX: handle messages (ie. block) until the monitor binding is ready
    while (capref_is_null(mcb.chan.remote_cap)) {
        err = event_dispatch(ws);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "in event_dispatch while waiting for monitor");
            return EXIT_FAILURE;
        }
    }
    /* Initialize our own memory allocator */
    err = ram_alloc_set(mymm_alloc);
    if(err_is_fail(err)) {
        USER_PANIC_ERR(err, "ram_alloc_set");
    }

    err = initialize_ram_alloc();
    if(err_is_fail(err)) {
        USER_PANIC_ERR(err, "initialize_ram_alloc");
    }

    /* Initialize self slot_allocator */
    static struct multi_slot_allocator msa;
    err = two_level_slot_alloc_init(&msa, DEFAULT_CNODE_SLOTS, NULL);
    if(err_is_fail(err)) {
        USER_PANIC_ERR(err, "two_level_slot_alloc_init");
    }

    err = mem_export(NULL, export_callback, connect_callback, ws,
                     IDC_EXPORT_FLAGS_DEFAULT);
    if(err_is_fail(err)) {
        USER_PANIC_ERR(err, "mem_export");
    }
    /* initialise tracing */
#if defined(TRACING_EXISTS) && defined(CONFIG_TRACE)
    err = trace_my_setup();
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "initialising tracing");
        // return EXIT_FAILURE;
    }
#endif
    // handle messages on this thread
    while (true) {
        err = event_dispatch(ws);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "in main event_dispatch loop");
            return EXIT_FAILURE;
        }
        static bool in_rpc_init = false;
        if (do_rpc_init && !in_rpc_init && !get_monitor_blocking_rpc_client()) {
            // XXX: this is an ugly hack to try and get a monitor rpc client
            // once the monitor is ready
            in_rpc_init = true;

            /* Bind with monitor's blocking rpc channel */
            err = monitor_client_blocking_rpc_init();
            if (err_is_fail(err)) {
                DEBUG_ERR(err, "monitor_client_blocking_rpc_init");
            } else {
                debug_printf("got monitor_blocking_rpc_client\n");