/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2011, ETH Zurich.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */
#include <stdlib.h>
#include <stdio.h>

#include <barrelfish/barrelfish.h>
#include <barrelfish/dispatch.h>
#include <mm/mm.h>
#include <trace/trace.h>
#include <trace_definitions/trace_defs.h>
#include <barrelfish/morecore.h>
#include <barrelfish/monitor_client.h>

#include <if/mem_defs.h>
#include <if/monitor_defs.h>
size_t mem_total = 0, mem_avail = 0;
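// Book-keeping (in bytes): mem_total counts all RAM that has been handed to
// the allocator, mem_avail what is currently free; both are reported to
// clients by mem_available_handler() below.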
/* parameters for size of supported RAM and thus required storage */
// Regardless of the architecture, we use paddr_t as the type to represent
// region limits, which bounds the supported size.
#if defined(__x86_64__)
// x86_64 usually supports 48 bits of physical address space, maybe figure
// this out dynamically? -SG,2014-04-30
# define MAXSIZEBITS 48 ///< Max size of memory in allocator
#elif defined(__i386__)
# define MAXSIZEBITS 32
#elif defined(__arm__)
/* XXX This is better if < 32! - but there were no compile time warnings! */
# define MAXSIZEBITS 31
#elif defined(__aarch64__)
# define MAXSIZEBITS 31
#else
# error Unknown architecture
#endif
#define MINSIZEBITS OBJBITS_DISPATCHER ///< Min size of each allocation
#define MAXCHILDBITS 4                 ///< Max branching of BTree nodes

/// Maximum depth of the BTree, assuming only branching by two at each level
#define MAXDEPTH (MAXSIZEBITS - MINSIZEBITS + 1)
/// Maximum number of BTree nodes
#define NNODES ((1UL << MAXDEPTH) - 1)
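// A binary tree with MAXDEPTH levels holds at most 2^MAXDEPTH - 1 nodes,
// which is exactly what the NNODES formula above computes.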
/* Parameters for per-core memserv */
#define PERCORE_BITS 24
#define PERCORE_MEM (1UL << PERCORE_BITS) ///< How much memory per-core
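// With PERCORE_BITS == 24 this amounts to 1UL << 24 bytes = 16 MiB per core.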
//static struct multi_slot_allocator msa;
static struct bootinfo *bi;
/**
 * \brief Size of CNodes to be created by slot allocator.
 *
 * Must satisfy both:
 *    #CNODE_BITS >= MAXCHILDBITS           (cnode enough for max branching factor)
 *    (1UL << #CNODE_BITS) ** 2 >= #NNODES  (total number of slots is enough)
 */
#define NCNODES (1UL << CNODE_BITS) ///< Maximum number of CNodes
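/*
 * Sketch of a build-time check for the two constraints documented above;
 * assumes a C11-capable compiler (_Static_assert) and is left disabled so it
 * cannot affect the existing build.
 */
#if 0
_Static_assert(CNODE_BITS >= MAXCHILDBITS,
               "CNode must hold the maximum branching factor");
_Static_assert((1UL << CNODE_BITS) * (1UL << CNODE_BITS) >= NNODES,
               "slot capacity must cover all BTree nodes");
#endif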
/// Watermark at which we must refill the slab allocator used for nodes
#define MINSPARENODES (MAXDEPTH * 8) // XXX: FIXME: experimentally determined!

/// MM allocator instance data
static struct mm mm_ram;

/// Slot allocator for MM
static struct slot_prealloc_2 ram_slot_alloc;
static errval_t mymm_alloc(struct capref *ret, uint8_t bits, genpaddr_t minbase,
                           genpaddr_t maxlimit)
{
    errval_t err;

    assert(bits >= MINSIZEBITS);

    if (maxlimit == 0) {
        err = mm_alloc(&mm_ram, bits, ret, NULL);
    } else {
        err = mm_alloc_range(&mm_ram, bits, minbase, maxlimit, ret, NULL);
    }

    return err;
}
static errval_t mymm_free(struct capref ramcap, genpaddr_t base, uint8_t bits)
{
    errval_t ret;
    genpaddr_t mem_to_add;

    mem_to_add = (genpaddr_t)1 << bits;

    ret = mm_free(&mm_ram, ramcap, base, bits);
    if (err_is_fail(ret)) {
        if (err_no(ret) == MM_ERR_NOT_FOUND) {
            // memory wasn't there initially, add it
            ret = mm_add(&mm_ram, ramcap, bits, base);
            if (err_is_fail(ret)) {
                /* DEBUG_ERR(ret, "failed to add RAM to allocator"); */
                return ret;
            }
            mem_total += mem_to_add;
        } else {
            /* DEBUG_ERR(ret, "failed to free RAM in allocator"); */
            return ret;
        }
    }

    mem_avail += mem_to_add;

    return SYS_ERR_OK;
}
/// state for a pending reply
// Because we have only one message that we send to a client, and there can
// only be one outstanding per binding (because this is an RPC interface),
// this is all the state we need.
struct pending_reply {
    struct mem_binding *b;
    errval_t err;
    struct capref *cap;
};
static void retry_free_reply(void *arg)
{
    struct pending_reply *r = arg;
    assert(r != NULL);
    struct mem_binding *b = r->b;
    errval_t err;

    err = b->tx_vtbl.free_monitor_response(b, NOP_CONT, r->err);
    if (err_is_ok(err)) {
        free(r);
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        err = b->register_send(b, get_default_waitset(),
                               MKCONT(retry_free_reply, r));
    }

    if (err_is_fail(err)) {
        DEBUG_ERR(err, "failed to reply to free request");
        free(r);
    }
}
static void allocate_response_done(void *arg)
{
    struct capref *cap = arg;

    if (!capref_is_null(*cap)) {
        errval_t err = cap_delete(*cap);
        if (err_is_fail(err) && err_no(err) != SYS_ERR_CAP_NOT_FOUND) {
            DEBUG_ERR(err, "cap_delete after send. This memory will leak.");
        }
    }

    free(cap);
}
static void retry_reply(void *arg)
{
    struct pending_reply *r = arg;
    assert(r != NULL);
    struct mem_binding *b = r->b;
    errval_t err;

    err = b->tx_vtbl.allocate_response(b, MKCONT(allocate_response_done, r->cap),
                                       r->err, *r->cap);
    if (err_is_ok(err)) {
        free(r);
    } else if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
        err = b->register_send(b, get_default_waitset(), MKCONT(retry_reply, r));
        assert(err_is_ok(err));
    } else {
        DEBUG_ERR(err, "failed to reply to memory request");
        allocate_response_done(r->cap);
        free(r);
    }
}
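// Note on the retry pattern used above: a FLOUNDER_ERR_TX_BUSY result means
// the binding cannot accept another message right now, so the reply arguments
// are parked in a heap-allocated struct pending_reply and register_send()
// arranges for the retry continuation to run once the channel can send again.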
static void mem_free_handler(struct mem_binding *b,
                             struct capref ramcap, genpaddr_t base,
                             uint8_t bits)
{
    errval_t ret;
    errval_t err;

    ret = mymm_free(ramcap, base, bits);

    err = b->tx_vtbl.free_monitor_response(b, NOP_CONT, ret);
    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct pending_reply *r = malloc(sizeof(struct pending_reply));
            assert(r != NULL);
            r->b = b;
            r->err = ret;
            err = b->register_send(b, get_default_waitset(),
                                   MKCONT(retry_free_reply, r));
            assert(err_is_ok(err));
        } else {
            DEBUG_ERR(err, "failed to reply to free request");
        }
    }
}
static void mem_available_handler(struct mem_binding *b)
{
    errval_t err;

    err = b->tx_vtbl.available_response(b, NOP_CONT, mem_avail, mem_total);
    if (err_is_fail(err)) {
        // FIXME: handle FLOUNDER_ERR_TX_BUSY
        DEBUG_ERR(err, "failed to reply to memory request");
    }
}
// FIXME: error handling (not asserts) needed in this function
static void mem_allocate_handler(struct mem_binding *b, uint8_t bits,
                                 genpaddr_t minbase, genpaddr_t maxlimit)
{
    struct capref *cap = malloc(sizeof(struct capref));
    errval_t err, ret;

    // TODO: do this properly and inform caller, -SG 2016-04-20
    // XXX: Do we even want to have this restriction here? It's not necessary
    // for types that are not mappable (e.g. Dispatcher)
    //if (bits < BASE_PAGE_BITS) {
    //    bits = BASE_PAGE_BITS;
    //}
    //if (bits < BASE_PAGE_BITS) {
    //    debug_printf("WARNING: ALLOCATING RAM CAP WITH %u BITS\n", bits);
    //}

    trace_event(TRACE_SUBSYS_MEMSERV, TRACE_EVENT_MEMSERV_ALLOC, bits);

    /* refill slot allocator if needed */
    err = slot_prealloc_refill_2(mm_ram.slot_alloc_inst);
    assert(err_is_ok(err));

    /* refill slab allocator if needed */
    while (slab_freecount(&mm_ram.slabs) <= MINSPARENODES) {
        struct capref frame;
        err = slot_alloc(&frame);
        assert(err_is_ok(err));
        err = frame_create(frame, BASE_PAGE_SIZE * 8, NULL);
        assert(err_is_ok(err));
        void *buf;
        err = vspace_map_one_frame(&buf, BASE_PAGE_SIZE * 8, frame, NULL, NULL);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "vspace_map_one_frame failed");
            assert(buf);
        }
        slab_grow(&mm_ram.slabs, buf, BASE_PAGE_SIZE * 8);
    }

    ret = mymm_alloc(cap, bits, minbase, maxlimit);
    if (err_is_ok(ret)) {
        mem_avail -= 1UL << bits;
    } else {
        // DEBUG_ERR(ret, "allocation of %d bits in % " PRIxGENPADDR "-%" PRIxGENPADDR " failed",
        //           bits, minbase, maxlimit);
        *cap = NULL_CAP;
    }

    err = b->tx_vtbl.allocate_response(b, MKCONT(allocate_response_done, cap),
                                       ret, *cap);
    if (err_is_fail(err)) {
        if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
            struct pending_reply *r = malloc(sizeof(struct pending_reply));
            assert(r != NULL);
            r->b = b;
            r->err = ret;
            r->cap = cap;
            err = b->register_send(b, get_default_waitset(), MKCONT(retry_reply, r));
            assert(err_is_ok(err));
        } else {
            DEBUG_ERR(err, "failed to reply to memory request");
            allocate_response_done(cap);
        }
    }
}
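// Worked example (illustrative numbers): a request with bits == 20 asks for a
// 2^20-byte (1 MiB) RAM cap; on success mem_avail shrinks by 1UL << 20.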
static void dump_ram_region(int idx, struct mem_region* m)
{
    uintptr_t start, limit;

    start = (uintptr_t)m->mr_base;
    limit = start + (1UL << m->mr_bits);

    char prefix = ' ';
    size_t quantity = 1UL << m->mr_bits;

    if (m->mr_bits >= 30) {
        prefix = 'G';
        quantity >>= 30;
    }
    else if (m->mr_bits >= 20) {
        prefix = 'M';
        quantity >>= 20;
    }
    else if (m->mr_bits >= 10) {
        prefix = 'K';
        quantity >>= 10;
    }

    printf("RAM region %d: 0x%" PRIxPTR
           " - 0x%" PRIxPTR " (%zu %cB, %u bits)\n",
           idx, start, limit, quantity, prefix, m->mr_bits);
}
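// Example of the output format (illustrative values): a 2^28-byte region at
// 0x80000000 prints as
//   RAM region 3: 0x80000000 - 0x90000000 (256 MB, 28 bits)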
static genpaddr_t find_smallest_address(void)
{
    bool isFirst = true;
    genpaddr_t smallest_addr = 0;

    for (int i = 0; i < bi->regions_length; i++) {
        if (bi->regions[i].mr_type != RegionType_Empty) {
            continue;
        }

        if (bi->regions[i].mr_consumed) {
            continue;
        }

        if (isFirst) {
            smallest_addr = bi->regions[i].mr_base;
            isFirst = false;
            continue;
        }

        if (smallest_addr > bi->regions[i].mr_base) {
            smallest_addr = bi->regions[i].mr_base;
        }
    } // end for: for every record
    return smallest_addr;
} // end function: find_smallest_address
static genpaddr_t guess_physical_addr_start(void)
{
    genpaddr_t start_physical = find_smallest_address();
#if defined(__arm__) || defined(__aarch64__)
    if (start_physical > 0x80000000) {
        // This is most probably a pandaboard!
        start_physical = 0x80000000;
    } else {
        // This is gem5 or some other architecture
        start_physical = 0;
    }
#else
    start_physical = 0;
#endif
    return start_physical;
} // end function: guess_physical_addr_start
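// For reference: 0x80000000 is the 2 GiB boundary, where RAM starts on the
// OMAP44xx PandaBoard; a smallest free address above that limit is therefore
// taken to mean PandaBoard and the guess is clamped to the RAM base.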
// FIXME: error handling (not asserts) needed in this function
//XXX: workaround for inline bug of arm-gcc 4.6.1 and lower
#if defined(__ARM_ARCH_7A__) && defined(__GNUC__) \
    && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 && __GNUC_PATCHLEVEL__ <= 1
static __attribute__((noinline)) errval_t
#else
static errval_t
#endif
initialize_ram_alloc(void)
{
    errval_t err;
    /* Initialize slot allocator by passing an L2 cnode cap for it to start with */
    // Use ROOTCN_SLOT_SLOT_ALLOC0 as initial cnode for mm slot allocator
    struct capref cnode_start_cap = {
        .cnode = {
            .croot = CPTR_ROOTCN,
            .cnode = ROOTCN_SLOT_ADDR(ROOTCN_SLOT_SLOT_ALLOC0),
            .level = CNODE_TYPE_OTHER,
        },
        .slot = 0,
    };

    /* init slot allocator */
    err = slot_prealloc_init_2(&ram_slot_alloc, MAXCHILDBITS,
                               cnode_start_cap, DEFAULT_CNODE_SLOTS,
                               &mm_ram);
    assert(err_is_ok(err));

    err = mm_init(&mm_ram, ObjType_RAM, guess_physical_addr_start(),
                  MAXSIZEBITS, MAXCHILDBITS, NULL,
                  slot_alloc_prealloc_2, NULL, &ram_slot_alloc, true);
    assert(err_is_ok(err));

    /* give MM allocator static storage to get it started */
    static char nodebuf[SLAB_STATIC_SIZE(MINSPARENODES, MM_NODE_SIZE(MAXCHILDBITS))];
    slab_grow(&mm_ram.slabs, nodebuf, sizeof(nodebuf));
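    /*
     * Bootstrapping note: the MM allocator needs slab-backed node storage
     * before it can hand out any RAM, so its first batch of nodes must come
     * from this static buffer rather than from dynamic allocation.
     */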
    /* walk bootinfo and add all unused RAM caps to allocator */
    struct capref mem_cap = {
        .cnode = cnode_super,
        .slot = 0,
    };

    for (int i = 0; i < bi->regions_length; i++) {
        if (bi->regions[i].mr_type == RegionType_Empty) {
            dump_ram_region(i, bi->regions + i);

            mem_total += bi->regions[i].mr_bytes;

            if (bi->regions[i].mr_consumed) {
                // region consumed by init, skipped
                mem_cap.slot++;
                continue;
            }

            err = mm_add_multi(&mm_ram, mem_cap, bi->regions[i].mr_bytes,
                               bi->regions[i].mr_base);
            if (err_is_ok(err)) {
                mem_avail += bi->regions[i].mr_bytes;
            } else {
                DEBUG_ERR(err, "Warning: adding RAM region %d (%p/%zu) FAILED",
                          i, bi->regions[i].mr_base, bi->regions[i].mr_bytes);
            }

            /* try to refill slot allocator (may fail if the mem allocator is empty) */
            err = slot_prealloc_refill_2(mm_ram.slot_alloc_inst);
            if (err_is_fail(err) && err_no(err) != MM_ERR_SLOT_MM_ALLOC) {
                DEBUG_ERR(err, "in slot_prealloc_refill_2() while initialising"
                          " memory allocator");
                abort();
            }

            /* refill slab allocator if needed and possible */
            if (slab_freecount(&mm_ram.slabs) <= MINSPARENODES
                && mem_avail > (1UL << (CNODE_BITS + OBJBITS_CTE)) * 2
                               + 10 * BASE_PAGE_SIZE) {
                slab_default_refill(&mm_ram.slabs); // may fail
            }

            mem_cap.slot++;
        }
    }
    err = slot_prealloc_refill_2(mm_ram.slot_alloc_inst);
    if (err_is_fail(err)) {
        printf("Fatal internal error in RAM allocator: failed to initialise "
               "slot allocator\n");
        DEBUG_ERR(err, "failed to init slot allocator");
        abort();
    }

    printf("RAM allocator initialised, %zd MB (of %zd MB) available\n",
           mem_avail / 1024 / 1024, mem_total / 1024 / 1024);

    return SYS_ERR_OK;
}
static void export_callback(void *st, errval_t err, iref_t iref)
{
    assert(err_is_ok(err));
    struct monitor_binding *mb = get_monitor_binding();
    err = mb->tx_vtbl.set_mem_iref_request(mb, NOP_CONT, iref);
    assert(err_is_ok(err));
}
static struct mem_rx_vtbl rx_vtbl = {
    .allocate_call = mem_allocate_handler,
    .available_call = mem_available_handler,
    .free_monitor_call = mem_free_handler,
};

static bool do_rpc_init = false;
static errval_t connect_callback(void *st, struct mem_binding *b)
{
    do_rpc_init = true;
    b->rx_vtbl = rx_vtbl;
    // TODO: set error handler
    return SYS_ERR_OK;
}
int main(int argc, char **argv)
{
    errval_t err;
    struct waitset *ws = get_default_waitset();

    if (argc < 2) {
        fprintf(stderr, "Usage: %s <bootinfo_location>\n", argv[0]);
        return EXIT_FAILURE;
    }

    // First argument contains the bootinfo location
    bi = (struct bootinfo*)strtol(argv[1], NULL, 10);
    /* construct special-case LMP connection to monitor */
    static struct monitor_lmp_binding mcb;
    set_monitor_binding(&mcb.b);

    err = monitor_client_lmp_accept(&mcb, ws, DEFAULT_LMP_BUF_WORDS);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "monitor_client_lmp_accept");
    }

    idc_init();

    /* Send the cap for this endpoint to init, who will pass it to
       the monitor */
    err = lmp_ep_send0(cap_initep, 0, mcb.chan.local_cap);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "lmp_ep_send0");
    }

    // XXX: handle messages (ie. block) until the monitor binding is ready
    while (capref_is_null(mcb.chan.remote_cap)) {
        err = event_dispatch(ws);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "in event_dispatch while waiting for monitor");
            return EXIT_FAILURE;
        }
    }
    /* Initialize our own memory allocator */
    err = ram_alloc_set(mymm_alloc);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "ram_alloc_set");
    }

    err = initialize_ram_alloc();
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "initialize_ram_alloc");
    }

    /* Initialize self slot_allocator */
    static struct multi_slot_allocator msa;
    err = two_level_slot_alloc_init(&msa, DEFAULT_CNODE_SLOTS, NULL);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "two_level_slot_alloc_init");
    }

    err = mem_export(NULL, export_callback, connect_callback, ws,
                     IDC_EXPORT_FLAGS_DEFAULT);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "mem_export");
    }
    /* initialise tracing */
#if defined(TRACING_EXISTS) && defined(CONFIG_TRACE)
    err = trace_my_setup();
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "initialising tracing");
        // return EXIT_FAILURE;
    }
#endif
    // handle messages on this thread
    while (true) {
        err = event_dispatch(ws);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "in main event_dispatch loop");
            return EXIT_FAILURE;
        }

        static bool in_rpc_init = false;
        if (do_rpc_init && !in_rpc_init && !get_monitor_blocking_rpc_client()) {
            // XXX: this is an ugly hack to try and get a monitor rpc client
            // once the monitor is ready
            in_rpc_init = true;

            /* Bind with monitor's blocking rpc channel */
            err = monitor_client_blocking_rpc_init();
            if (err_is_fail(err)) {
                DEBUG_ERR(err, "monitor_client_blocking_rpc_init");
            } else {
                debug_printf("got monitor_blocking_rpc_client\n");
            }
        }
    }
}