#include <barrelfish/barrelfish.h>
#include <barrelfish/capabilities.h>
#include <barrelfish/nameservice_client.h>

#include <stdlib.h>

#include <mm/mm.h>
#include <if/monitor_blocking_rpcclient_defs.h>

#include "fdif.h" // local driver header; assumed to provide FDIF_DEBUG

#define UNBITS_GENPA(bits) (((genpaddr_t)1) << (bits))
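
// Allocator that hands out DevFrame capabilities covering the physical
// range of the device registers (set up in init_memory_manager below).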
static struct mm register_manager;
/**
 * \brief Maps a physical register location into virtual address space.
 *
 * \param[in]  address        Physical address of the register region to map.
 * \param[in]  size           Size of the register region to map, in bytes.
 * \param[out] return_address Virtual address at which the region is mapped.
 *
 * \retval SYS_ERR_OK return_address is valid and mapped.
 */
errval_t map_device_register(lpaddr_t address, size_t size, lvaddr_t** return_address)
{
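    // Bookkeeping for ranges that have already been allocated and mapped,
    // so a request that falls inside an existing allocation can be served
    // by re-mapping the frame we already hold.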
    struct allocated_range {
        struct capref cr;
        struct frame_identity id;
        struct allocated_range* next;
    };
    static struct allocated_range* allocation_head = NULL;

    FDIF_DEBUG("map_device_register: %"PRIxLPADDR" %zu %zu\n", address, size,
               (size_t)log2ceil(size));

    assert(return_address != NULL);
    // TODO(gz) the paging in the kernel wants these two preconditions?
    assert(size % BASE_PAGE_SIZE == 0);
    assert(size >= BASE_PAGE_SIZE && "ARM paging breaks when smaller");

    struct capref devframe = NULL_CAP;
    errval_t err = mm_alloc_range(&register_manager, log2ceil(size),
                                  address, address + size,
                                  &devframe, NULL);
    if (err_is_fail(err)) {
        // TODO(gz) Is there a better way to handle duplicated allocations?
        FDIF_DEBUG("mm_alloc_range failed.\n");
        FDIF_DEBUG("Do we already have an allocation that covers this range?\n");
        struct allocated_range* iter = allocation_head;
        while (iter != NULL) {
            if (address >= iter->id.base &&
                (address + size <= (iter->id.base + UNBITS_GENPA(iter->id.bits)))) {
                FDIF_DEBUG("Apparently, yes. We try to map that one.\n");
                devframe = iter->cr;
                goto map_it; // yay, recovered!
            }
            iter = iter->next;
        }
        // One way out of here might be to re-try with a smaller size.
        DEBUG_ERR(err, "mm_alloc_range failed.\n");
        return err;
    }

map_it:
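    // Map the device frame into our virtual address space, uncached,
    // since it backs memory-mapped registers.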
    err = vspace_map_one_frame_attr((void**)return_address, size,
                                    devframe, VREGION_FLAGS_READ_WRITE_NOCACHE,
                                    NULL, NULL);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "vspace_map_one_frame_attr failed.\n");
        return err;
    }

    struct allocated_range* ar = calloc(1, sizeof(struct allocated_range));
    ar->cr = devframe;

    err = invoke_frame_identify(ar->cr, &ar->id);
    if (err_is_fail(err)) {
        free(ar);
        DEBUG_ERR(err, "frame identity failed.\n");
        return err;
    }

    // Insert into the queue to track the allocation
    struct allocated_range** iter = &allocation_head;
    while (*iter != NULL) {
        iter = &(*iter)->next;
    }
    *iter = ar;

    return SYS_ERR_OK;
}
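
/*
 * Illustrative use (not part of the original driver): map the first page of
 * a device's register block and read a 32-bit register through the returned
 * pointer. The address 0x4A10A000 is only a placeholder.
 *
 *     lvaddr_t *regs = NULL;
 *     errval_t err = map_device_register(0x4A10A000, BASE_PAGE_SIZE, &regs);
 *     assert(err_is_ok(err));
 *     volatile uint32_t *reg = (volatile uint32_t *)regs;
 *     uint32_t value = reg[0];
 */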

errval_t init_memory_manager(void)
{
    FDIF_DEBUG("init_memory_manager\n");
    errval_t err, error_code;

    struct monitor_blocking_rpc_client *cl = get_monitor_blocking_rpc_client();
    assert(cl != NULL);
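
    // Ask the monitor for the capability that covers the platform's
    // memory-mapped I/O region; device frames are carved out of it below.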
    struct capref requested_cap;
    err = cl->vtbl.get_io_cap(cl, &requested_cap, &error_code);
    assert(err_is_ok(err) && err_is_ok(error_code));

    // Initialize the memory allocator to handle DevFrame caps
    static struct range_slot_allocator devframes_allocator;
    err = range_slot_alloc_init(&devframes_allocator, 2048, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC_INIT);
    }

    struct frame_identity ret;
    err = invoke_frame_identify(requested_cap, &ret);
    assert(err_is_ok(err));
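
    // Set up the allocator to manage the physical range reported by the
    // frame identity, handing out DevFrame capabilities from it.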
    err = mm_init(&register_manager, ObjType_DevFrame, ret.base, ret.bits,
                  1, slab_default_refill, slot_alloc_dynamic,
                  &devframes_allocator, false);
    if (err_is_fail(err)) {
        return err_push(err, MM_ERR_MM_INIT);
    }

    err = mm_add(&register_manager, requested_cap,
                 ret.bits, ret.base);
    assert(err_is_ok(err));

    FDIF_DEBUG("init_memory_manager DONE\n");
    return SYS_ERR_OK;
}
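
/*
 * Note (not part of the original source): init_memory_manager() is expected
 * to run once during driver start-up, before the first call to
 * map_device_register(), since the latter allocates from register_manager.
 */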