/**
 * \file
 * \brief x86_32 kernel bootup code.
 */

/*
 * Copyright (c) 2007-2013 ETH Zurich.
 * Copyright (c) 2014, HP Labs.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
 */
#include <kernel.h>
#include <string.h>
#include <paging_kernel_arch.h>
#include <elf/elf.h>
#include <kernel_multiboot.h>
#include <init.h>
#include <barrelfish_kpi/cpu.h>
#include <exec.h>
#include <getopt/getopt.h>
#include <dispatch.h>
#include <barrelfish_kpi/init.h>
#include <arch/x86/apic.h>
#include <startup.h>
#include <barrelfish_kpi/paging_arch.h>
#include <barrelfish_kpi/syscalls.h>
#include <target/x86/barrelfish_kpi/coredata_target.h>
#include <arch/x86/startup_x86.h>
/// Quick way to find the base address of a cnode capability
#define CNODE(cte)              (cte)->cap.u.cnode.cnode

/**
 * init's needed boot pages.
 */
#define INIT_PDIR_SIZE          X86_32_PDIR_ENTRIES(X86_32_INIT_SPACE_LIMIT)
#define INIT_PTABLE_SIZE        X86_32_PTABLE_ENTRIES(X86_32_INIT_SPACE_LIMIT)
#define INIT_PAGE_BITMAP        X86_32_PTABLE_PRESENT
/// Pointer to bootinfo structure for init
static struct bootinfo *bootinfo = (struct bootinfo *)BOOTINFO_BASE;

static struct spawn_state spawn_state;
#ifdef CONFIG_PAE
/**
 * Page directory pointer table for init user address space.
 */
static union x86_32_pdpte_entry *init_pdpte; //[INIT_PDPT_SIZE][PTABLE_SIZE]
#endif

/**
 * Page directory for init user address space.
 */
static union x86_32_pdir_entry *init_pdir; //[INIT_PDPT_SIZE][INIT_PDIR_SIZE][PTABLE_SIZE]

/**
 * Page tables for init user address space.
 */
static union x86_32_ptable_entry *init_ptable; //[INIT_PDPT_SIZE][INIT_PDIR_SIZE][INIT_PTABLE_SIZE][PTABLE_SIZE]
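
/*
 * Note: with CONFIG_PAE these tables form a three-level hierarchy
 * (PDPT -> page directory -> page tables); without PAE only the two-level
 * directory/table hierarchy is used and init_pdpte does not exist.
 */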
/**
 * \brief Convert ELF flags to page flags
 *
 * \param flags ELF program segment flags.
 *
 * \return Page flags for the mapping.
 *
 * Not all combinations may be supported by an architecture.
 */
static paging_x86_32_flags_t paging_elf_to_page_flags(uint32_t flags)
{
    paging_x86_32_flags_t pageflags = 0;

    pageflags |= flags & PF_R ? PTABLE_USER_SUPERVISOR : 0;
    pageflags |= flags & PF_W ? PTABLE_READ_WRITE : 0;
    pageflags |= flags & PF_X ? 0 : PTABLE_EXECUTE_DISABLE;

    return pageflags;
}
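
/*
 * Example (illustrative only): a text segment with PF_R|PF_X yields
 * PTABLE_USER_SUPERVISOR (user-accessible, read-only, executable); a data
 * segment with PF_R|PF_W yields PTABLE_USER_SUPERVISOR | PTABLE_READ_WRITE
 * | PTABLE_EXECUTE_DISABLE.
 */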
/**
 * \brief Map init user-space memory.
 *
 * This function maps pages of the init user-space module. It expects
 * the virtual base address 'vbase' of a program segment of the init
 * executable, its size 'size' and its ELF access control flags. It maps
 * pages to the sequential area of physical memory, given by 'base'. If
 * you want to allocate physical memory frames as you go, use
 * startup_alloc_init() instead.
 *
 * \param vbase Virtual base address of program segment.
 * \param base  Physical base address of program segment.
 * \param size  Size of program segment in bytes.
 * \param flags ELF access control flags of program segment.
 */
errval_t startup_map_init(lvaddr_t vbase, lpaddr_t base, size_t size,
                          uint32_t flags)
{
    lvaddr_t vaddr;

    paging_align(&vbase, &base, &size, BASE_PAGE_SIZE);
    assert(vbase + size < X86_32_INIT_SPACE_LIMIT);

    // Map pages
    for(vaddr = vbase; vaddr < vbase + size;
        vaddr += BASE_PAGE_SIZE, base += BASE_PAGE_SIZE) {
#ifdef CONFIG_PAE
        union x86_32_ptable_entry *ptable_base = &init_ptable[
            X86_32_PDPTE_BASE(vaddr) * X86_32_PTABLE_SIZE * X86_32_PTABLE_SIZE
            + X86_32_PDIR_BASE(vaddr) * X86_32_PTABLE_SIZE
            + X86_32_PTABLE_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 4K page: vaddr = 0x%"PRIxLVADDR
              ", base = 0x%"PRIxLPADDR", PDPTE_BASE = %lu, PDIR_BASE = %lu, "
              "PTABLE_BASE = %lu -- ", vaddr, base, X86_32_PDPTE_BASE(vaddr),
              X86_32_PDIR_BASE(vaddr), X86_32_PTABLE_BASE(vaddr));
#else
        union x86_32_ptable_entry *ptable_base = &init_ptable[
            X86_32_PDIR_BASE(vaddr) * X86_32_PTABLE_SIZE
            + X86_32_PTABLE_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 4K page: vaddr = 0x%"PRIxLVADDR
              ", base = 0x%"PRIxLPADDR", "
              "PDIR_BASE = %"PRIuLPADDR", "
              "PTABLE_BASE = %"PRIuLPADDR" -- ", vaddr, base,
              X86_32_PDIR_BASE(vaddr), X86_32_PTABLE_BASE(vaddr));
#endif

        if(!X86_32_IS_PRESENT(ptable_base)) {
            debug(SUBSYS_PAGING, "mapped!\n");
            paging_x86_32_map(ptable_base, base,
                              INIT_PAGE_BITMAP | paging_elf_to_page_flags(flags));
        } else {
            debug(SUBSYS_PAGING, "already existing!\n");
        }
    }

    return SYS_ERR_OK;
}
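
/*
 * Usage sketch (hypothetical addresses, for illustration only):
 *
 *   // Map two pages of an init segment at virtual 0x400000, backed by
 *   // physical memory at 0x1000000, readable and writable:
 *   errval_t err = startup_map_init(0x400000, 0x1000000,
 *                                   2 * BASE_PAGE_SIZE, PF_R | PF_W);
 *   assert(err_is_ok(err));
 */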
/// Create physical address range or RAM caps to unused physical memory
static void create_phys_caps(lpaddr_t init_alloc_addr)
{
    errval_t err;

    // map first meg of RAM, which contains lots of crazy BIOS tables
    err = create_caps_to_cnode(0, X86_32_START_KERNEL_PHYS,
                               RegionType_PlatformData, &spawn_state, bootinfo);
    assert(err_is_ok(err));
    /* Walk multiboot MMAP structure, and create appropriate caps for memory */
    char *mmap_addr = MBADDR_ASSTRING(glbl_core_data->mmap_addr);
    genpaddr_t last_end_addr = 0;

    char *clean_mmap_addr;
    uint32_t clean_mmap_length;
    cleanup_bios_regions(mmap_addr, &clean_mmap_addr, &clean_mmap_length);
    for(char *m = mmap_addr; m < mmap_addr + glbl_core_data->mmap_length;) {
        struct multiboot_mmap *mmap = (struct multiboot_mmap * SAFE)TC(m);

        debug(SUBSYS_STARTUP, "MMAP %llx--%llx Type %"PRIu32"\n",
              mmap->base_addr, mmap->base_addr + mmap->length,
              mmap->type);

#if 0
        // XXX: Remove intersecting regions
        bool match = false;
        for(int i = 0; i < bootinfo->regions_length; i++) {
            struct mem_region *r = &bootinfo->regions[i];

            // Remove intersecting regions (earlier additions take precedence)
            if((r->base + (1 << r->bits) >= mmap->base_addr
                && r->base + (1 << r->bits) <= mmap->base_addr + mmap->length)
               || (r->base >= mmap->base_addr
                   && r->base <= mmap->base_addr + mmap->length)) {
                match = true;
                break;
            }
        }

        if(match) {
            m += mmap->size + 4;
            continue;
        }
#endif
        if (last_end_addr >= init_alloc_addr
            && mmap->base_addr > last_end_addr) {
            /* we have a gap between regions. add this as a physaddr range */
            debug(SUBSYS_STARTUP, "physical address range %llx--%llx\n",
                  last_end_addr, mmap->base_addr);

            err = create_caps_to_cnode(last_end_addr,
                                       mmap->base_addr - last_end_addr,
                                       RegionType_PhyAddr, &spawn_state, bootinfo);
            assert(err_is_ok(err));
        }
        if (mmap->type == MULTIBOOT_MEM_TYPE_RAM) {
            genpaddr_t base_addr = mmap->base_addr;
            genpaddr_t end_addr = base_addr + mmap->length;

            // only map RAM which is greater than init_alloc_addr
            if (end_addr > local_phys_to_gen_phys(init_alloc_addr)) {
                if (base_addr < local_phys_to_gen_phys(init_alloc_addr)) {
                    base_addr = local_phys_to_gen_phys(init_alloc_addr);
                }

                if(base_addr >= X86_32_PADDR_SPACE_SIZE) {
                    printk(LOG_NOTE, "skipping RAM [%llx--%llx] out of "
                           "mappable space\n", base_addr, end_addr);
                    last_end_addr = mmap->base_addr + mmap->length;
                    m += mmap->size + 4;
                    continue;
                }

                if(end_addr > X86_32_PADDR_SPACE_SIZE) {
                    printk(LOG_NOTE, "shortening RAM [%llx--%llx] to mappable "
                           "space [0--%llx]\n", base_addr, end_addr,
                           X86_32_PADDR_SPACE_LIMIT);
                    end_addr = X86_32_PADDR_SPACE_SIZE;
                }

                // XXX: Do not create ram caps for memory the kernel cannot
                // address to prevent kernel objects from being created there
                if(base_addr >= PADDR_SPACE_LIMIT) {
                    last_end_addr = mmap->base_addr + mmap->length;
                    m += mmap->size + 4;
                    continue;
                }
                if (end_addr > PADDR_SPACE_LIMIT) {
                    end_addr = PADDR_SPACE_LIMIT;
                }

                debug(SUBSYS_STARTUP, "RAM %llx--%llx\n", base_addr, end_addr);

                assert(end_addr >= base_addr);
                err = create_caps_to_cnode(base_addr, end_addr - base_addr,
                                           RegionType_Empty, &spawn_state, bootinfo);
                assert(err_is_ok(err));
            }
        } else if (mmap->base_addr > local_phys_to_gen_phys(init_alloc_addr)) {
            /* XXX: The multiboot spec just says that mapping types other than
             * RAM are "reserved", but GRUB always maps the ACPI tables as type
             * 3, and things like the IOAPIC tend to show up as type 2 or 4,
             * so we map all these regions as platform data
             */
            debug(SUBSYS_STARTUP, "platform %llx--%llx\n", mmap->base_addr,
                  mmap->base_addr + mmap->length);
            assert(mmap->base_addr > local_phys_to_gen_phys(init_alloc_addr));
            err = create_caps_to_cnode(mmap->base_addr, mmap->length,
                                       RegionType_PlatformData, &spawn_state, bootinfo);
            assert(err_is_ok(err));
        }

        last_end_addr = mmap->base_addr + mmap->length;
        m += mmap->size + 4;
    }
    // Assert that we have some physical address space
    assert(last_end_addr != 0);

    if (last_end_addr < X86_32_PADDR_SPACE_SIZE) {
        /*
         * FIXME: adding the full range results in too many caps to add
         * to the cnode (and we can't handle such big caps in user-space
         * yet anyway) so instead we limit it to something much smaller
         */
        genpaddr_t size = X86_32_PADDR_SPACE_SIZE - last_end_addr;

        const genpaddr_t phys_region_limit = 1ULL << 32; // PCI implementation limit
        if (last_end_addr > phys_region_limit) {
            size = 0; // end of RAM is already too high!
        } else if (last_end_addr + size > phys_region_limit) {
            size = phys_region_limit - last_end_addr;
        }

        debug(SUBSYS_STARTUP, "end physical address range %llx--%llx\n",
              last_end_addr, last_end_addr + size);
        err = create_caps_to_cnode(last_end_addr, size,
                                   RegionType_PhyAddr, &spawn_state, bootinfo);
        assert(err_is_ok(err));
    }
}
/// Size of the kernel image, rounded up to the next 4K page boundary.
/// (The original test `(SIZE_KERNEL_IMAGE & 0x1000) == SIZE_KERNEL_IMAGE`
/// only held for sizes of exactly one page; this is the intended round-up.)
#define NEEDED_KERNEL_SPACE \
    (((SIZE_KERNEL_IMAGE) & 0xfff) == 0 ? \
     (SIZE_KERNEL_IMAGE) : \
     ((SIZE_KERNEL_IMAGE) & ~0xfffUL) + 0x1000)
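
/*
 * Example: SIZE_KERNEL_IMAGE == 0x12345 rounds up to 0x13000; an already
 * page-aligned 0x13000 is returned unchanged.
 */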
#define OBJSPERPAGE_CTE         (1 << (BASE_PAGE_BITS - OBJBITS_CTE))
static void init_page_tables(struct spawn_state *st, alloc_phys_func alloc_phys)
{
    /* Allocate memory for init's page tables */
#ifdef CONFIG_PAE
    init_pdpte = (void *)local_phys_to_mem(alloc_phys(X86_32_PDPTE_SIZE
                                           * sizeof(union x86_32_pdpte_entry)));
#endif
    init_pdir = (void *)local_phys_to_mem(
                alloc_phys(X86_32_PTABLE_SIZE * INIT_PDIR_SIZE
                           * sizeof(union x86_32_pdir_entry)));
    init_ptable = (void *)local_phys_to_mem(
                alloc_phys(X86_32_PTABLE_SIZE * INIT_PDIR_SIZE
                           * INIT_PTABLE_SIZE * sizeof(union x86_32_ptable_entry)));

    /* Page table setup */
    /* Initialize init page tables */
    for(size_t j = 0; j < INIT_PDIR_SIZE; j++) {
        paging_x86_32_clear_pdir(&init_pdir[j]);
        for(size_t k = 0; k < INIT_PTABLE_SIZE; k++) {
            paging_x86_32_clear_ptable(&init_ptable[j * X86_32_PTABLE_SIZE + k]);
        }
    }
    /* Map pagetables into pageCN */
    int pagecn_pagemap = 0;
#ifdef CONFIG_PAE
    // Map PDPTE into first slot in pagecn
    caps_create_new(ObjType_VNode_x86_32_pdpt,
                    mem_to_local_phys((lvaddr_t)init_pdpte),
                    BASE_PAGE_BITS, 0, my_core_id,
                    caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
#endif
    // Map PDIR into successive slots in pagecn
    for(size_t i = 0; i < INIT_PDIR_SIZE; i++) {
        caps_create_new(ObjType_VNode_x86_32_pdir,
                        mem_to_local_phys((lvaddr_t)init_pdir) + i * BASE_PAGE_SIZE,
                        BASE_PAGE_BITS, 0, my_core_id,
                        caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
    }
    // Map page tables into successive slots in pagecn
    for(size_t i = 0; i < INIT_PTABLE_SIZE; i++) {
        caps_create_new(ObjType_VNode_x86_32_ptable,
                        mem_to_local_phys((lvaddr_t)init_ptable) + i * BASE_PAGE_SIZE,
                        BASE_PAGE_BITS, 0, my_core_id,
                        caps_locate_slot(CNODE(st->pagecn), pagecn_pagemap++));
    }
    // Connect all page tables to page directories.
    // init's memory manager expects page tables within the pagecn to
    // already be connected to the corresponding directories. To avoid
    // unnecessary special cases, we connect them here.
    for(lvaddr_t vaddr = 0; vaddr < X86_32_INIT_SPACE_LIMIT;
        vaddr += BASE_PAGE_SIZE) {
#ifdef CONFIG_PAE
        union x86_32_pdpte_entry *pdpte_base =
            &init_pdpte[X86_32_PDPTE_BASE(vaddr)];
        union x86_32_pdir_entry *pdir_base =
            &init_pdir[X86_32_PDPTE_BASE(vaddr) * X86_32_PTABLE_SIZE +
                       X86_32_PDIR_BASE(vaddr)];
        union x86_32_ptable_entry *ptable_base =
            &init_ptable[X86_32_PDPTE_BASE(vaddr) * X86_32_PTABLE_SIZE *
                         X86_32_PTABLE_SIZE + X86_32_PDIR_BASE(vaddr) *
                         X86_32_PTABLE_SIZE + X86_32_PTABLE_BASE(vaddr)];

        paging_x86_32_map_pdpte(pdpte_base, mem_to_local_phys((lvaddr_t)pdir_base));
#else
        union x86_32_pdir_entry *pdir_base =
            &init_pdir[X86_32_PDIR_BASE(vaddr)];
        union x86_32_ptable_entry *ptable_base =
            &init_ptable[X86_32_PDIR_BASE(vaddr) * X86_32_PTABLE_SIZE +
                         X86_32_PTABLE_BASE(vaddr)];
#endif
        paging_x86_32_map_table(pdir_base,
                                mem_to_local_phys((lvaddr_t)ptable_base));
    }
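
    /*
     * Index arithmetic sketch (non-PAE case, assuming the conventional
     * 10/10/12-bit x86 split): for vaddr 0x204000, X86_32_PDIR_BASE(vaddr)
     * is 0 and X86_32_PTABLE_BASE(vaddr) is 516 (0x204), so that page is
     * served by entry 516 of the first page table.
     */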
    /* Switch to init's VSpace */
#ifdef CONFIG_PAE
    paging_x86_32_context_switch(mem_to_local_phys((lvaddr_t)init_pdpte));
#else
    paging_x86_32_context_switch(mem_to_local_phys((lvaddr_t)init_pdir));
#endif

    /***** VSpace available *****/

    /* Map cmdline args R/W into VSpace at ARGS_BASE */
#ifdef CONFIG_PAE
    paging_x86_32_map_pdpte(&init_pdpte[X86_32_PDPTE_BASE(ARGS_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
#endif
    paging_x86_32_map_table(&init_pdir[X86_32_PDIR_BASE(ARGS_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < ARGS_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[X86_32_PTABLE_BASE(ARGS_BASE) + i],
                          st->args_page + i * BASE_PAGE_SIZE,
                          INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R | PF_W));
    }
}
static struct dcb *spawn_init_common(struct spawn_state *st, const char *name,
                                     int argc, const char *argv[],
                                     lpaddr_t bootinfo_phys,
                                     alloc_phys_func alloc_phys)
{
    errval_t err;

    /* Perform arch-independent spawn */
    lvaddr_t paramaddr;
    struct dcb *init_dcb = spawn_module(st, name, argc, argv, bootinfo_phys,
                                        ARGS_BASE, alloc_phys, &paramaddr);

    /* Init page tables */
    init_page_tables(st, alloc_phys);

    /* Map dispatcher R/W into VSpace starting at vaddr 0x204000
     * (starting after bootinfo pages) */
#ifdef CONFIG_PAE
    paging_x86_32_map_pdpte(&init_pdpte[X86_32_PDPTE_BASE(DISPATCHER_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
#endif
    paging_x86_32_map_table(&init_pdir[X86_32_PDIR_BASE(DISPATCHER_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < DISPATCHER_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[X86_32_PTABLE_BASE(DISPATCHER_BASE) + i],
                          mem_to_local_phys(init_dcb->disp) + i * BASE_PAGE_SIZE,
                          INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R | PF_W));
    }
    struct dispatcher_shared_generic *init_disp =
        get_dispatcher_shared_generic(init_dcb->disp);
    struct dispatcher_shared_x86_32 *init_disp_x86_32 =
        get_dispatcher_shared_x86_32(init_dcb->disp);

    registers_set_param(&init_disp_x86_32->enabled_save_area, paramaddr);

    // Map IO cap in task cnode
    struct cte *iocap = caps_locate_slot(CNODE(st->taskcn), TASKCN_SLOT_IO);
    err = caps_create_new(ObjType_IO, 0, 0, 0, my_core_id, iocap);
    assert(err_is_ok(err));

    /* Set fields in DCB */
    // Set VSpace
#ifdef CONFIG_PAE
    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_pdpte);
#else
    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_pdir);
#endif

    /* Initialize dispatcher */
    init_disp->disabled = true;
    strncpy(init_disp->name, argv[0], DISP_NAME_LEN);

    /* tell init the vspace addr of its dispatcher */
    init_disp->udisp = DISPATCHER_BASE;

    init_disp_x86_32->disabled_save_area.edi = DISPATCHER_BASE;
    init_disp_x86_32->disabled_save_area.fs = 0;
    init_disp_x86_32->disabled_save_area.gs = 0;
    init_disp_x86_32->disabled_save_area.cs = USER_CS;
    init_disp_x86_32->disabled_save_area.ss = USER_SS;
    init_disp_x86_32->disabled_save_area.eflags = USER_EFLAGS;

    return init_dcb;
}
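
/*
 * Note: disabled_save_area.eip is deliberately left unset here; the callers
 * spawn_bsp_init() and spawn_app_init() fill it in once the entry point of
 * the loaded ELF image is known.
 */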
struct dcb *spawn_bsp_init(const char *name, alloc_phys_func alloc_phys)
{
    errval_t err;

    /* Only the first core can run this code */
    assert(apic_is_bsp());

    /* Allocate bootinfo */
    lpaddr_t bootinfo_phys = alloc_phys(BOOTINFO_SIZE);
    memset((void *)local_phys_to_mem(bootinfo_phys), 0, BOOTINFO_SIZE);

    /* Construct cmdline args */
    char bootinfochar[16];
    snprintf(bootinfochar, sizeof(bootinfochar), "%"PRIuLPADDR, BOOTINFO_BASE);
    const char *argv[6] = { "init", bootinfochar };
    int argc = 2;

    if(glbl_core_data->urpc_frame_base != 0) {
        char coreidchar[10];
        snprintf(coreidchar, sizeof(coreidchar), "%d",
                 glbl_core_data->src_core_id);
        argv[argc++] = coreidchar;
        char chan_id_char[30];
        snprintf(chan_id_char, sizeof(chan_id_char), "chanid=%"PRIu32,
                 glbl_core_data->chan_id);
        argv[argc++] = chan_id_char;

        char urpc_frame_base_char[30];
        snprintf(urpc_frame_base_char, sizeof(urpc_frame_base_char),
                 "frame=%" PRIuGENPADDR, glbl_core_data->urpc_frame_base);
        argv[argc++] = urpc_frame_base_char;
    }

    struct dcb *init_dcb = spawn_init_common(&spawn_state, name, argc, argv,
                                             bootinfo_phys, alloc_phys);
    /* Map bootinfo R/W into VSpace at vaddr 0x200000 (BOOTINFO_BASE) */
#ifdef CONFIG_PAE
    paging_x86_32_map_pdpte(&init_pdpte[0], mem_to_local_phys((lvaddr_t)init_pdir));
    paging_x86_32_map_table(&init_pdir[1], mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < BOOTINFO_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[i], bootinfo_phys + i * BASE_PAGE_SIZE,
                          INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R|PF_W));
    }
#else
    paging_x86_32_map_table(&init_pdir[0], mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < BOOTINFO_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[i + 512], bootinfo_phys + i * BASE_PAGE_SIZE,
                          INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R|PF_W));
    }
#endif
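
    /*
     * The fixed indices above follow from BOOTINFO_BASE == 0x200000: with
     * PAE (2MB per directory entry) this is directory entry 1, page-table
     * entry 0; without PAE (4MB per directory entry) it is directory entry
     * 0, page-table entry 512 (0x200000 >> 12 == 512).
     */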
    /* Load init ELF32 binary */
    struct multiboot_modinfo *module = multiboot_find_module(name);
    if (module == NULL) {
        panic("Could not find init module!");
    }
    genvaddr_t init_ep;
    err = elf_load(EM_386, startup_alloc_init, &spawn_state,
                   local_phys_to_mem(module->mod_start),
                   MULTIBOOT_MODULE_SIZE(*module), &init_ep);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    struct dispatcher_shared_x86_32 *init_disp_x86_32 =
        get_dispatcher_shared_x86_32(init_dcb->disp);
    init_disp_x86_32->disabled_save_area.eip = init_ep;

    /* Create caps for init to use */
    create_module_caps(&spawn_state);
    lpaddr_t init_alloc_end = alloc_phys(0); // XXX
    create_phys_caps(init_alloc_end);
    /* Fill bootinfo struct */
    bootinfo->mem_spawn_core = NEEDED_KERNEL_SPACE; // Size of kernel

    /* for (int i = 0; i < bootinfo->regions_length; i++) { */
    /*     printf("%d region %d: 0x%09" PRIxPTR " - 0x%09lx (%lu MB, %u bits)\n", */
    /*            bootinfo->regions[i].mr_type, i, bootinfo->regions[i].mr_base, */
    /*            bootinfo->regions[i].mr_base + (1UL<<bootinfo->regions[i].mr_bits), */
    /*            bootinfo->regions[i].mr_bits >= 20 */
    /*            ? 1UL << (bootinfo->regions[i].mr_bits - 20) : 0, */
    /*            bootinfo->regions[i].mr_bits); */
    /* } */
#if 0
    // If app core, map (static) URPC channel
    if(kernel_scckernel != 0) {
        printf("SCC app kernel, frame at: 0x%x\n", kernel_scckernel);
#define TASKCN_SLOT_MON_URPC (TASKCN_SLOTS_USER+6) ///< Frame cap for urpc comm.

        err = caps_create_new(ObjType_Frame, kernel_scckernel, 13, 13,
                              caps_locate_slot(CNODE(taskcn), TASKCN_SLOT_MON_URPC));
        assert(err_is_ok(err));
    }
#endif

    return init_dcb;
}
struct dcb *spawn_app_init(struct x86_core_data *core_data,
                           const char *name, alloc_phys_func alloc_phys)
{
    errval_t err;

    /* Construct cmdline args */
    // Core id of the core that booted this core
    char coreidchar[10];
    snprintf(coreidchar, sizeof(coreidchar), "%d", core_data->src_core_id);

    // IPI channel id of core that booted this core
    char chanidchar[30];
    snprintf(chanidchar, sizeof(chanidchar), "chanid=%"PRIu32, core_data->chan_id);

    // Arch id of the core that booted this core
    char archidchar[30];
    snprintf(archidchar, sizeof(archidchar), "archid=%d",
             core_data->src_arch_id);

    const char *argv[5] = { name, coreidchar, chanidchar, archidchar };
    int argc = 4;
    char urpc_frame_base_char[30];
    snprintf(urpc_frame_base_char, sizeof(urpc_frame_base_char),
             "frame=%" PRIuGENPADDR, core_data->urpc_frame_base);
    argv[argc++] = urpc_frame_base_char;

    struct dcb *init_dcb = spawn_init_common(&spawn_state, name, argc, argv,
                                             0, alloc_phys);

    struct cte *urpc_frame_cte = caps_locate_slot(CNODE(spawn_state.taskcn),
                                                  TASKCN_SLOT_MON_URPC);
    // XXX: Create as devframe so the memory is not zeroed out
    err = caps_create_new(ObjType_DevFrame, core_data->urpc_frame_base,
                          core_data->urpc_frame_bits,
                          core_data->urpc_frame_bits, core_data->src_core_id,
                          urpc_frame_cte);
    assert(err_is_ok(err));
    urpc_frame_cte->cap.type = ObjType_Frame;
    lpaddr_t urpc_ptr = gen_phys_to_local_phys(urpc_frame_cte->cap.u.frame.base);
    /* Map urpc frame at MON_URPC_BASE */
#ifdef CONFIG_PAE
    paging_x86_32_map_pdpte(&init_pdpte[X86_32_PDPTE_BASE(MON_URPC_BASE)],
                            mem_to_local_phys((lvaddr_t)init_pdir));
#endif
    paging_x86_32_map_table(&init_pdir[X86_32_PDIR_BASE(MON_URPC_BASE)],
                            mem_to_local_phys((lvaddr_t)init_ptable));
    for (int i = 0; i < MON_URPC_SIZE / BASE_PAGE_SIZE; i++) {
        paging_x86_32_map(&init_ptable[X86_32_PTABLE_BASE(MON_URPC_BASE) + i],
                          urpc_ptr + i * BASE_PAGE_SIZE,
                          INIT_PAGE_BITMAP | paging_elf_to_page_flags(PF_R | PF_W));
    }
    // ELF-load the monitor binary for this app core
    genvaddr_t entry_point;
    err = elf_load(EM_386, startup_alloc_init, &spawn_state,
                   local_phys_to_mem(core_data->monitor_binary),
                   core_data->monitor_binary_size, &entry_point);
    if (err_is_fail(err)) {
        //err_print_calltrace(err);
        panic("ELF load of init module failed!");
    }

    struct dispatcher_shared_x86_32 *init_disp_x86_32 =
        get_dispatcher_shared_x86_32(init_dcb->disp);
    init_disp_x86_32->disabled_save_area.eip = entry_point;

    return init_dcb;
}