2 * Copyright (c) 2016, 2017, ETH Zurich.
3 * Copyright (c) 2016, Hewlett Packard Enterprise Development LP.
6 * This file is distributed under the terms in the attached LICENSE file.
7 * If you do not find this file, copies can be found by writing to:
8 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
11 /* CPU driver VM initialisation.
13 This is the entry point on booting the first core, and needs to deal with
14 the state left by UEFI. The CPU is mostly configured, in particular
15 translation is enabled, and all RAM is mapped 1-1. We'll also be in either
16 EL3 or EL2. We need to map the EL1 kernel window (TTBR1), drop to EL1, and
17 jump to the next routine, which has already been relocated for us.
23 #include <barrelfish_kpi/types.h>
27 #include <dev/armv8_dev.h>
29 #include <multiboot2.h>
30 #include <barrelfish_kpi/arm_core_data.h>
32 void eret(uint64_t a0, uint64_t a1, uint64_t a2, uint64_t a3);
34 void boot_bsp_init(uint32_t magic, lpaddr_t pointer)
35 __attribute__((noreturn));
36 void boot_app_init(lpaddr_t pointer)
37 __attribute__((noreturn));
39 /* low level debugging facilities */
42 #include <dev/pl011_uart_dev.h>
44 #define CN88XX_MAP_UART0_OFFSET 0x87E024000000UL
46 static pl011_uart_t uart;
/* Initialise the Cavium CN88xx debug console: a PL011 UART at its
 * physical MMIO base (we run with a 1-1 mapping of all RAM at this point). */
static void debug_uart_initialize(void) {
    pl011_uart_initialize(&uart, (mackerel_addr_t) CN88XX_MAP_UART0_OFFSET);
/* Blocking single-character transmit on the PL011: spin while the TX FIFO
 * is full (FR.TXFF == 1), then write the byte to the data register. */
static void debug_serial_putc(char c)
    while(pl011_uart_FR_txff_rdf(&uart) == 1) ;
    pl011_uart_DR_rawwr(&uart, c);
58 #include <dev/apm88xxxx/apm88xxxx_pc16550_dev.h>
60 #define CN88XX_MAP_UART0_OFFSET 0x87E024000000UL
62 apm88xxxx_pc16550_t uart;
/* Initialise the APM88xxxx (X-Gene) debug console: a PC16550-compatible UART.
 * NOTE(review): the base is hard-coded to 0x1C020000 while the
 * CN88XX_MAP_UART0_OFFSET macro defined just above goes unused here —
 * confirm which base is intended for this platform. */
static void debug_uart_initialize(void) {
    apm88xxxx_pc16550_initialize(&uart, (mackerel_addr_t)0x1C020000);
/* Blocking single-character transmit on the 16550: spin until LSR.THRE
 * (transmit holding register empty) is set, then write the byte to THR. */
static void debug_serial_putc(char c)
    // Wait until FIFO can hold more characters
    while(!apm88xxxx_pc16550_LSR_thre_rdf(&uart));
    apm88xxxx_pc16550_THR_thr_wrf(&uart, c);
76 #include <dev/pl011_uart_dev.h>
78 #define QEMU_MAP_UART0_OFFSET 0x9000000UL
80 static pl011_uart_t uart;
/* Initialise the QEMU "virt" platform debug console: a PL011 UART. */
static void debug_uart_initialize(void) {
    pl011_uart_initialize(&uart, (mackerel_addr_t) QEMU_MAP_UART0_OFFSET);
/* Blocking single-character transmit on the PL011: spin while the TX FIFO
 * is full (FR.TXFF == 1), then write the byte to the data register. */
static void debug_serial_putc(char c)
    while(pl011_uart_FR_txff_rdf(&uart) == 1) ;
    pl011_uart_DR_rawwr(&uart, c);
92 #include <dev/lpuart_dev.h>
94 #define IMX8X8_MAP_UART0_OFFSET 0x5A090000UL
/* Initialise the NXP i.MX8X debug console: an LPUART at UART0's MMIO base. */
static void debug_uart_initialize(void) {
    lpuart_initialize(&uart, (mackerel_addr_t) IMX8X8_MAP_UART0_OFFSET);
/* Blocking single-character transmit on the LPUART: spin until STAT.TDRE
 * (transmit data register empty) is set, then write the byte. */
static void debug_serial_putc(char c)
    while(lpuart_stat_tdre_rdf(&uart) == 0);
    lpuart_data_buf_wrf(&uart, c);
/* Emit one character on the debug console, sending a carriage return as well
 * so raw serial consoles get CR+LF line endings.
 * NOTE(review): a line is elided between the two calls in this view —
 * presumably an `if (c == '\n')` guard around the '\r'; confirm against the
 * full source. */
static void debug_serial_putchar(char c) {
    debug_serial_putc('\r');
    debug_serial_putc(c);
/* Print a NUL-terminated string on the debug console; a NULL pointer is
 * tolerated and prints nothing. */
static void debug_print_string(char *str)
    while (str && *str) {
        debug_serial_putchar(*str);
126 * \brief Very basic hex print support
/* Print a 64-bit value as exactly 16 hex digits (no "0x" prefix): nibble i
 * (counting from the least-significant end) lands in buf[15-i], so the most
 * significant digit is printed first. */
static inline void debug_print_hex(uint64_t num) {
    static char chars[] = {
    for (int i = 0; i < 16; i++) {
        int d = (num >> 4*i) & 0xf;
        buf[15-i] = chars[d];
    debug_print_string(buf);
157 #define debug_print_string(x)
158 #define debug_uart_initialize()
159 #define debug_print_hex(x)
163 void (*cpu_driver_entry)(lvaddr_t pointer);
/* Program TCR_EL1 for the EL1 translation regime: 48-bit virtual regions for
 * both the TTBR0 (low/user) and TTBR1 (high/kernel) halves, 4kB granule,
 * inner-shareable write-back read/write-allocate cacheable table walks, and
 * top-byte-ignore disabled.  Read-modify-write so unrelated fields keep
 * their reset/firmware values. */
static void configure_tcr(void) {
    armv8_TCR_EL1_t tcr_el1 = armv8_TCR_EL1_rd(NULL);
    // disable top byte ignored, EL1
    tcr_el1 = armv8_TCR_EL1_TBI1_insert(tcr_el1, 0);
    // disable top byte ignored, EL0
    tcr_el1 = armv8_TCR_EL1_TBI0_insert(tcr_el1, 0);
    // IPS = 5: 48-bit intermediate physical address size
    tcr_el1 = armv8_TCR_EL1_IPS_insert(tcr_el1, 5);
    // 4kB translation granule for TTBR1 walks
    tcr_el1 = armv8_TCR_EL1_TG1_insert(tcr_el1, armv8_KB_4);
    // Walks inner shareable
    tcr_el1 = armv8_TCR_EL1_SH1_insert(tcr_el1, armv8_inner_shareable);
    // outer/inner cacheability for TTBR1 walks: write-back, read/write-allocate
    tcr_el1 = armv8_TCR_EL1_ORGN1_insert(tcr_el1, armv8_WbRaWa_cache);
    tcr_el1 = armv8_TCR_EL1_IRGN1_insert(tcr_el1, armv8_WbRaWa_cache);
    // enable EL1 translation (EPD1 = 0 means TTBR1 walks are NOT disabled)
    tcr_el1 = armv8_TCR_EL1_EPD1_insert(tcr_el1, 0);
    // T1SZ = 16: 2^(64-16) = 48-bit virtual region for TTBR1
    tcr_el1 = armv8_TCR_EL1_T1SZ_insert(tcr_el1, 16);
    // 4kB translation granule for TTBR0 walks
    tcr_el1 = armv8_TCR_EL1_TG0_insert(tcr_el1, armv8_KB_4);
    // Walks inner shareable
    tcr_el1 = armv8_TCR_EL1_SH0_insert(tcr_el1, armv8_inner_shareable);
    // outer/inner cacheability for TTBR0 walks: write-back, read/write-allocate
    tcr_el1 = armv8_TCR_EL1_ORGN0_insert(tcr_el1, armv8_WbRaWa_cache);
    tcr_el1 = armv8_TCR_EL1_IRGN0_insert(tcr_el1, armv8_WbRaWa_cache);
    // enable EL0 translation (EPD0 = 0 means TTBR0 walks are NOT disabled)
    tcr_el1 = armv8_TCR_EL1_EPD0_insert(tcr_el1, 0);
    // T0SZ = 16: 48-bit virtual region for TTBR0
    tcr_el1 = armv8_TCR_EL1_T0SZ_insert(tcr_el1, 16);
    armv8_TCR_EL1_wr(NULL, tcr_el1);
201 #define DAIF_FIQ_BIT (1 << 0)
202 #define DAIF_IRQ_BIT (1 << 1)
/* Mask IRQ and FIQ at the current EL.  The DAIFSet immediate is a bit mask
 * (bit 0 = FIQ, bit 1 = IRQ — cf. DAIF_FIQ_BIT/DAIF_IRQ_BIT above), so #3
 * masks both; Debug and SError stay as they were. */
static void armv8_disable_interrupts(void)
    __asm volatile("msr DAIFSet, #3\n");
/* Program the stage-1 translation control register for the EL we booted in
 * (the switch(el)/case labels are on elided lines in this view): the EL2
 * path writes TCR_EL2 (PS = 5 -> 48-bit PA, T0SZ = 64-48 -> 48-bit VA), the
 * EL1 path writes TCR_EL1 with the same VA size. */
static void armv8_set_tcr(uint8_t el)
    //sysreg_write_ttbr0_el2(addr);
    armv8_TCR_EL2_t reg = 0;
    reg = armv8_TCR_EL2_PS_insert(reg, 5);
    reg = armv8_TCR_EL2_T0SZ_insert(reg, (64 - 48));
    armv8_TCR_EL2_wr(NULL, reg);
    armv8_TCR_EL1_t reg = 0;
    // TODO: figure out what to set reg = armv8_TCR_EL1_PS_insert(reg, 5);
    reg = armv8_TCR_EL1_T0SZ_insert(reg, (64 - 48));
    armv8_TCR_EL1_wr(NULL, reg);
    /* NOTE(review): assert() on a string literal is always true, so this can
     * never fire — assert(!"should not happen") is presumably intended. */
    assert("should not happen");
/* Install 'addr' as the stage-1 TTBR0 translation root for the boot EL
 * (switch/case labels elided in this view): when entered in EL2 both
 * TTBR0_EL2 and TTBR0_EL1 are written; in EL1 only TTBR0_EL1.  The trailing
 * ISB makes the new root visible to subsequent instruction fetches/walks. */
static void armv8_set_ttbr0(uint8_t el, lpaddr_t addr)
    //sysreg_write_ttbr0_el2(addr);
    armv8_TTBR0_EL2_wr(NULL, addr);
    armv8_TTBR0_EL1_wr(NULL, addr);
    armv8_TTBR0_EL1_wr(NULL, addr);
    /* NOTE(review): assert() on a string literal is always true, so this can
     * never fire — assert(!"should not happen") is presumably intended. */
    assert("should not happen");
    __asm volatile("isb");
/* Enable the stage-1 MMU at the boot EL (switch/case labels elided in this
 * view): set SCTLR_ELx.M, invalidate that EL's TLB, and order everything
 * with ISB / final DSB SY + ISB so translation is live before returning. */
static void armv8_enable_mmu(uint8_t el)
    armv8_SCTLR_EL3_M_wrf(NULL, 0x1);
    __asm volatile("tlbi alle3\n isb");
    armv8_SCTLR_EL2_M_wrf(NULL, 0x1);
    __asm volatile("tlbi alle2\n isb");
    armv8_SCTLR_EL1_M_wrf(NULL, 0x1);
    __asm volatile("tlbi vmalle1\n isb");
    /* NOTE(review): assert() on a string literal is always true, so this can
     * never fire — assert(!"should not happen") is presumably intended. */
    assert("should not happen");
    __asm volatile("dsb sy\n isb");
/* Invalidate all TLB entries for the boot EL, followed by DSB SY + ISB.
 * NOTE(review): each branch also writes SCTLR_ELx.M = 1 before the TLBI —
 * this looks copied from armv8_enable_mmu; a pure TLB invalidate should not
 * need to touch SCTLR.  Confirm intent before changing. */
static void armv8_invalidate_tlb(uint8_t el)
    armv8_SCTLR_EL3_M_wrf(NULL, 0x1);
    __asm volatile("tlbi alle3");
    armv8_SCTLR_EL2_M_wrf(NULL, 0x1);
    __asm volatile("tlbi alle2");
    armv8_SCTLR_EL1_M_wrf(NULL, 0x1);
    __asm volatile("tlbi vmalle1");
    /* NOTE(review): assert() on a string literal is always true, so this can
     * never fire — assert(!"should not happen") is presumably intended. */
    assert("should not happen");
    __asm volatile("dsb sy\n isb");
300 static void armv8_invalidate_icache(void)
/* ISB: flush the pipeline so preceding system-register writes and cache
 * maintenance take effect before any later instruction executes. */
static void armv8_instruction_synchronization_barrier(void)
    __asm volatile("isb");
/* Build the saved program status used by the upcoming exception return:
 * D/A/I/F all masked, M[3:0] = (1<<2)|1 = 0b0101 = EL1h (EL1 using SP_EL1).
 * The value is then written to the SPSR of the EL we are currently running
 * in (the switch on 'el' selecting one of the three writes is on elided
 * lines in this view). */
static void configure_spsr(uint8_t el) {
    armv8_SPSR_EL2_t spsr = 0;
    /* mask the exceptions */
    spsr = armv8_SPSR_EL2_D_insert(spsr, 1);
    spsr = armv8_SPSR_EL2_A_insert(spsr, 1);
    spsr = armv8_SPSR_EL2_I_insert(spsr, 1);
    spsr = armv8_SPSR_EL2_F_insert(spsr, 1);
    /* set el1 and use the SP_ELx stack */
    spsr = armv8_SPSR_EL2_M_lo_insert(spsr, (1<<2) | 1);
    armv8_SPSR_EL3_wr(NULL, spsr);
    armv8_SPSR_EL2_wr(NULL, spsr);
    armv8_SPSR_EL1_wr(NULL, spsr);
/* Point TTBR1_EL1 — the translation root of the high (kernel) VA window —
 * at the given physical page-table root. */
static void configure_ttbr1(lpaddr_t addr)
    armv8_TTBR1_EL1_rawwr(NULL, addr);
/* Program MAIR_EL1 with the two memory attribute encodings the kernel
 * mappings use: attr0 = 0xff (normal memory, inner/outer write-back) and
 * attr1 = 0x00 (Device-nGnRnE); all higher attribute slots are zero. */
static void configure_mair(void)
    /* Set memory type 0, for kernel use. */
    // attr0 = Normal Memory, Inner Write-back non transient
    // attr1 = Device-nGnRnE memory
    armv8_MAIR_EL1_wr(NULL, 0x00ff);
/* Build SCTLR_EL1 from scratch and write it: enables the EL1/EL0 MMU (M),
 * instruction (I) and data (C) caches, alignment and SP-alignment checking,
 * and relaxes the EL0 traps that would otherwise bounce cache maintenance,
 * WFI/WFE, CTR_EL0 reads and DC ZVA up to EL1. */
static void configure_sctlr(void)
    /* Enable EL0/1 translation. */

    armv8_SCTLR_EL1_t val = 0;

    /* Traps EL0 execution of cache maintenance instructions to EL1 */
    val = armv8_SCTLR_EL1_UCI_insert(val, 0x1);

    /* write permissions implies execute never */
    //val = armv8_SCTLR_EL1_WXN_insert(val, 0x1);

    /* don't trap WFI/WFE instructions to EL1 */
    val = armv8_SCTLR_EL1_nTWE_insert(val, 0x1);
    val = armv8_SCTLR_EL1_nTWI_insert(val, 0x1);

    /* disable Traps EL0 accesses to the CTR_EL0 to EL1*/
    val = armv8_SCTLR_EL1_UCT_insert(val, 0x1);

    /* Allow EL0 to do DC ZVA */
    val = armv8_SCTLR_EL1_DZE_insert(val, 0x1);

    /* enable instruction cache */
    val = armv8_SCTLR_EL1_I_insert(val, 0x1);

    /*
     * EL0 execution of MRS , MSR(register) , or MSR(immediate) instructions
     * that access the DAIF is not trapped to EL1.
     */
    //val = armv8_SCTLR_EL1_UMA_insert(val, 0x1);

    /*
     * Enables accesses to the DMB, DSB, and ISB System
     * instructions in the (coproc== 1111 ) encoding space from EL0
     */
    val = armv8_SCTLR_EL1_CP15BEN_insert(val, 0x1);

    /* Enable SP alignment checks */
    val = armv8_SCTLR_EL1_SA0_insert(val, 0x1);
    val = armv8_SCTLR_EL1_SA_insert(val, 0x1);

    /* enable data cachable */
    val = armv8_SCTLR_EL1_C_insert(val, 0x1);

    /* enable alignment checks */
    val = armv8_SCTLR_EL1_A_insert(val, 0x1);

    /* enable the EL1/EL0 stage-1 MMU */
    val = armv8_SCTLR_EL1_M_insert(val, 0x1);

    armv8_SCTLR_EL1_wr(NULL, val);
406 static void configure_el3_traps(void)
409 /* If we've started in EL3, that most likely means we're in the
410 * simulator. We don't use it at all, so just disable all traps to
411 * EL3, and drop to non-secure EL2 (if it exists). */
413 armv8_SCR_EL3_t val = 0;
415 /* Don't trap secure timer access. */
416 val = armv8_SCR_EL3_ST_insert(val, 0x1);
418 /* Next EL is AArch64. */
419 val = armv8_SCR_EL3_RW_insert(val, 0x1);
421 /* HVC is enabled. */
422 val = armv8_SCR_EL3_HCE_insert(val, 0x1);
424 /* SMC is disabled. */
425 val = armv8_SCR_EL3_SMD_insert(val, 0x1);
427 /* External aborts don't trap to EL3. */
428 val = armv8_SCR_EL3_EA_insert(val, 0x1);
430 /* FIQs don't trap to EL3. */
431 val = armv8_SCR_EL3_FIQ_insert(val, 0x1);
433 /* IRQs don't trap to EL3. */
434 val = armv8_SCR_EL3_IRQ_insert(val, 0x1);
436 /* EL0 and EL1 are non-secure. */
437 val = armv8_SCR_EL3_NS_insert(val, 0x1);
439 armv8_SCR_EL3_wr(NULL, val);
441 /* We don't need to set SCTLR_EL3, as we're not using it. */
443 armv8_MDCR_EL3_t mdcr = 0;
444 /* Allow event counting in secure state. */
445 armv8_MDCR_EL3_SPME_insert(mdcr, 0x1);
446 armv8_MDCR_EL3_wr(NULL, mdcr);
/* Make EL2 transparent to the EL1 kernel: bail out early if EL2 is not
 * implemented; otherwise set HCR_EL2 for an AArch64 non-secure EL1 with HVC
 * disabled, and let EL1 access the generic (physical) timer and counter
 * without trapping to EL2.  (The early-return statement after the
 * implemented-check is on an elided line in this view.) */
static void configure_el2_traps(void)
    /* check if EL2 is implemented */
    if (armv8_ID_AA64PFR0_EL1_EL2_rdf(NULL) == armv8_ID_EL_NOT_IMPLEMENTED) {

    /* configure EL2 traps & mmu */

    armv8_HCR_EL2_t val = 0;

    /* For the Non-secure EL1&0 translation regime, for permitted accesses to a
     * memory location that use a common definition of the Shareability and
     * Cacheability of the location, there might be a loss of coherency if the
     * Inner Cacheability attribute for those accesses differs from the Outer
     * Cacheability attribute.*/
    val = armv8_HCR_EL2_MIOCNCE_insert(val, 1);

    /* Set the mode to be AARCH64 */
    val = armv8_HCR_EL2_RW_insert(val, 1);

    /* HVC instructions are UNDEFINED at EL2 and Non-secure EL1. Any resulting
     * exception is taken to the Exception level at which the HVC instruction
     * is executed.
     *
     * XXX: this will disable Hypervisor calls entirely, revisit for ARRAKIS
     */
    val = armv8_HCR_EL2_HCD_insert(val, 1);

    armv8_HCR_EL2_wr(NULL, val);

    /* disable traps to EL2 for timer accesses */

    armv8_CNTHCTL_EL2_t cnthctl;
    cnthctl = armv8_CNTHCTL_EL2_rd(NULL);
    cnthctl = armv8_CNTHCTL_EL2_EL1PCEN_insert(cnthctl, 0x1);
    cnthctl = armv8_CNTHCTL_EL2_EL1PCTEN_insert(cnthctl, 0x1);
    armv8_CNTHCTL_EL2_wr(NULL, cnthctl);
/* Allow EL1/EL0 use of FP and SIMD: set CPACR_EL1.FPEN so those accesses
 * are not trapped to EL1. */
static void configure_el1_traps(void)
    /* disable traps for FP/SIMD access */
    armv8_CPACR_EL1_FPEN_wrf(NULL, armv8_fpen_trap_none);
/* Exception-return out of EL3: install the (high-address) EL1 stack pointer,
 * point ELR_EL3 at the relocated CPU-driver entry, and eret with the
 * high-address core-data pointer as the first argument.  The target EL/mode
 * comes from SPSR_EL3, presumably set by configure_spsr() beforehand —
 * confirm against the caller.  Does not return. */
static void drop_to_el2(struct armv8_core_data *pointer)
    /* write the stack pointer for EL1 */
    armv8_SP_EL1_wr(NULL, pointer->cpu_driver_stack + KERNEL_OFFSET);

    /* Set the jump target */
    armv8_ELR_EL3_wr(NULL, (uint64_t)cpu_driver_entry);

    /* call exception return */
    eret((lpaddr_t)pointer + KERNEL_OFFSET, 0, 0, 0);
/* Exception-return out of EL2 into EL1: install the (high-address) EL1
 * stack pointer, point ELR_EL2 at the relocated CPU-driver entry, and eret
 * with the high-address core-data pointer as the first argument.  The
 * target mode comes from SPSR_EL2, presumably set by configure_spsr()
 * beforehand — confirm against the caller.  Does not return. */
static void drop_to_el1(struct armv8_core_data *pointer)
    /* write the stack pointer for EL1 */
    armv8_SP_EL1_wr(NULL, pointer->cpu_driver_stack + KERNEL_OFFSET);

    /* Set the jump target */
    armv8_ELR_EL2_wr(NULL, (uint64_t)cpu_driver_entry);

    /* call exception return */
    eret((lpaddr_t)pointer + KERNEL_OFFSET, 0, 0, 0);
/* Already running in EL1: no exception return needed — switch to the
 * high-address kernel stack and call the CPU-driver entry directly with the
 * high-address core-data pointer. */
static void jump_to_cpudriver(struct armv8_core_data *pointer)
    // We are in EL1, so call arch_init directly.

    // Re-set the stack pointer
    sysreg_write_sp(pointer->cpu_driver_stack + KERNEL_OFFSET);
    cpu_driver_entry((lpaddr_t)pointer + KERNEL_OFFSET);
532 Execution is starting in LOW addresses
533 Pointers to stack and multiboot are LOW addresses
534 Single core running (not guaranteed to be core 0)
535 CPU is in highest implemented exception level
536 MMU enabled, 4k translation granule, 1:1 mapping of all RAM
538 Core caches (L1&L2) and TLB enabled
539 Non-architectural caches disabled (e.g. L3)
541 Generic timer initialized and enabled
543 ACPI tables available
544 Register x0 contains a pointer to ARMv8 core data
/* Common initialisation path for BSP and APP cores: latch the CPU-driver
 * entry point, set up the EL1 translation regime (kernel page tables via
 * TTBR1, memory attributes, SCTLR — some calls are on elided lines in this
 * view) and EL1 traps, then leave the boot EL for the CPU driver.  The
 * dispatch on the current EL — EL3: disarm EL3/EL2 traps and eret down;
 * EL2: disarm EL2 traps and eret to EL1; EL1: plain call — is selected by
 * 'el' (switch labels elided here).  Never returns. */
static void boot_generic_init(struct armv8_core_data *core_data) {

    cpu_driver_entry = (void *)core_data->cpu_driver_entry;

    uint8_t el = armv8_CurrentEL_EL_rdf(NULL);

    /* Configure the EL1 translation regime. */

    /* Configure the kernel page tables for EL1. */
    configure_ttbr1(core_data->page_table_root);

    /* configure memory attributes */

    /* Enable EL0/1 translation. */

    /* configure EL 1 traps*/
    configure_el1_traps();

    debug_print_string("Jumping to CPU driver\n");

    configure_el3_traps();
    configure_el2_traps();
    drop_to_el2(core_data);

    configure_el2_traps();
    drop_to_el1(core_data);

    jump_to_cpudriver(core_data);
591 * @brief initializes an application core
593 * @param state pointer to the armv8_core_data structure
595 * This function is intended to bring the core to the same state as if it
596 * has been booted by the UEFI boot loader.
/**
 * @brief Entry point for an application (non-BSP) core.
 *
 * @param pointer low-address physical pointer to this core's
 *                armv8_core_data structure (arrives in x0).
 *
 * Brings the core to the state the UEFI loader leaves the BSP in: disables
 * interrupts, installs the boot page-table root and TCR for the current EL,
 * enables the MMU, invalidates TLB and icache, then enters the common
 * boot_generic_init() path.  Declared noreturn; the trailing WFI only runs
 * if that path ever falls through.
 */
void boot_app_init(lpaddr_t pointer)
    debug_uart_initialize();
    debug_print_string("APP BOOTING\n");

    struct armv8_core_data *core_data = (struct armv8_core_data *)pointer;

    uint8_t current_el = armv8_CurrentEL_EL_rdf(NULL);

    if (current_el == 2) {
        /* Write zero to CPTR_EL2 — with all trap bits clear, FP/SIMD and
         * trace accesses are not trapped to EL2.  ('zero' is declared on an
         * elided line in this view.) */
        __asm volatile("MSR CPTR_EL2, %[zero]" : : [zero] "r" (zero));

    // /* disable interrupts */
    armv8_disable_interrupts();

    /* set the ttbr0/1 */
    armv8_set_ttbr0(current_el, core_data->page_table_root);

    armv8_set_tcr(current_el);

    armv8_enable_mmu(current_el);

    armv8_invalidate_tlb(current_el);

    /* invalidate icache */
    armv8_invalidate_icache();
    armv8_instruction_synchronization_barrier();

    boot_generic_init(core_data);

    /* park the core — boot_generic_init should never return */
    __asm volatile("wfi \n");
640 Execution is starting in LOW addresses
641 Pointers to stack and multiboot are LOW addresses
642 Single core running (not guaranteed to be core 0)
643 CPU is in highest implemented exception level
644 MMU enabled, 4k translation granule, 1:1 mapping of all RAM
646 Core caches (L1&L2) and TLB enabled
647 Non-architectural caches disabled (e.g. L3)
649 Generic timer initialized and enabled
651 ACPI tables available
652 Register x0 contains the multiboot magic value
653 Register x1 contains a pointer to ARMv8 core data
/**
 * @brief Entry point on the bootstrap (first) core.
 *
 * @param magic   multiboot2 bootloader magic value (arrives in x0)
 * @param pointer low-address physical pointer to the armv8_core_data (x1)
 *
 * Brings up the debug console, validates the bootloader magic (logging a
 * message on mismatch), then enters the common boot_generic_init() path.
 * Declared noreturn; the trailing WFI parks the core if anything falls
 * through.
 */
boot_bsp_init(uint32_t magic, lpaddr_t pointer) {

    debug_uart_initialize();
    debug_print_string("BSP BOOTING\n");
    debug_print_string("Magic: ");
    debug_print_hex(magic);
    debug_print_string(", Pointer: ");
    debug_print_hex(pointer);
    debug_print_string("\n");

    /* Boot magic must be set */
    if (magic != MULTIBOOT2_BOOTLOADER_MAGIC) {
        debug_print_string("Invalid bootloader magic\n");

    struct armv8_core_data *core_data = (struct armv8_core_data *)pointer;

    debug_print_string("CPU driver entry: ");
    debug_print_hex(core_data->cpu_driver_entry);
    debug_print_string("\n");

    /* disable interrupts */
    armv8_disable_interrupts();

    boot_generic_init(core_data);

    /* park the core — boot_generic_init should never return */
    __asm volatile("wfi \n");