3 * \brief Bootstrap the kernel.
6 * Copyright (c) 2009 ETH Zurich.
9 * This file is distributed under the terms in the attached LICENSE file.
10 * If you do not find this file, copies can be found by writing to:
11 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
15 #define __ASSEMBLER__ 1
18 #include <barrelfish_kpi/flags_arch.h> // ARM_MODE_MASK
19 #include <offsets.h> // BOOT_STACK_PHYS
24 .globl start, halt, got_base
26 // Used to track phys memory allocator limit globally.
30 // Entry constraints same as ARM Linux so QEMU can boot
31 // this file directly and to make it easy to use existing
32 // loaders on real h/w.
34 // The build process has built an ELF kernel image. The first
35 // word is overwritten to branch to entry point, `start', in
36 // the text section of the unpacked ELF file. The start routine
37 // then loads the ELF image, initializes the page table, and
40 // NB Writing the branch instruction into the first word of the
41 // ELF header causes QEMU to treat the file as a Linux kernel
42 // image and it provides the ATAG_HEADER info.
47 // Caches in unknown state, but no lockdown
49 // CPU is in a privileged mode.
52 // r1 contains board id
53 // r2 contains pointer to kernel args structure
54 // lr contains pointer to elf header + 4
// start: kernel entry point. Entered with the MMU off, executing from
// the packed ELF image in physical RAM (see header comment above).
// In: r1 = board id, r2 = ptr to kernel args, lr = ELF header + 4.
// NOTE(review): this is an extract — instructions are missing between
// some lines below (e.g. the msr writing r3 back to cpsr, the branch
// consuming the validation result, and several call setups).
56 sub lr, lr, #4 // lr = address of elf header
57 mov r4, lr // r4 = p_elf_header [KEEP]
// Force the CPU into SVC mode (the mask is cleared, then OR-ed with
// ARM_MODE_SVC; the original comment said SYS, which was wrong).
59 mrs r3, cpsr // Ensure in SVC mode
60 bic r3, r3, #ARM_MODE_MASK
61 orr r3, r3, #ARM_MODE_SVC
// Point sp at the dedicated physical boot stack.
64 mov sp, #BOOT_STACK_PHYS
67 /* Validate ELF header */
69 bl elf_header_is_valid
73 /* Compute size of elf file */
74 mov r0, r4 // r0 = p_elf_header
75 bl elf_get_file_size // r0 = elf_file_size
76 mov r5, r0 // r5 = elf_file_size [KEEP]
// Returns the virtual address span touched by loadable segments
// (used below as r6 = base, r7 = limit).
79 bl elf_get_expanded_limits
81 mov r6, r0 // r6 = kernel v_addr [KEEP]
82 mov r7, r1 // r7 = kernel v_limit [KEEP]
84 /* Want to map low 1MB section with 1:1 to kernel address space */
// NOTE(review): constant is 512 MB; its relation to the "low 1MB"
// comment above is not visible in this extract — confirm vs offsets.h.
85 mov r2, #(512 * 1024 * 1024)
90 The ELF image is in RAM, copy it just above the
91 physical pages of the unpacked kernel (1:1 mapping
92 between kernel v_addr and p_addr).
// Build an alignment mask from 4095 to round the copy destination.
95 ldr r3, =4095 // Page align kernel v_limit
97 orr r3, r3, r3, LSL #20
98 bic r0, r0, r3 // r0 = new ELF file physical address
99 mov r1, r4 // r1 = old ELF file physical address
100 mov r2, r5 // r2 = ELF size
101 sub r8, r0, r1 // r8 = delta (new, old) ELF physical address
104 add r4, r4, r8 // Update r4, p_elf_header
// On ARM, pc reads as current instruction + 8, so adding the
// relocation delta resumes at the following line in the copied image.
106 add pc, pc, r8 // Jump to next line, but in relocated ELF image.
112 // Zero physical region
113 sub r1, r7, r6 // r1 = expanded ELF size
115 bic r0, r6, r2 // r0 = physical address of expanded image
// Set up arguments for elf_expand(elf_header, kernel_v, kernel_p).
120 bic r2, r6, r2 // r2 = kernel p_addr
121 mov r1, r6 // r1 = kernel v_addr
122 mov r0, r4 // r0 = elf_header
// The physical bump allocator starts right after the relocated ELF.
125 add alloc_top, r4, r5 // alloc_top = end of ELF file.
127 mov r0, #16384 // allocate L1 page table (16K aligned to 16K).
130 mov r8, r0 // r8 = L1 page table [KEEP]
// Three section_map calls follow: kernel sections, the 1:1 section
// holding the current pc, and the 1:1 section for the ATAG headers.
133 ldr r3, =0xfff00000 // Map kernel section to physical section
136 bl section_map // section_map (l1_addr, v_addr, p_addr)
138 ldr r3, =0xfff00000 // Map section containing program counter 1:1
144 ldr r3, =0xfff00000 // Map ATAG headers 1:1
// Install the L1 table and domain permissions, then turn the MMU on.
153 mcr p15, 0, r8, c2, c0, 0 // Load TTBR with L1
154 ldr r0, =0x55555555 // Initial domain permissions - all client [checked]
155 mcr p15, 0, r0, c3, c0, 0
157 ldr lr, =$start_with_mmu_enabled // Address to continue at when paging is enabled
158 ldr r0, =KERNEL_OFFSET
159 add sp, sp, r0 // Prepare stack for relocation
161 // ldr r1, =0x1007 // Enable: D-Cache, I-Cache, Alignment, MMU
162 ldr r1, =0x1005 // Enable: D-Cache, I-Cache, MMU
163 mrc p15, 0, r0, c1, c0, 0
// The two instructions after the SCTLR write were already fetched
// flat-mapped; the mov pc/nop pair then jumps into the mapped kernel.
165 mcr p15, 0, r0, c1, c0, 0 // MMU is enabled
166 mov pc, lr // Flat fetched.
167 mov r0, r0 // Flat fetched.
169 // Up until this point PC is in ELF file.
170 start_with_mmu_enabled:
171 // MMU is enabled and PC is in the loaded ELF image.
// Tear down the temporary 1:1 mapping of the low section and flush
// its TLB entry so the identity window disappears.
173 mov r1, #0 // Unmap section with VA = 0
174 mov r0, r8 // r0 = page table address
178 mcr p15, 0, r1, c8, c7, 0 // Invalidate ID-TLB entry for section with VA = 0.
180 start_set_got_register:
// Load the PIC base register with the GOT address; got_base itself is
// initialized by the linker (see the .word at the end of the file).
181 ldr PIC_REGISTER, =got_base
184 start_set_init_arguments:
// Build the C entry arguments: pop the board id / kern args saved
// earlier (the push is not visible in this extract), then rebase the
// physical pointers into kernel virtual addresses via KERNEL_OFFSET.
185 ldmfd sp!, {r0, r1} // r0 = board id
186 // r1 = paddr of kern args, already mapped 1:1 to vaddr
187 ldr r2, =KERNEL_OFFSET // Convert paddr's to vaddr's
188 add r3, alloc_top, r2 // r3 = alloc_top
189 add r2, r4, r2 // r2 = addr kernel ELF file
194 * bool elf_header_is_valid(struct Elf32_EHdr*)
196 * A cursory check of ELF header. NB: the first word is known to be invalid
// (it was overwritten with the boot branch — see the file header comment),
// so validation starts at e_ident[4].
// Each load uses pre-indexed writeback ([r0, #4]!) to step r0 through
// the header one word at a time; the compare/branch instructions
// between the loads are not visible in this extract.
199 ldr r1, [r0, #4]! // e_ident[4..7]
203 ldr r1, [r0, #4]! // e_ident[8..11]
207 ldr r1, [r0, #4]! // e_ident[12..15]
210 ldr r1, [r0, #4]! // (e_type, e_machine)
214 ldr r1, [r0, #4]! // e_version
222 * uint32_t elf_get_file_size(struct Elf32_EHdr*)
// Reads e_shoff (offset 32), e_shentsize (46) and e_shnum (48); the
// size is presumably e_shoff + e_shentsize * e_shnum, i.e. the end of
// the section header table — the multiply/add and return are not
// visible in this extract.
225 ldr r1, [r0, #32] // r1 = offset of sections
226 ldrh r2, [r0, #46] // r2 = e_shentsize
227 ldrh r3, [r0, #48] // r3 = e_shnum
233 * (vaddr_t, size_t) elf_get_expanded_limits(struct Elf32_EHdr*)
// Scans the program headers and returns the virtual span the expanded
// image occupies: r0 = lowest p_vaddr, r1 = highest p_vaddr + p_memsz
// (matching the caller's r6 = v_addr / r7 = v_limit usage in start).
235 elf_get_expanded_limits:
236 stmfd sp!, {r4-r6, lr}
237 mov r5, #0 // r5 = max vaddr
238 sub r4, r5, #1 // r4 = min vaddr (starts at 0xffffffff)
239 ldr r1, [r0, #28] // r1 = e_phoff
240 ldrh r2, [r0, #42] // r2 = e_phentsize
241 ldrh r3, [r0, #44] // r3 = e_phnum
242 add r1, r1, r0 // r1 = start of prog headers
243 mul r0, r2, r3 // r0 = size of prog headers
244 add r3, r0, r1 // r3 = end of prog headers
// Per-phdr loop body; the loop label, the compares feeding the
// conditional moves, and the `loopinc` label are missing from this
// extract.
247 ldr r6, [r1, #20] // r6 = memsz
249 beq loopinc // SKIP If memsz = 0
250 ldr r0, [r1, #8] // r0 = vaddr
252 movlo r4, r0 // r4 = min(r4, vaddr)
253 add r6, r0, r6 // r6 = vaddr + memsz (comment previously said r0)
255 movhs r5, r6 // r5 = max(r5, vaddr + memsz)
256 ldr r0, [r1, #28] // r0 = alignment
264 ldmfd sp!, {r4-r6, pc}
267 * void elf_expand(struct Elf32_EHdr*, vaddr_t kernel_v, paddr_t kernel_p)
// Copies each loadable segment from the file image to its physical
// destination, computed as (p_vaddr - kernel_v) + kernel_p. The loop
// label, the copy call between the save/restore (presumably memcpy),
// and the loop-termination test are not visible in this extract.
270 stmfd sp!, {r4-r7, lr}
271 ldr r3, [r0, #28] // r3 = e_phoff
272 ldrh r4, [r0, #44] // r4 = e_phnum
273 add r3, r0, r3 // r3 = addr phdr[0]
277 ldr r5, [r3, #4] // r5 = p_offset
280 ldr r6, [r3, #8] // r6 = p_vaddr
283 stmfd sp!, {r0-r3} // Save scratch registers
// Build (dest, src, size) for the segment copy.
284 mov r7, r1 // r7 = kernel_v
285 add r1, r0, r5 // r1 = ptr to segment in file
286 sub r0, r6, r7 // r0 = kernel_v_offset
287 add r0, r0, r2 // r0 = phys addr of segment
288 ldr r2, [r3, #16] // r2 = p_filesz
290 ldmfd sp!, {r0-r3} // Restore scratch registers
// Advance to the next program header.
292 ldrh r5, [r0, #42] // r5 = e_phentsize
293 add r3, r3, r5 // r3 = addr phdr[next]
296 ldmfd sp!, {r4-r7, pc}
299 * void memzero(uintptr_t addr, size_t bytes)
300 * Assumes addr is 4-byte aligned and bytes is a multiple of 4.
// Only one store of the unrolled loop is visible here; the zeroing of
// r2/r3 and the loop control are missing from this extract. The store
// is conditional (GE) with post-increment writeback on r0.
316 stmgeia r0!, {r2, r3}
325 * void memcpy(uintptr_t dest, uintptr_t src, size_t bytes)
327 * Assumes addr is 4-byte aligned and bytes is a multiple of 4.
// Word-pair copy loop (r4/r5 as the transfer pair, hence the
// callee-save push); the loop control between load and store is not
// visible in this extract. Both transfers are conditional (GE) with
// post-increment writeback.
330 stmfd sp!, {r4, r5, lr}
342 ldmgeia r1!, {r4, r5}
344 stmgeia r0!, {r4, r5}
350 ldmfd sp!, {r4, r5, pc}
353 * Allocate Physical Memory.
355 * uintptr_t alloc_phys(size_t bytes, size_t align)
// Bump allocator over the global alloc_top. NOTE(review): the add/bic
// pair rounds r3 up using r1 as a bit mask, which implies r1 holds
// align - 1 at this point — the adjustment (and the alloc_top
// load/store) is not visible in this extract; confirm in full source.
360 add r3, r3, r1 // Align start address
361 bic r3, r3, r1 // r3 = alloc address
362 add r0, r0, r3 // r0 = new alloc_top value
371 * void section_map(L1PageTable *p, vaddr_t v, paddr_t p)
// Installs a 1 MB L1 section descriptor: table[v >> 20] = entry.
// NOTE(review): r2 is shifted left by 20 when forming the entry, so
// it appears to arrive as a section index (paddr >> 20) rather than a
// raw paddr — the caller-side shift is not visible in this extract.
374 lsr r1, r1, #20 // r1 is table offset
376 ldr r3, =0x41e // AP = 01, Domain = 0, CB, Section
377 orr r2, r3, r2, LSL #20 // r2 = Section Entry
378 str r2, [r0, r1, LSL #2] // table[v >> 20] = r2
382 * void section_unmap(L1PageTable *p, vaddr_t v)
// Clears the L1 descriptor for the section containing v.
385 mov r2, #0 // Invalid L1 PTE
// NOTE(review): section_map indexes the table with [r0, r1, LSL #2]
// after `lsr r1, r1, #20`; the `LSL #18` below looks wrong (either a
// typo for LSL #2 with the lsr #20 missing from this extract, or it
// should be LSR #18). Verify against the full source before relying
// on this path.
386 str r2, [r0, r1, LSL #18]
390 * extern "C" void halt(void) __attribute__((noreturn))
// (halt's body is not visible in this extract.)
// got_base storage: the .word below is patched by the linker and is
// what start_set_got_register loads into PIC_REGISTER; the got_base
// label itself is not visible in this extract.
398 .word // Initialized by linker