2 * Copyright (c) 2007-2016 ETH Zurich.
5 * This file is distributed under the terms in the attached LICENSE file.
6 * If you do not find this file, copies can be found by writing to:
7 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
12 #endif // __ASSEMBLER__
14 #include <asmoffsets.h> // OFFSETOF etc.
15 #include <barrelfish_kpi/registers_arch.h> // CPSR_REG etc.
16 #include <barrelfish_kpi/flags_arch.h> // CPSR_IF_MASK etc.
17 #include <exceptions.h>
20 /*** Macros used in later routines. ***/
// Macro to initialize system mode stack.
// Assumes that the GOT pointer is set.
// The stack address is found indirectly: the GOT holds the address of
// kernel_stack, and the stack grows down, so sp starts at base + size.
        ldr sp, cpu_stack_got_offset            // sp = GOT offset of kernel_stack
        ldr sp, [PIC_REGISTER, sp]              // sp = &kernel_stack (load via GOT)
        add sp, sp, #KERNEL_STACK_SIZE          // point sp at the top of the stack
// Macro definition to get pointer to arch-specific dispatcher
//
// Logical equivalent of C function with same name.
// \out receives the dispatcher pointer; \tmp is a scratch register
// (assumed to hold the GOT base at the first indexed load — TODO confirm
// against the elided setup instruction).
.macro get_dispatcher_shared_arm out tmp
        ldr \out, dcb_current_idx               // \out = GOT offset of dcb_current
        ldr \out, [\tmp, \out]                  // \out = &dcb_current
        ldr \out, [\out] // out = dcb_current
        ldr \out, [\out, #OFFSETOF_DCB_DISP] //now ptr to dispatcher_shared_arm
// Macro to determine if dispatcher is disabled.
//
// pc and disp arguments are unmodified.
// out contains result: 1 if the dispatcher is disabled, 0 otherwise.
// NOTE: the condition codes used below (hs/hi/lo) depend on compares
// against \out; flags must be set accordingly before each branch.
.macro disp_is_disabled disp, pc, out
        // disp->disabled || (disp->crit_pc_lo <= pc && pc < disp->crit_pc_hi)
        ldrb \out, [\disp, #OFFSETOF_DISP_DISABLED]
        bhs 0f // disp->disabled >= 1 | disabled
        // disp->disabled == false: check the critical PC window instead
        ldr \out, [\disp, #OFFSETOF_DISP_CRIT_PC_LOW]
        bhi 0f // disp->crit_pc_low > pc | enabled
        ldr \out, [\disp, #OFFSETOF_DISP_CRIT_PC_HIGH]
        movhs \out, #0 // pc >= disp->crit_pc_high | enabled
        movlo \out, #1 // pc < disp->crit_pc_high | disabled
// Macro to spill registers into a save area.
//
// Assumptions on entry:
// - context is in scratch registers set {r0-r3}.
// - spsr is also in scratch register set.
// - stack holds spilled scratch registers.
// - lr contains pc for context
// Side effect:
// - pops scratch registers off stack (sp -> sp + 16).
// The .err directives below fire (under elided .if guards) if the
// register-file layout assumed by the stores changes.
.macro save_context context, spsr_reg
        .err "Invariant failure: CPSR offset != 0"
        .err "Invariant failure: PC offset != 16"
        str \spsr_reg, [\context, #(CPSR_REG * 4)]   // saved CPSR goes first
        str lr, [\context, #(PC_REG * 4)]            // banked lr = context's pc
        add \spsr_reg, \context, #(LR_REG * 4)
        stmda \spsr_reg, {r4-r14}^                   // '^': store *user-mode* r4-r14
        add \spsr_reg, \context, #(R3_REG * 4)
        pop {r4-r7} // Pop spilled scratch registers (original r0-r3)
        stmda \spsr_reg!, {r4-r7} // And Save them into the r0-r3 slots
// Macro to initialize SVC pic register
//
// Read the PL1 thread ID register, where we stored the GOT address on
// boot (TPIDRPRW, i.e. CP15 c13/c0/4 — privileged-only thread ID).
        mrc p15, 0, \reg, c13, c0, 4
// Macro to enter SYS mode with interrupts disabled.
// Set up stack and GOT pointer.
// \scratch is clobbered: it carries the new CPSR value for the (elided)
// mode-switch write.
.macro enter_sys scratch
        mov \scratch, #(CPSR_IF_MASK | ARM_MODE_SYS) // IRQ+FIQ masked, SYS mode
        load_got PIC_REGISTER                        // restore GOT base after mode switch
/*** From here, this is one contiguous block of code. ***/

/* The vector table and handler stubs are linked together, at a 4k-aligned
 * address, so that they can be remapped to the high vector address. */
.section .text.vectors

.globl exception_vectors

/*** The exception vector table. ***/

/* This needs to be at the beginning of a 4k frame, that we'll map to the high
 * vectors address. It also needs to long jump, as it will be jumping down
 * into the regular kernel window. As there's no room to load the GOT in the
 * vector table itself, the handler stubs are linked in the same 4k frame, so
 * that we can reach them with a short jump. They then load the GOT base, and
 * long jump into the C handlers. */

/* Undefined instruction */

/* Hypervisor trap */
154 /*** The exception handlers. ***/
158 /* Different instances of the CPU driver will have their own stacks. On the
159 * BSP core, this is initialised in bsp_start, to the bsp kernel stack
160 * allocated alongside the first CPU driver image. */
/* GOT offset of the per-CPU kernel stack; dereferenced by the stack-init
 * macro above via [PIC_REGISTER, offset]. */
.type cpu_stack_got_offset, STT_OBJECT
cpu_stack_got_offset:
        .word kernel_stack(GOT)

/* The GOT offset of dcb_current. */
.type dcb_current_idx, STT_OBJECT
        .word dcb_current(GOT)
171 /* The vector table above uses short jumps to reach these, so they must also
172 * fit inside the 4kB high vectors page at 0xfffff000. */
174 /* These are the GOT offsets of the C handler functions, to which we've now
175 * got to long jump. */
/* Each word below is the GOT offset of a C handler; the stubs long-jump
 * with `ldr pc, [PIC_REGISTER, offset]`. */
.type got_fatal_kernel, STT_OBJECT      // fatal fault taken in kernel mode
        .word fatal_kernel_fault(GOT)
.type got_user_undef, STT_OBJECT        // undefined instruction from user code
        .word handle_user_undef(GOT)
.type got_sys_syscall, STT_OBJECT       // syscall from user space
        .word sys_syscall(GOT)
.type got_syscall_kernel, STT_OBJECT    // syscall issued in kernel mode
        .word sys_syscall_kernel(GOT)
.type got_page_fault, STT_OBJECT        // user-mode page fault
        .word handle_user_page_fault(GOT)
.type got_handle_irq, STT_OBJECT        // IRQ taken in user mode
        .word handle_irq(GOT)
.type got_kernel_irq, STT_OBJECT        // IRQ taken in kernel (wfi loop)
        .word handle_irq_kernel(GOT)
.type got_handle_fiq, STT_OBJECT        // FIQ taken in user mode
        .word handle_fiq(GOT)
.type got_kernel_fiq, STT_OBJECT        // FIQ taken in kernel mode
        .word handle_fiq_kernel(GOT)
// void undef_handler(void)
//
// Entered in UNDEF mode, IRQ disabled, ARM state.
//
// NB Identical to PABT except for final jump in undef_user and
// code doesn't adjust lr to point to faulting instruction since
// it was undefined and there's no point re-executing it.
        stmfd sp!, {r0-r3} // Save for scratch use
        mrs r3, spsr // r3 = spsr until save_context
        ands r1, r3, #ARM_MODE_PRIV // r1 != 0 iff trapped from privileged mode
        // --- User-mode path: save into the dispatcher ---
        get_dispatcher_shared_arm r2 r0
        sub r0, lr, #4 // r0 = faulting pc
        disp_is_disabled r2, r0, r1 // r1 = 1 if disabled, else 0
        addeq r1, r2, #OFFSETOF_DISP_ENABLED_AREA  // enabled -> enabled area
        addne r1, r2, #OFFSETOF_DISP_TRAP_AREA     // disabled -> trap area
        save_context r1, r3 // r1 = save area
        // Long jump to the C handler through the GOT; does not return.
        ldr r3, got_user_undef
        ldr pc, [PIC_REGISTER, r3] // f(fault_addr, save_area)
        // --- Kernel-mode path: fatal; save onto this mode's stack ---
        sub r2, sp, #(NUM_REGS * 4) // Save to stack
        save_context r2, r3 // r2 = saved context
        sub r1, lr, #4 // r1 = fault address
        mov r0, #ARM_EVECTOR_UNDEF
        ldr r3, got_fatal_kernel
        ldr pc, [PIC_REGISTER, r3] // f(evector, addr, save_area)
// void swi_handler(void)
//
// Entered in SVC mode, IRQ disabled, ARM state.
//
// r0 = encoded syscall ordinal
//
// For now the system saves the caller's context here, because
// some fraction of system calls do not return directly.
        // Guard (under an elided .if): the entry convention relies on r0.
        .error "Syscall entry broken. Expected ordinal reg to be r0."
        // Are we in kernel mode or not?
        stmfd sp!, {r0-r3} // Save for scratch use
        mrs r3, spsr // r3 = spsr until save_context
        ands r1, r3, #ARM_MODE_PRIV // r1 != 0 iff caller was privileged
        // System call from user space. Save state.
        get_dispatcher_shared_arm r2 r0
        disp_is_disabled r2, lr, r1 // r1 = 1 if disabled, else 0 (lr = return pc)
        addeq r0, r2, #OFFSETOF_DISP_ENABLED_AREA   // enabled -> enabled area
        addne r0, r2, #OFFSETOF_DISP_DISABLED_AREA  // disabled -> disabled area
        save_context r0, r3 // r0 = save area, r3 = scratch
        // Removing these two instructions: they don't do anything
        // ldr r11, [r0, #48] // context->fp
        // ldr lr, [r0, #60] // context->lr
        // Now we call sys_syscall:
        // __attribute__((noreturn))
        // void sys_syscall(arch_registers_state_t* context,
        //                  uint32_t disabled,
        //                  struct dispatcher_shared_generic *disp);
        // r0 = address of area context was saved to
        // r1 = 0 if not disabled, != 0 if disabled
        // r2 = kernel address of dispatcher
        // r3 = scratch value
        ldr r3, got_sys_syscall
        ldr pc, [PIC_REGISTER, r3]
        // Syscall issued from kernel mode: separate C entry point.
        ldr r3, got_syscall_kernel
        ldr pc, [PIC_REGISTER, r3]
// void pabt_handler(void)
//
// Entered in ABT mode, IRQ disabled, ARM state.
        stmfd sp!, {r0-r3} // Save for scratch use
        sub lr, lr, #4 // lr = faulting pc (prefetch aborts report pc+4)
        mrs r3, spsr // r3 = spsr until save_context
        ands r1, r3, #ARM_MODE_PRIV // r1 != 0 iff fault was in privileged mode
        // --- User-mode prefetch abort: save into the dispatcher ---
        get_dispatcher_shared_arm r2 r0
        mov r0, lr // r0 = faulting pc
        disp_is_disabled r2, r0, r1 // r1 = 1 if disabled, else 0
        addeq r1, r2, #OFFSETOF_DISP_ENABLED_AREA  // enabled -> enabled area
        addne r1, r2, #OFFSETOF_DISP_TRAP_AREA     // disabled -> trap area
        save_context r1, r3 // r1 = save area
        ldr r3, got_page_fault
        ldr pc, [PIC_REGISTER, r3] // f(fault_addr, save_area)
        // --- Kernel-mode prefetch abort: fatal ---
        // {r0-r3} spilled to stack
        sub r2, sp, #(NUM_REGS * 4) // Reserve stack space for save
        save_context r2, r3 // r2 = save_area
        mov r1, lr // r1 = faulting pc
        mov r0, #ARM_EVECTOR_PABT
        ldr r3, got_fatal_kernel
        ldr pc, [PIC_REGISTER, r3] // f(evector, addr, save_area)
// void dabt_handler(void)
//
// Entered in ABT mode, IRQ disabled, ARM state.
        stmfd sp!, {r0-r3} // Save for scratch use
        sub lr, lr, #8 // lr = faulting instruction (data aborts report pc+8)
        mrs r3, spsr // r3 = spsr until save_context
        ands r1, r3, #ARM_MODE_PRIV // r1 != 0 iff fault was in privileged mode
        // --- User-mode data abort: save into the dispatcher ---
        get_dispatcher_shared_arm r2 r0
        mov r0, lr // r0 = faulting pc
        disp_is_disabled r2, r0, r1 // r1 = disp_is_disabled
        addeq r1, r2, #OFFSETOF_DISP_ENABLED_AREA  // enabled -> enabled area
        addne r1, r2, #OFFSETOF_DISP_TRAP_AREA     // disabled -> trap area
        save_context r1, r3 // r1 = save_area
        mrc p15, 0, r0, c6, c0, 0 // r0 = fault address (CP15 DFAR)
        ldr r3, got_page_fault
        ldr pc, [PIC_REGISTER, r3] // f(fault_addr, save_area)
        // --- Kernel-mode data abort: fatal ---
        // {r0-r3} spilled to stack
        sub r2, sp, #(NUM_REGS * 4) // Reserve stack space for save
        save_context r2, r3 // r2 = save_area
        mrc p15, 0, r1, c6, c0, 0 // r1 = fault address (CP15 DFAR)
        mov r0, #ARM_EVECTOR_DABT
        ldr r3, got_fatal_kernel
        ldr pc, [PIC_REGISTER, r3] // f(evector, addr, save_area)
// void irq_handler(void)
//
// Entered in IRQ mode, IRQ disabled, ARM state
        stmfd sp!, {r0-r3} // Save for scratch use
        sub lr, lr, #4 // lr = return address (IRQs report pc+4)
        mrs r3, spsr // r3 = spsr until save_context
        ands r1, r3, #ARM_MODE_PRIV // r1 != 0 iff interrupted privileged mode
        // --- IRQ from user mode: save into the dispatcher ---
        get_dispatcher_shared_arm r2 r1 // r2 = cur_dcb->disp
        mov r1, lr // r1 = return address
        disp_is_disabled r2, r1, r0 // r0 = 1 if disabled, else 0
        addeq r0, r2, #OFFSETOF_DISP_ENABLED_AREA   // enabled -> enabled area
        addne r0, r2, #OFFSETOF_DISP_DISABLED_AREA  // disabled -> disabled area
        save_context r0, r3 // r0 = save area
        // Call: void handle_irq(arch_registers_state_t* save_area,
        //                       uintptr_t fault_pc,
        //                       struct dispatcher_shared_generic *disp)
        //          __attribute__((noreturn));
        ldr r3, got_handle_irq
        ldr pc, [PIC_REGISTER, r3] // f(save_area, fault_pc)
        // --- IRQ from kernel mode ---
        // IRQs in the kernel only occur in the wfi loop, and we don't really care
        // about the register context.
        add sp, sp, #16 // Discard scratch registers
        // Call: void handle_irq_kernel(arch_registers_state_t* NULL,
        //                              uintptr_t fault_pc,
        //                              struct dispatcher_shared_generic *disp)
        //          __attribute__((noreturn));
        ldr r3, got_kernel_irq
        ldr pc, [PIC_REGISTER, r3] // f(save_area, fault_pc)
// void fiq_handler(void)
//
// Entered in FIQ mode, IRQ disabled, ARM state
        stmfd sp!, {r0-r3} // Save for scratch use
        sub lr, lr, #4 // lr = return address (FIQs report pc+4)
        mrs r3, spsr // r3 = spsr until save_context
        ands r1, r3, #ARM_MODE_PRIV // r1 != 0 iff interrupted privileged mode
        // --- FIQ from user mode: save into the dispatcher ---
        get_dispatcher_shared_arm r2 r1
        disp_is_disabled r2, r1, r0 // r0 = 1 if disabled, else 0 (r1 = pc — TODO confirm elided setup)
        addeq r0, r2, #OFFSETOF_DISP_ENABLED_AREA   // enabled -> enabled area
        addne r0, r2, #OFFSETOF_DISP_DISABLED_AREA  // disabled -> disabled area
        save_context r0, r3 // r0 = save area
        // Call: void handle_fiq(arch_registers_state_t* save_area,
        //                       uintptr_t fault_pc,
        //                       struct dispatcher_shared_generic *disp)
        //          __attribute__((noreturn));
        ldr r3, got_handle_fiq
        ldr pc, [PIC_REGISTER, r3] // f(save_area, fault_pc)
        // CPU was in System mode.
        add sp, sp, #16 // Discard scratch registers
        // Call: void handle_fiq_kernel(arch_registers_state_t* save_area,
        //                              uintptr_t fault_pc)
        //          __attribute__((noreturn));
        ldr r3, got_kernel_fiq
        ldr pc, [PIC_REGISTER, r3] // f(save_area, fault_pc)
// Context-restore / exception-return path: rebuilds the user CPSR and
// register file from a saved area, then returns to user via SPSR copy.
// There is no SPSR in system mode, so switch to supervisor.
        msr CPSR_c, #(CPSR_IF_MASK | ARM_MODE_SVC) // SVC mode, IRQ+FIQ masked
        // Load cpsr into LR and move regs to next entry (postindex op)
        // LR = r14, used as scratch register.
        // LDR = read word from memory
        //       / use register containing "regs" as base register
        //       / / post index: only base register is used for
        //       / / / addressing and the offset added afterwards
        // set SPSR to value of lr == regs.cpsr
        // bits indicating SPSR
        // / read from register lr
        // Restore register r0 to r15, "^" means: cpsr := spsr
        // Restore the non-banked registers. Use LR as the index.
        // will increment the base pointer
        // Restore the user stack pointer and link register. n.b. LR is
        // banked in SVC mode, so *our* LR isn't affected. Also, this can't
        // write back, so we've got to add the offset ourselves.
        // Load the (banked SVC) LR with the return address (add the offset
        // that the last ldmia couldn't).
        // Exception return - LR_svc -> PC_usr, SPSR_svc -> CPSR
478 /* Any load targets for the instructions above must be within the same 4k
479 * page, so we flush constants here to make sure. */