2 * Copyright (c) 2007-2016 ETH Zurich.
5 * This file is distributed under the terms in the attached LICENSE file.
6 * If you do not find this file, copies can be found by writing to:
7 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
12 #endif // __ASSEMBLER__
14 #include <asmoffsets.h> // OFFSETOF etc.
15 #include <barrelfish_kpi/registers_arch.h> // CPSR_REG etc.
16 #include <barrelfish_kpi/flags_arch.h> // CPSR_IF_MASK etc.
17 #include <exceptions.h>
/*** Macros used in later routines. ***/

// Macro to initialize system mode stack.
// Assumes that the GOT pointer is set.
// Effect: sp = *(GOT + cpu_stack_got_offset) + KERNEL_STACK_SIZE,
// i.e. sp points at the TOP of this CPU's kernel stack (stacks grow down).
ldr sp, cpu_stack_got_offset // sp = GOT offset of kernel_stack symbol
ldr sp, [PIC_REGISTER, sp] // sp = kernel_stack base (GOT-relative load)
add sp, sp, #KERNEL_STACK_SIZE // sp = top of the kernel stack
// Macro definition to get pointer to arch-specific dispatcher
// Logical equivalent of C function with same name:
// \out = dcb_current->disp
// \tmp is expected to hold the GOT base for the position-independent
// load of dcb_current (NOTE(review): the instruction that fills \tmp
// is not visible in this excerpt -- confirm against full source).
.macro get_dispatcher_shared_arm out tmp
ldr \out, dcb_current_idx // out = GOT offset of dcb_current
ldr \out, [\tmp, \out] // out = &dcb_current
ldr \out, [\out] // out = dcb_current
ldr \out, [\out, #OFFSETOF_DCB_DISP] //now ptr to dispatcher_shared_arm
// Macro to determine if dispatcher is disabled.
//
// pc and disp arguments are unmodified.
// out contains result: 1 if the dispatcher must be treated as disabled
// (explicitly disabled, or pc inside the critical region), else 0.
// NOTE(review): the flag-setting compare instructions between the loads
// and the conditional branches below are not visible in this excerpt.
.macro disp_is_disabled disp, pc, out
// disp->disabled || (disp->crit_pc_lo <= pc && pc < disp->crit_pc_hi)
ldrb \out, [\disp, #OFFSETOF_DISP_DISABLED]
bhs 0f // disp->disabled >= 1 | disabled
// disp->disabled == false; now check the critical-section PC window
ldr \out, [\disp, #OFFSETOF_DISP_CRIT_PC_LOW]
bhi 0f // disp->crit_pc_low > pc | enabled
ldr \out, [\disp, #OFFSETOF_DISP_CRIT_PC_HIGH]
movhs \out, #0 // pc >= disp->crit_pc_high | enabled
movlo \out, #1 // pc < disp->crit_pc_high | disabled
// Macro to spill registers
//
// Assumptions:
// - context is in scratch registers set {r0-r3}.
// - spsr is also in scratch register set.
// - stack holds spilled scratch registers.
// - lr contains pc for context
// Side-effect:
// - pops scratch registers off stack (sp -> sp + 16).
// \spsr_reg is consumed: after the first store it is reused as a
// running store cursor.
.macro save_context context, spsr_reg
// Build-time layout invariants: the .err directives below are
// presumably guarded by .if/.endif lines not visible in this excerpt,
// and fire only if the register-save-area layout changes.
.err "Invariant failure: CPSR offset != 0"
.err "Invariant failure: PC offset != 16"
str \spsr_reg, [\context, #(CPSR_REG * 4)] // context->cpsr = saved SPSR
str lr, [\context, #(PC_REG * 4)] // context->pc = interrupted pc (in lr)
add \spsr_reg, \context, #(R4_REG * 4) // cursor = &context->r4
stmia \spsr_reg!, {r4-r14}^ // store USER-mode r4-r14 ('^' = user bank)
vstmia \spsr_reg!, {d0-d15} // store VFP/NEON d0-d15
vstmia \spsr_reg!, {d16-d31} // ... and d16-d31
add \spsr_reg, \context, #(R3_REG * 4) // cursor = &context->r3
pop {r4-r7} // Pop spilled scratch registers
stmda \spsr_reg!, {r4-r7} // And Save them as context r0-r3 (descending)
// Macro to initialize SVC pic register
//
// Read the PL1 thread ID register (CP15 c13,c0,4 = TPIDRPRW), where we
// stored the GOT address on boot.
mrc p15, 0, \reg, c13, c0, 4

// Macro to enter SYS mode with interrupts disabled.
// Set up stack and GOT pointer.
.macro enter_sys scratch
// scratch = target CPSR: SYS mode with IRQ and FIQ masked
mov \scratch, #(CPSR_IF_MASK | ARM_MODE_SYS)
load_got PIC_REGISTER
120 /*** From here, this is one contiguous block of code. ***/
123 /* The vector table and handler stubs are linked together, at a 4k-aligned
124 * address, so that they can be remapped to the high vector address. */
125 .section .text.vectors
127 .globl exception_vectors
129 /*** The exception vector table. ***/
131 /* This needs to be at the beginning of a 4k frame, that we'll map to the high
132 * vectors address. It also needs to long jump, as it will be jumping down
133 * into the regular kernel window. As there's no room to load the GOT in the
134 * vector table itself, the handler stubs are linked in the same 4k frame, so
135 * that we can reach them with a short jump. They then load the GOT base, and
136 * long jump into the C handlers. */
142 /* Undefined instruction */
150 /* Hypervisor trap */
157 /*** The exception handlers. ***/
/* Different instances of the CPU driver will have their own stacks. On the
 * BSP core, this is initialised in bsp_start, to the bsp kernel stack
 * allocated alongside the first CPU driver image. */
.type cpu_stack_got_offset, STT_OBJECT
cpu_stack_got_offset:
.word kernel_stack(GOT) // GOT offset of the kernel_stack symbol

/* The GOT offset of dcb_current. */
.type dcb_current_idx, STT_OBJECT
.word dcb_current(GOT) // NOTE(review): label dcb_current_idx: not visible here
/* The vector table above uses short jumps to reach these, so they must also
 * fit inside the 4kB high vectors page at 0xfffff000. */

/* These are the GOT offsets of the C handler functions, to which we've now
 * got to long jump. Each word is resolved via
 * "ldr r3, got_xxx; ldr pc, [PIC_REGISTER, r3]" in the stubs below.
 * (The got_xxx: labels themselves are not visible in this excerpt.) */

.type got_fatal_kernel, STT_OBJECT
.word fatal_kernel_fault(GOT) // fatal fault inside the CPU driver

.type got_user_undef, STT_OBJECT
.word handle_user_undef(GOT) // undefined instruction in user code

.type got_sys_syscall, STT_OBJECT
.word sys_syscall(GOT) // syscall from user space

.type got_syscall_kernel, STT_OBJECT
.word sys_syscall_kernel(GOT) // syscall issued from kernel mode

.type got_page_fault, STT_OBJECT
.word handle_user_page_fault(GOT) // user-mode prefetch/data abort

.type got_handle_irq, STT_OBJECT
.word handle_irq(GOT) // IRQ taken from user space

.type got_kernel_irq, STT_OBJECT
.word handle_irq_kernel(GOT) // IRQ taken inside the kernel

.type got_handle_fiq, STT_OBJECT
.word handle_fiq(GOT) // FIQ taken from user space

.type got_kernel_fiq, STT_OBJECT
.word handle_fiq_kernel(GOT) // FIQ taken inside the kernel
// void undef_handler(void)
//
// Entered in UNDEF mode, IRQ disabled, ARM state.
//
// NB Identical to PABT except for final jump in undef_user and
// code doesn't adjust lr to point to faulting instruction since
// it was undefined and there's no point re-executing it.
stmfd sp!, {r0-r3} // Save for scratch use
mrs r3, spsr // r3 = spsr until save_context
ands r1, r3, #ARM_MODE_PRIV // Z set iff trap came from user mode
// User path: spill the interrupted context into the dispatcher.
get_dispatcher_shared_arm r2 r0 // r2 = current dispatcher
sub r0, lr, #4 // r0 = faulting pc
disp_is_disabled r2, r0, r1 // r1 = 1 if disabled, else 0
addeq r1, r2, #OFFSETOF_DISP_ENABLED_AREA // enabled -> enabled area
addne r1, r2, #OFFSETOF_DISP_TRAP_AREA // disabled -> trap area
save_context r1, r3 // r1 = save area
ldr r3, got_user_undef
ldr pc, [PIC_REGISTER, r3] // f(fault_addr, save_area)
// Kernel path: undefined instruction inside the CPU driver is fatal.
sub r2, sp, #(NUM_REGS * 4) // Save to stack
save_context r2, r3 // r2 = saved context
sub r1, lr, #4 // r1 = fault address
mov r0, #ARM_EVECTOR_UNDEF // r0 = exception vector number
ldr r3, got_fatal_kernel
ldr pc, [PIC_REGISTER, r3] // f(evector, addr, save_area)
// void swi_handler(void)
//
// Entered in SVC mode, IRQ disabled, ARM state.
//
// r0 = encoded syscall ordinal
//
// For now the system saves the caller's context here, because
// some fraction of system calls do not return directly.
// Build-time check (guarding .if/.endif presumably outside this excerpt):
.error "Syscall entry broken. Expected ordinal reg to be r0."
// Are we in kernel mode or not?
stmfd sp!, {r0-r3} // Save for scratch use
mrs r3, spsr // r3 = spsr until save_context
ands r1, r3, #ARM_MODE_PRIV // Z set iff SWI came from user mode
// System call from user space. Save state.
get_dispatcher_shared_arm r2 r0 // r2 = current dispatcher
disp_is_disabled r2, lr, r1 // r1 = 1 if disabled, else 0
addeq r0, r2, #OFFSETOF_DISP_ENABLED_AREA // enabled -> enabled area
addne r0, r2, #OFFSETOF_DISP_DISABLED_AREA // disabled -> disabled area
save_context r0, r3 // r0 = save area, r3 = scratch
// Removing these two instructions: they don't do anything
// ldr r11, [r0, #48] // context->fp
// ldr lr, [r0, #60] // context->lr
// Now we call sys_syscall:
// __attribute__((noreturn))
// void sys_syscall(arch_registers_state_t* context,
// uint32_t disabled,
// struct dispatcher_shared_generic *disp);
// r0 = address of area context was saved to
// r1 = 0 if not disabled, != 0 if disabled
// r2 = kernel address of dispatcher
// r3 = scratch value
ldr r3, got_sys_syscall
ldr pc, [PIC_REGISTER, r3] // long jump to sys_syscall
// Kernel path: syscall issued from inside the CPU driver.
ldr r3, got_syscall_kernel
ldr pc, [PIC_REGISTER, r3] // long jump to sys_syscall_kernel
// void pabt_handler(void)
//
// Entered in ABT mode, IRQ disabled, ARM state.
stmfd sp!, {r0-r3} // Save for scratch use
sub lr, lr, #4 // lr = faulting pc (PABT lr is pc+4)
mrs r3, spsr // r3 = spsr until save_context
ands r1, r3, #ARM_MODE_PRIV // Z set iff abort came from user mode
// User path: spill context and hand off to the user page-fault handler.
get_dispatcher_shared_arm r2 r0 // r2 = current dispatcher
mov r0, lr // r0 = faulting pc
disp_is_disabled r2, r0, r1 // r1 = 1 if disabled, else 0
addeq r1, r2, #OFFSETOF_DISP_ENABLED_AREA // enabled -> enabled area
addne r1, r2, #OFFSETOF_DISP_TRAP_AREA // disabled -> trap area
save_context r1, r3 // r1 = save area
ldr r3, got_page_fault
ldr pc, [PIC_REGISTER, r3] // f(fault_addr, save_area)
// Kernel path: prefetch abort inside the CPU driver is fatal.
// {r0-r3} spilled to stack
sub r2, sp, #(NUM_REGS * 4) // Reserve stack space for save
save_context r2, r3 // r2 = save_area
mov r1, lr // r1 = faulting pc
mov r0, #ARM_EVECTOR_PABT // r0 = exception vector number
ldr r3, got_fatal_kernel
ldr pc, [PIC_REGISTER, r3] // f(evector, addr, save_area)
// void dabt_handler(void)
//
// Entered in ABT mode, IRQ disabled, ARM state.
stmfd sp!, {r0-r3} // Save for scratch use
sub lr, lr, #8 // lr = faulting instruction (DABT lr is pc+8)
mrs r3, spsr // r3 = spsr until save_context
ands r1, r3, #ARM_MODE_PRIV // Z set iff abort came from user mode
// User path: spill context and hand off to the user page-fault handler.
get_dispatcher_shared_arm r2 r0 // r2 = current dispatcher
mov r0, lr // r0 = faulting pc
disp_is_disabled r2, r0, r1 // r1 = disp_is_disabled
addeq r1, r2, #OFFSETOF_DISP_ENABLED_AREA // enabled -> enabled area
addne r1, r2, #OFFSETOF_DISP_TRAP_AREA // disabled -> trap area
save_context r1, r3 // r1 = save_area
mrc p15, 0, r0, c6, c0, 0 // r0 = fault address (CP15 DFAR)
ldr r3, got_page_fault
ldr pc, [PIC_REGISTER, r3] // f(fault_addr, save_area)
// Kernel path: data abort inside the CPU driver is fatal.
// {r0-r3} spilled to stack
sub r2, sp, #(NUM_REGS * 4) // Reserve stack space for save
save_context r2, r3 // r2 = save_area
mrc p15, 0, r1, c6, c0, 0 // r1 = fault address (CP15 DFAR)
mov r0, #ARM_EVECTOR_DABT // r0 = exception vector number
ldr r3, got_fatal_kernel
ldr pc, [PIC_REGISTER, r3] // f(evector, addr, save_area)
// void irq_handler(void)
//
// Entered in IRQ mode, IRQ disabled, ARM state
stmfd sp!, {r0-r3} // Save for scratch use
sub lr, lr, #4 // lr = return address (IRQ lr is pc+4)
mrs r3, spsr // r3 = spsr until save_context
ands r1, r3, #ARM_MODE_PRIV // Z set iff IRQ interrupted user mode
// User path: spill the interrupted context into the dispatcher.
get_dispatcher_shared_arm r2 r1 // r2 = cur_dcb->disp
mov r1, lr // r1 = return address
disp_is_disabled r2, r1, r0 // r0 = 1 if disabled, else 0
addeq r0, r2, #OFFSETOF_DISP_ENABLED_AREA // enabled -> enabled area
addne r0, r2, #OFFSETOF_DISP_DISABLED_AREA // disabled -> disabled area
save_context r0, r3 // r0 = save area
// Call: void handle_irq(arch_registers_state_t* save_area,
// uintptr_t fault_pc,
// struct dispatcher_shared_generic *disp)
// __attribute__((noreturn));
ldr r3, got_handle_irq
ldr pc, [PIC_REGISTER, r3] // f(save_area, fault_pc)
// Kernel path:
// IRQs in the kernel only occur in the wfi loop, and we don't really care
// about the register context.
add sp, sp, #16 // Discard scratch registers
// Call: void handle_irq_kernel(arch_registers_state_t* NULL,
// uintptr_t fault_pc,
// struct dispatcher_shared_generic *disp)
// __attribute__((noreturn));
ldr r3, got_kernel_irq
ldr pc, [PIC_REGISTER, r3] // f(save_area, fault_pc)
// void fiq_handler(void)
//
// Entered in FIQ mode, IRQ disabled, ARM state
stmfd sp!, {r0-r3} // Save for scratch use
sub lr, lr, #4 // lr = return address (FIQ lr is pc+4)
mrs r3, spsr // r3 = spsr until save_context
ands r1, r3, #ARM_MODE_PRIV // Z set iff FIQ interrupted user mode
// User path: spill the interrupted context into the dispatcher.
get_dispatcher_shared_arm r2 r1 // r2 = current dispatcher
disp_is_disabled r2, r1, r0 // r0 = 1 if disabled, else 0
addeq r0, r2, #OFFSETOF_DISP_ENABLED_AREA // enabled -> enabled area
addne r0, r2, #OFFSETOF_DISP_DISABLED_AREA // disabled -> disabled area
save_context r0, r3 // r0 = save area
// Call: void handle_fiq(arch_registers_state_t* save_area,
// uintptr_t fault_pc,
// struct dispatcher_shared_generic *disp)
// __attribute__((noreturn));
ldr r3, got_handle_fiq
ldr pc, [PIC_REGISTER, r3] // f(save_area, fault_pc)
// CPU was in System mode.
add sp, sp, #16 // Discard scratch registers
// Call: void handle_fiq_kernel(arch_registers_state_t* save_area,
// uintptr_t fault_pc)
// __attribute__((noreturn));
ldr r3, got_kernel_fiq
ldr pc, [PIC_REGISTER, r3] // f(save_area, fault_pc)
// Context-resume path: restore a saved register context and return to
// user mode. (Several restore instructions between the comments below
// are not visible in this excerpt.)
// There is no SPSR in system mode, so switch to supervisor.
msr CPSR_c, #(CPSR_IF_MASK | ARM_MODE_SVC)
// Load cpsr into LR and move regs to next entry (postindex op)
// LR = r14, used as scratch register.
// LDR = read word from memory
// / use register containing "regs" as base register
// / / post index: only base register is used for
// / / / addressing and the offset added afterwards
// set SPSR to value of lr == regs.cpsr
// bits indicating SPSR
// / read from register lr
// Restore register r0 to r15, "^" means: cpsr := spsr
// Restore the non-banked registers. Use LR as the index.
// will increment the base pointer
// Restore the user stack pointer and link register. n.b. LR is
// banked in SVC mode, so *our* LR isn't affected. Also, this can't
// write back, so we've got to add the offset ourselves.
ldmia lr, {r13, r14}^ // '^': load USER-mode sp and lr, no writeback
// Load the (banked SVC) LR with the return address (add the offset
// that the last ldmia couldn't).
// Exception return - LR_svc -> PC_usr, SPSR_svc -> CPSR

/* Any load targets for the instructions above must be within the same 4k
 * page, so we flush constants here to make sure. */