armv8: Start with all interrupts disabled
[barrelfish] / kernel / arch / armv8 / exn.c
/*
 * Copyright (c) 2009-2013 ETH Zurich.
 * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <kernel.h>
#include <dispatch.h>
#include <systime.h>
#include <arm_hal.h>
#include <sysreg.h>
#include <exceptions.h>
#include <exec.h>
#include <misc.h>
#include <stdio.h>
#include <wakeup.h>
#include <irq.h>
#include <arch/arm/arm.h>
#include <arch/arm/gic.h>
#include <arch/arm/platform.h>
#include <dev/armv8_dev.h>

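/*
 * Exception handling for the AArch64 kernel.  Faults taken from user space
 * are reflected to the faulting domain's dispatcher as upcalls, IRQs are
 * demultiplexed to the timer or forwarded to user-level handlers, and
 * anything the kernel cannot hand off ends up in fatal_kernel_fault(), which
 * dumps the machine state and panics.
 */
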
void handle_user_page_fault(lvaddr_t                fault_address,
                            arch_registers_state_t* save_area,
                            union registers_aarch64 *resume_area)
{
    lvaddr_t handler;
    struct dispatcher_shared_aarch64 *disp =
        get_dispatcher_shared_aarch64(dcb_current->disp);
    uintptr_t saved_pc = save_area->named.pc;

    disp->d.disabled = dispatcher_is_disabled_ip(dcb_current->disp, saved_pc);
    bool disabled = (disp->d.disabled != 0);

    assert(dcb_current->disp_cte.cap.type == ObjType_Frame);

    printk(LOG_WARN, "user page fault%s in '%.*s': addr %"PRIxLVADDR
                      " IP %"PRIxPTR"\n",
           disabled ? " WHILE DISABLED" : "", DISP_NAME_LEN,
           disp->d.name, fault_address, saved_pc);

    if (disabled) {
        assert(save_area == &disp->trap_save_area);
        handler = disp->d.dispatcher_pagefault_disabled;
        dcb_current->faults_taken++;
    }
    else {
        assert(save_area == &disp->enabled_save_area);
        handler = disp->d.dispatcher_pagefault;
    }

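    /* faults_taken is only incremented for faults taken while disabled,
     * i.e. while the dispatcher's own pagefault handler runs.  Repeated
     * faults there presumably mean the handler itself is faulting, so give
     * up and deschedule the domain instead of upcalling forever. */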
    if (dcb_current->faults_taken > 2) {
        printk(LOG_WARN, "handle_user_page_fault: too many faults, "
               "making domain unrunnable\n");
        dcb_current->faults_taken = 0; // just in case it gets restarted
        scheduler_remove(dcb_current);
        dispatch(schedule());
    }
    else {
        //
        // Upcall to dispatcher
        //
        // NB System might be cleaner with a prototype
        // dispatch context that has R0-R3 to be overwritten
        // plus initial stack, thread, and gic registers. Could do
        // a faster resume_for_upcall().
        //

        struct dispatcher_shared_generic *disp_gen =
            get_dispatcher_shared_generic(dcb_current->disp);

        /* XXX - This code leaks the contents of the kernel stack to the
         * user-level fault handler. */
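        /* resume_area is a local variable on the kernel stack in
         * handle_user_fault(); only x0-x3, x10, pc and spsr are written
         * below, so resume() restores the remaining user registers from
         * uninitialised stack memory. */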

        resume_area->named.x0   = disp_gen->udisp;
        resume_area->named.x1   = fault_address;
        resume_area->named.x2   = 0;
        resume_area->named.x3   = saved_pc;
        /* Why does the kernel do this? */
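        /* Presumably x10 is the register Barrelfish's AArch64 ABI reserves
         * for the GOT pointer, so the upcall entry point can locate the
         * dispatcher's globals before the rest of its state is restored. */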
        resume_area->named.x10  = disp->got_base;
        resume_area->named.pc   = handler;
        resume_area->named.spsr = CPSR_F_MASK | AARCH64_MODE_USR;

        // SP is set by handler routine.

        // Upcall user to save area
        disp->d.disabled = true;
        printk(LOG_WARN, "page fault at %"PRIxLVADDR" calling handler %"PRIxLVADDR"\n",
               fault_address, handler);
    }
}

void handle_user_undef(lvaddr_t fault_address, enum aarch64_exception_class cause,
                       arch_registers_state_t* save_area,
                       union registers_aarch64 *resume_area)
{
    struct dispatcher_shared_aarch64 *disp =
        get_dispatcher_shared_aarch64(dcb_current->disp);

    bool disabled =
        dispatcher_is_disabled_ip(dcb_current->disp, save_area->named.pc);
    disp->d.disabled = disabled;

    assert(dcb_current->disp_cte.cap.type == ObjType_Frame);
    if (disabled) {
        //        assert(save_area == &disp->trap_save_area);
    }
    else {
        assert(save_area == &disp->enabled_save_area);
    }

    printk(LOG_WARN, "user undef fault (0x%lx)%s in '%.*s': IP 0x%lx x29:%lx x30:%lx sp:%lx\n",
           cause, disabled ? " WHILE DISABLED" : "", DISP_NAME_LEN,
           disp->d.name, fault_address, save_area->named.x29, save_area->named.x30, save_area->named.stack);

    struct dispatcher_shared_generic *disp_gen =
        get_dispatcher_shared_generic(dcb_current->disp);

    resume_area->named.x0   = disp_gen->udisp;
    resume_area->named.x1   = AARCH64_EVECTOR_UNDEF;
    resume_area->named.x2   = 0;
    resume_area->named.x3   = fault_address;
    /* Why does the kernel do this? */
    resume_area->named.x10  = disp->got_base;
    resume_area->named.pc   = disp->d.dispatcher_trap;
    resume_area->named.spsr = CPSR_F_MASK | AARCH64_MODE_USR;

    // Upcall user to save area
    disp->d.disabled = true;
}

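/*
 * Demultiplex a synchronous exception taken from EL0.  'cause' is the
 * exception class decoded from ESR_EL1: instruction and data aborts become
 * user page faults, system calls should never arrive here, and everything
 * else is reflected to the dispatcher's trap handler via handle_user_undef().
 */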
void handle_user_fault(lvaddr_t fault_address, uintptr_t cause,
                       arch_registers_state_t* save_area)
{
    union registers_aarch64 resume_area;

    switch(cause) {
        case aarch64_ec_unknown :
        case aarch64_ec_wfi :
        case aarch64_ec_mcr_cp15 :
        case aarch64_ec_mcrr_cp15 :
        case aarch64_ec_mcr_cp14 :
        case aarch64_ec_ldc_cp14 :
        case aarch64_ec_fpen :
        case aarch64_ec_mcr_cp10 :
        case aarch64_ec_mcrr_cp14 :
        case aarch64_ec_il :
            handle_user_undef(fault_address, cause, save_area, &resume_area);
            break;
        case aarch64_ec_svc_aa32 :
        case aarch64_ec_hvc_aa32 :
        case aarch64_ec_smc_aa32 :
        case aarch64_ec_svc_aa64 :
        case aarch64_ec_hvc_aa64 :
        case aarch64_ec_smc_aa64 :
            panic("syscall ended up in exception handler? Yuck.");
            break;
        case aarch64_ec_mrs :
        case aarch64_ec_impl :
            handle_user_undef(fault_address, cause, save_area, &resume_area);
            break;
        case aarch64_ec_iabt_low  :
            handle_user_page_fault(fault_address, save_area, &resume_area);
            break;
        case aarch64_ec_iabt_high :
            panic("pagefault while in kernel? Yuck.");
            break;
        case aarch64_ec_pc_align :
            handle_user_undef(fault_address, cause, save_area, &resume_area);
            break;
        case aarch64_ec_dabt_low :
            handle_user_page_fault(fault_address, save_area, &resume_area);
            break;
        case aarch64_ec_dabt_high :
            panic("pagefault while in kernel? Yuck.");
            break;
        case aarch64_ec_sp_align :
        case aarch64_ec_fpu_aa32 :
        case aarch64_ec_fpu_aa64 :
        case aarch64_ec_serror :
        case aarch64_ec_bkpt_low :
        case aarch64_ec_bkpt_high :
        case aarch64_ec_step_low :
        case aarch64_ec_step_high :
        case aarch64_ec_wpt_low :
        case aarch64_ec_wpt_high :
        case aarch64_ec_bkpt_soft :
        case aarch64_ec_bkpt_el2 :
        case aarch64_ec_brk :
            handle_user_undef(fault_address, cause, save_area, &resume_area);
            break;
        default:
            panic("Unknown exception syndrome: %" PRIuPTR, cause);
            break;
    }

    resume(&resume_area);
}

void handle_irq(arch_registers_state_t* save_area, uintptr_t fault_pc,
                uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3)
{
    uint32_t irq = 0;

    /* Save the FPU registers */
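    /* Each q register is 128 bits, so every stp of a register pair advances
     * the store offset by 0x20 bytes.  (Assumption: the entry stub only saves
     * general-purpose state, so the SIMD/FP registers are captured here
     * before any kernel code can clobber them.) */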
    __asm volatile(
        "   stp q0, q1, [%x0, #0]\n\t"
        "   stp q2, q3, [%x0, #0x20]\n\t"
        "   stp q4, q5, [%x0, #0x40]\n\t"
        "   stp q6, q7, [%x0, #0x60]\n\t"
        "   stp q8, q9, [%x0, #0x80]\n\t"
        "   stp q10, q11, [%x0, #0xa0]\n\t"
        "   stp q12, q13, [%x0, #0xc0]\n\t"
        "   stp q14, q15, [%x0, #0xe0]\n\t"
        "   stp q16, q17, [%x0, #0x100]\n\t"
        "   stp q18, q19, [%x0, #0x120]\n\t"
        "   stp q20, q21, [%x0, #0x140]\n\t"
        "   stp q22, q23, [%x0, #0x160]\n\t"
        "   stp q24, q25, [%x0, #0x180]\n\t"
        "   stp q26, q27, [%x0, #0x1a0]\n\t"
        "   stp q28, q29, [%x0, #0x1c0]\n\t"
        "   stp q30, q31, [%x0, #0x1e0]\n\t"
         :: "r" (&save_area->named.v));

    /* The assembly stub leaves the first 4 registers, the stack pointer,
     * the exception PC, and the SPSR for us to save, as it's run out of room for
     * the necessary instructions. */
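    /* SP_EL0 is the user-level stack pointer and SPSR_EL1 the PSTATE saved at
     * exception entry, so together with fault_pc this completes the trap
     * frame. */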
    save_area->named.x0    = x0;
    save_area->named.x1    = x1;
    save_area->named.x2    = x2;
    save_area->named.x3    = x3;
    save_area->named.stack = armv8_SP_EL0_rd(NULL);
    save_area->named.spsr  = armv8_SPSR_EL1_rd(NULL);
    save_area->named.pc    = fault_pc;

    irq = platform_get_active_irq();

    debug(SUBSYS_DISPATCH, "IRQ %"PRIu32" while %s\n", irq,
          dcb_current ? (dcb_current->disabled ? "disabled": "enabled") :
                        "in kernel");

    if (dcb_current != NULL) {
        dispatcher_handle_t handle = dcb_current->disp;
        if (save_area == dispatcher_get_disabled_save_area(handle)) {
            assert(dispatcher_is_disabled_ip(handle, fault_pc));
            dcb_current->disabled = true;
        } else {
/*            debug(SUBSYS_DISPATCH,
                  "save_area=%p, dispatcher_get_enabled_save_area(handle)=%p\n",
                   save_area, dispatcher_get_enabled_save_area(handle));
*/

            assert(save_area == dispatcher_get_enabled_save_area(handle));
            assert(!dispatcher_is_disabled_ip(handle, fault_pc));
            dcb_current->disabled = false;
        }
    }
    static int first_timer_interrupt_fired = 0;
    // Offer it to the timer
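    // If this is the timer interrupt: acknowledge it, run any pending
    // wakeups, re-arm the periodic timer (unless built for one-shot mode),
    // and reschedule.  Anything else is acknowledged and forwarded to user
    // space, although the panic below currently makes that path fatal.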
    if (platform_is_timer_interrupt(irq)) {
        if(!first_timer_interrupt_fired){
            printk(LOG_NOTE, "ARMv8-A: Timer interrupt received\n");
            first_timer_interrupt_fired = 1;
        }
        platform_acknowledge_irq(irq);
        wakeup_check(systime_now());
#ifndef CONFIG_ONESHOT_TIMER
        // Set next trigger
        systime_set_timer(kernel_timeslice);
#endif
        dispatch(schedule());
    } else {
        printf("%s: %"PRIu32"\n", __func__, irq);
        platform_acknowledge_irq(irq);
        send_user_interrupt(irq);
        panic("Unhandled IRQ %"PRIu32"\n", irq);
    }

}

#define STACK_DUMP_LIMIT 32

/* For unhandled faults, we print a register dump and panic. */
void fatal_kernel_fault(lvaddr_t epc, uint64_t spsr, uint64_t esr,
                        uint64_t vector, arch_registers_state_t* save_area)
{
    size_t i;
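    /* ESR_EL1 layout: exception class in bits [31:26], instruction length in
     * bit 25, instruction-specific syndrome in bits [24:0] (assuming
     * FIELD(pos, width, value) extracts 'width' bits starting at bit 'pos'). */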
    enum aarch64_exception_class exception_class = FIELD(26,6,esr);
    /* int instruction_length = FIELD(25,1,esr); */
    int iss                = FIELD(0,25,esr);

    /* Save the FPU registers */
    __asm volatile(
        "   stp q0, q1, [%x0, #0]\n\t"
        "   stp q2, q3, [%x0, #0x20]\n\t"
        "   stp q4, q5, [%x0, #0x40]\n\t"
        "   stp q6, q7, [%x0, #0x60]\n\t"
        "   stp q8, q9, [%x0, #0x80]\n\t"
        "   stp q10, q11, [%x0, #0xa0]\n\t"
        "   stp q12, q13, [%x0, #0xc0]\n\t"
        "   stp q14, q15, [%x0, #0xe0]\n\t"
        "   stp q16, q17, [%x0, #0x100]\n\t"
        "   stp q18, q19, [%x0, #0x120]\n\t"
        "   stp q20, q21, [%x0, #0x140]\n\t"
        "   stp q22, q23, [%x0, #0x160]\n\t"
        "   stp q24, q25, [%x0, #0x180]\n\t"
        "   stp q26, q27, [%x0, #0x1a0]\n\t"
        "   stp q28, q29, [%x0, #0x1c0]\n\t"
        "   stp q30, q31, [%x0, #0x1e0]\n\t"
         :: "r" (&save_area->named.v));

    printk(LOG_PANIC, "Fatal (unexpected) fault at 0x%"PRIx64 " (%#" PRIx64 ")\n\n", epc, epc - (uintptr_t)&kernel_first_byte);
    printk(LOG_PANIC, "Register context saved at: %p\n", save_area);
    printk(LOG_PANIC, "Vector: ");
    switch(vector) {
        case AARCH64_EVECTOR_UNDEF:
            printk(LOG_PANIC, "UNDEF\n");
            break;
        case AARCH64_EVECTOR_EL0_SYNC:
            printk(LOG_PANIC, "EL0_SYNC\n");
            break;
        case AARCH64_EVECTOR_EL0_IRQ:
            printk(LOG_PANIC, "EL0_IRQ\n");
            break;
        case AARCH64_EVECTOR_EL0_FIQ:
            printk(LOG_PANIC, "EL0_FIQ\n");
            break;
        case AARCH64_EVECTOR_EL0_SERROR:
            printk(LOG_PANIC, "EL0_SERROR\n");
            break;
        case AARCH64_EVECTOR_EL1_SYNC:
            printk(LOG_PANIC, "EL1_SYNC\n");
            break;
        case AARCH64_EVECTOR_EL1_IRQ:
            printk(LOG_PANIC, "EL1_IRQ\n");
            break;
        case AARCH64_EVECTOR_EL1_FIQ:
            printk(LOG_PANIC, "EL1_FIQ\n");
            break;
        case AARCH64_EVECTOR_EL1_SERROR:
            printk(LOG_PANIC, "EL1_SERROR\n");
            break;
        case AARCH64_EVECTOR_EL2_SYNC:
            printk(LOG_PANIC, "EL2_SYNC\n");
            break;
        case AARCH64_EVECTOR_EL2_IRQ:
            printk(LOG_PANIC, "EL2_IRQ\n");
            break;
        case AARCH64_EVECTOR_EL2_FIQ:
            printk(LOG_PANIC, "EL2_FIQ\n");
            break;
        case AARCH64_EVECTOR_EL2_SERROR:
            printk(LOG_PANIC, "EL2_SERROR\n");
            break;
        case AARCH32_EVECTOR_EL0_SYNC:
            printk(LOG_PANIC, "AARCH32_EL0_SYNC\n");
            break;
        case AARCH32_EVECTOR_EL0_IRQ:
            printk(LOG_PANIC, "AARCH32_EL0_IRQ\n");
            break;
        case AARCH32_EVECTOR_EL0_FIQ:
            printk(LOG_PANIC, "AARCH32_EL0_FIQ\n");
            break;
        case AARCH32_EVECTOR_EL0_SERROR:
            printk(LOG_PANIC, "AARCH32_EL0_SERROR\n");
            break;
    }

    for (i = 0; i < 31; i++) {
        uint64_t reg = save_area->regs[i];
        if (reg >= (uintptr_t)&kernel_first_byte && reg <= (uintptr_t)&kernel_text_final_byte) {
            printk(LOG_PANIC, "x%d\t%"PRIx64" (%#" PRIx64 ")\n", (int)i, reg, reg - (uintptr_t)&kernel_first_byte);
        } else {
            printk(LOG_PANIC, "x%d\t%"PRIx64"\n", (int)i, reg);
        }
    }

    printk(LOG_PANIC, "sp\t%"PRIx64"\n", save_area->regs[SP_REG]);
    printk(LOG_PANIC, "pc\t%"PRIx64"\n", epc);
    printk(LOG_PANIC, "spsr\t%"PRIx64"\n", spsr);
    printk(LOG_PANIC, "instruction-specific syndrome\t%x\n", iss);

    /* Skip the trap frame to dump the prior stack. */
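    /* save_area is assumed to sit in the register frame the exception stub
     * pushed on the kernel stack; stepping over NUM_REGS 64-bit slots lands
     * on whatever the kernel had on its stack before the trap. */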
    uint64_t *kstack_base= (void *)save_area + (NUM_REGS * 8);

    if((((uintptr_t)kstack_base) & MASK(3)) != 0) {
        kstack_base= (uint64_t *)((uint64_t)kstack_base & ~MASK(3));
        printk(LOG_PANIC,
               "Kernel stack is misaligned, dumping from %p\n",
               kstack_base);
    }

    uint64_t kstack_len =
        (((uint64_t)kernel_stack + KERNEL_STACK_SIZE) -
         (uint64_t)kstack_base) /
        sizeof(uint64_t);

    printk(LOG_PANIC,
           "Kernel stack (0x%p - 0x%p):\n",
           kstack_base,
           (void *)kernel_stack + KERNEL_STACK_SIZE);

    for(i= 0; i < kstack_len-2; i+=2) {
        if(i > STACK_DUMP_LIMIT) {
            printk(LOG_PANIC, "...\n");
            break;
        }

        printk(LOG_PANIC,
               "%016"PRIx64"  %016"PRIx64"  %016"PRIx64"\n",
               (uint64_t)(kstack_base + i),
               kstack_base[i],
               kstack_base[i+1]);
    }

    switch(exception_class) {
        case aarch64_ec_unknown:
            panic("Unknown reason/instruction.\n");

        case aarch64_ec_wfi:
            panic("Trapped WFI/WFE.\n");

        case aarch64_ec_mcr_cp15:
        case aarch64_ec_mcrr_cp15:
            panic("CP15 abort.\n");

        case aarch64_ec_mcr_cp14:
        case aarch64_ec_ldc_cp14:
        case aarch64_ec_mcrr_cp14:
            panic("CP14 abort.\n");

        case aarch64_ec_fpen:
        case aarch64_ec_fpu_aa32:
        case aarch64_ec_fpu_aa64:
            panic("FPU abort.\n");

        case aarch64_ec_mcr_cp10:
            panic("CP10 abort.\n");

        case aarch64_ec_il:
            panic("PSTATE.IL == 1.\n");

        case aarch64_ec_svc_aa32:
        case aarch64_ec_hvc_aa32:
        case aarch64_ec_svc_aa64:
        case aarch64_ec_hvc_aa64:
        case aarch64_ec_smc_aa64:
            panic("Unhandled system/hypervisor/monitor call.\n");

        case aarch64_ec_mrs:
            panic("Exception caused by MSR/MRS.\n");

        case aarch64_ec_impl:
            panic("Implementation-specific exception.\n");

        case aarch64_ec_iabt_low:
            panic("Instruction abort at user level.\n");

        case aarch64_ec_iabt_high:
            panic("Instruction abort in the kernel.\n");

        case aarch64_ec_pc_align:
            panic("Misaligned PC @0x%"PRIx64".\n",
                  sysreg_read_far());

        case aarch64_ec_dabt_low:
            panic("Data abort at user level @0x%"PRIx64".\n",
                  sysreg_read_far());

        case aarch64_ec_dabt_high:
            printk(LOG_PANIC,
                   "Data abort in the kernel @0x%"PRIx64".\n",
                   sysreg_read_far());
            printk(LOG_PANIC, "Abort type: ");
            switch(iss) {
                case aarch64_dsfc_size_l0:
                    printk(LOG_PANIC, "address size fault, L0/TTBR\n");
                    break;
                case aarch64_dsfc_size_l1:
                    printk(LOG_PANIC, "address size fault, L1\n");
                    break;
                case aarch64_dsfc_size_l2:
                    printk(LOG_PANIC, "address size fault, L2\n");
                    break;
                case aarch64_dsfc_size_l3:
                    printk(LOG_PANIC, "address size fault, L3\n");
                    break;
                case aarch64_dsfc_trans_l0:
                    printk(LOG_PANIC, "translation fault, L0/TTBR\n");
                    break;
                case aarch64_dsfc_trans_l1:
                    printk(LOG_PANIC, "translation fault, L1\n");
                    break;
                case aarch64_dsfc_trans_l2:
                    printk(LOG_PANIC, "translation fault, L2\n");
                    break;
                case aarch64_dsfc_trans_l3:
                    printk(LOG_PANIC, "translation fault, L3\n");
                    break;
                case aarch64_dsfc_flag_l1:
                    printk(LOG_PANIC, "access flag fault, L1\n");
                    break;
                case aarch64_dsfc_flag_l2:
                    printk(LOG_PANIC, "access flag fault, L2\n");
                    break;
                case aarch64_dsfc_flag_l3:
                    printk(LOG_PANIC, "access flag fault, L3\n");
                    break;
                case aarch64_dsfc_perm_l1:
                    printk(LOG_PANIC, "permission fault, L1\n");
                    break;
                case aarch64_dsfc_perm_l2:
                    printk(LOG_PANIC, "permission fault, L2\n");
                    break;
                case aarch64_dsfc_perm_l3:
                    printk(LOG_PANIC, "permission fault, L3\n");
                    break;
                case aarch64_dsfc_external:
                    printk(LOG_PANIC, "external abort\n");
                    break;
                case aarch64_dsfc_external_l0:
                    printk(LOG_PANIC, "external abort on walk, L0/TTBR\n");
                    break;
                case aarch64_dsfc_external_l1:
                    printk(LOG_PANIC, "external abort on walk, L1\n");
                    break;
                case aarch64_dsfc_external_l2:
                    printk(LOG_PANIC, "external abort on walk, L2\n");
                    break;
                case aarch64_dsfc_external_l3:
                    printk(LOG_PANIC, "external abort on walk, L3\n");
                    break;
                case aarch64_dsfc_parity:
                    printk(LOG_PANIC, "parity error\n");
                    break;
                case aarch64_dsfc_parity_l0:
                    printk(LOG_PANIC, "parity error on walk, L0/TTBR\n");
                    break;
                case aarch64_dsfc_parity_l1:
                    printk(LOG_PANIC, "parity error on walk, L1\n");
                    break;
                case aarch64_dsfc_parity_l2:
                    printk(LOG_PANIC, "parity error on walk, L2\n");
                    break;
                case aarch64_dsfc_parity_l3:
                    printk(LOG_PANIC, "parity error on walk, L3\n");
                    break;
                case aarch64_dsfc_alighment:
                    printk(LOG_PANIC, "alignment fault\n");
                    break;
                case aarch64_dsfc_tlb_confl:
                    printk(LOG_PANIC, "TLB conflict\n");
                    break;
                case aarch64_dsfc_impl1:
                    printk(LOG_PANIC, "implementation-defined fault 1\n");
                    break;
                case aarch64_dsfc_impl2:
                    printk(LOG_PANIC, "implementation-defined fault 2\n");
                    break;
                case aarch64_dsfc_sect_dom:
                    printk(LOG_PANIC, "domain fault on section\n");
                    break;
                case aarch64_dsfc_page_dom:
                    printk(LOG_PANIC, "domain fault on page\n");
                    break;
                default:
                    printk(LOG_PANIC, "unknown\n");
                    break;
            }
            panic("halting.\n");

        case aarch64_ec_sp_align:
            panic("Misaligned SP.\n");

        case aarch64_ec_serror:
            panic("Delayed memory abort.\n");

        case aarch64_ec_bkpt_low:
            panic("HW Breakpoint in user code.\n");

        case aarch64_ec_bkpt_high:
            panic("HW Breakpoint in the kernel.\n");

        case aarch64_ec_step_low:
            panic("Single step in user code.\n");

        case aarch64_ec_step_high:
            panic("Single step in the kernel.\n");

        case aarch64_ec_wpt_low:
            panic("HW Watchpoint in user code @0x%"PRIx64".\n",
                  sysreg_read_far());

        case aarch64_ec_wpt_high:
            panic("HW Watchpoint in the kernel @0x%"PRIx64".\n",
                  sysreg_read_far());

        case aarch64_ec_bkpt_soft:
            panic("AArch32 soft breakpoint.\n");

        case aarch64_ec_bkpt_el2:
            panic("AArch32 Breakpoint trapped to EL2.\n");

        case aarch64_ec_brk:
            panic("AArch64 soft breakpoint.\n");

        default:
            panic("Unrecognised exception.\n");
    }
}