/**
 * \file
 * \brief x86-64 interrupt/exception handling utility functions
 */

/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2011, 2013, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
 */
/*********************************************************************
 *
 * Copyright (C) 2003-2004, Karlsruhe University
 *
 * File path: glue/v4-amd64/hwirq.h
 * Description: Macros to define interrupt handler stubs for AMD64
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: hwirq.h,v 1.3 2006/10/19 22:57:35 ud3 Exp $
 *
 ********************************************************************/
#include <arch_gdb_stub.h>
#include <arch/x86/perfmon.h>
#include <arch/x86/barrelfish_kpi/perfmon.h>
#include <arch/x86/pic.h>
#include <arch/x86/apic.h>
#include <barrelfish_kpi/dispatcher_shared_target.h>
#include <asmoffsets.h>
#include <trace/trace.h>
#include <trace_definitions/trace_defs.h>
#include <arch/x86/timing.h>
#include <arch/x86/syscall.h>
#include <arch/x86/ipi_notify.h>
#include <barrelfish_kpi/cpu_arch.h>
#include <mdb/mdb_tree.h>
#include <sys_debug.h>

#include <dev/ia32_dev.h>
#ifdef FPU_LAZY_CONTEXT_SWITCH
#  include <fpu.h>
#endif

#ifdef __k1om__
#  define START_KERNEL_PHYS K1OM_START_KERNEL_PHYS
#else
#  define START_KERNEL_PHYS X86_64_START_KERNEL_PHYS
#endif
static const char *idt_descs[] =
{
    [IDT_DE]    = "#DE: Divide Error",
    [IDT_DB]    = "#DB: Debug",
    [IDT_NMI]   = "Nonmaskable External Interrupt",
    [IDT_BP]    = "#BP: Breakpoint",
    [IDT_OF]    = "#OF: Overflow",
    [IDT_BR]    = "#BR: Bound Range Exceeded",
    [IDT_UD]    = "#UD: Undefined/Invalid Opcode",
    [IDT_NM]    = "#NM: No Math Coprocessor",
    [IDT_DF]    = "#DF: Double Fault",
    [IDT_FPUGP] = "Coprocessor Segment Overrun",
    [IDT_TS]    = "#TS: Invalid TSS",
    [IDT_NP]    = "#NP: Segment Not Present",
    [IDT_SS]    = "#SS: Stack Segment Fault",
    [IDT_GP]    = "#GP: General Protection Fault",
    [IDT_PF]    = "#PF: Page Fault",
    [IDT_MF]    = "#MF: FPU Floating-Point Error",
    [IDT_AC]    = "#AC: Alignment Check",
    [IDT_MC]    = "#MC: Machine Check",
    [IDT_XF]    = "#XF: SIMD Floating-Point Exception",
};
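
/* Editor's note: idt_descs is indexed by exception vector; the user-trap
 * path in generic_handle_user_exception() below uses it to print a
 * human-readable name, e.g. idt_descs[IDT_GP] == "#GP: General Protection
 * Fault". */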
/**
 * \brief Define IRQ handler number 'num'.
 *
 * This defines an interrupt handler for vector #num. The way this is done is
 * quite tricky: A block of assembly is emitted, with a label pointing to
 * the beginning of that block. The label is made known as a symbol by
 * having a C function _declaration_ directly in front of the block. The
 * symbol has to be defined extern, so it is global, but its ELF visibility
 * is set "hidden", so that the symbol does not end up in the GOT. This is
 * very important for keeping the code position-independent.
 *
 * The NOERR/ERR variants depend on whether the hardware delivers an error code.
 */
#define HW_EXCEPTION_NOERR(num)                                         \
    void __attribute__ ((visibility ("hidden"))) hwexc_##num(void);     \
    __asm (                                                             \
        "\t.text                                        \n\t"           \
        "\t.type hwexc_"#num",@function                 \n\t"           \
        "hwexc_"#num":                                  \n\t"           \
        "pushq $0                /* dummy error code */ \n\t"           \
        "pushq $"#num"           /* vector number */    \n\t"           \
        "jmp    hwexc_common     /* common stuff */     \n\t"           \
    )
#define HW_EXCEPTION_ERR(num)                                           \
    void __attribute__ ((visibility ("hidden"))) hwexc_##num(void);     \
    __asm (                                                             \
        "\t.text                                        \n\t"           \
        "\t.type hwexc_"#num",@function                 \n\t"           \
        "hwexc_"#num":                                  \n\t"           \
        "pushq $"#num"           /* vector number */    \n\t"           \
        "jmp    hwexc_common     /* common stuff */     \n\t"           \
    )
#define XHW_IRQ(num)                                                    \
    void __attribute__ ((visibility ("hidden"))) hwirq_##num(void);     \
    __asm (                                                             \
        "\t.text                                        \n\t"           \
        "\t.type hwirq_"#num",@function                 \n\t"           \
        "hwirq_"#num":                                  \n\t"           \
        "pushq $"#num"           /* vector number */    \n\t"           \
        "jmp    hwirq_common     /* common stuff */     \n\t"           \
    )
/// Noop wrapper for HW_IRQ to deal with CPP stringification problems
#define HW_IRQ(num) XHW_IRQ(num)

#define STR(x) #x
#define XTR(x) STR(x)
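
/*
 * Editor's sketch (illustration only, not part of the build): for example,
 * HW_EXCEPTION_ERR(10) expands roughly to the following stub. The CPU has
 * already pushed an error code for vector 10, so the stub only adds the
 * vector number and tails into the shared handler:
 *
 *   void __attribute__ ((visibility ("hidden"))) hwexc_10(void);
 *   __asm (
 *       "\t.text                              \n\t"
 *       "\t.type hwexc_10,@function           \n\t"
 *       "hwexc_10:                            \n\t"
 *       "pushq $10                            \n\t"
 *       "jmp    hwexc_common                  \n\t"
 *   );
 */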
159 " .type hwexc_common ,@function \n\t"
161 "testb $3, 24(%rsp) /* if CS.CPL == 0 */ \n\t"
162 "jz kernel_fault \n\t"
    /* User exception: save full state and return to the user.
     * This path could be optimized by only saving the non-volatile
     * registers (since the kernel's C path will maintain them), and
     * having the user-mode code save them if needed. Since the
     * current user code always does need them, we just save the full
     * state here. */

    /* decide where to save the state, the options are:
     *    pagefault and enabled -> enabled save area
     *    pagefault while disabled or any other trap -> trap save area
     */
    "pushq %rcx                                                 \n\t"
    "movq dcb_current(%rip), %rcx /* rcx = dcb_current */       \n\t"
    "movq "XTR(OFFSETOF_DCB_DISP)"(%rcx), %rcx /* rcx = dcb_current->disp */\n\t"
    "cmpq $14, 8(%rsp)       /* is pagefault? */                \n\t"
    "jne save_trap                                              \n\t"
    "cmpl $0, "XTR(OFFSETOF_DISP_DISABLED)"(%rcx) /* disp->disabled ? */\n\t"
    "jne save_trap                                              \n\t"
    "pushq %rbx                                                 \n\t"
    "movq 4*8(%rsp), %rbx    /* rbx = faulting IP */            \n\t"
    "cmpq "XTR(OFFSETOF_DISP_X86_64_CRIT_PC_LOW)"(%rcx), %rbx /* crit_pc_low <= rip? */\n\t"
    "jae disabled_test                                          \n\t"
    "\nsave_enabled:                                            \n\t"
    "popq %rbx                                                  \n\t"
    "addq $"XTR(OFFSETOF_DISP_X86_64_ENABLED_AREA)", %rcx /* rcx = enabled_save_area */\n\t"
    "jmp do_save                                                \n\t"
    "\ndisabled_test:                                           \n\t"
    "cmpq "XTR(OFFSETOF_DISP_X86_64_CRIT_PC_HIGH)"(%rcx), %rbx /* crit_pc_high > rip? */\n\t"
    "jae save_enabled                                           \n\t"
    "popq %rbx                                                  \n\t"
    "\nsave_trap:                                               \n\t"
    "addq $"XTR(OFFSETOF_DISP_X86_64_TRAP_AREA)", %rcx /* trap_save_area */\n\t"

    /* save to the save area. at this point, rcx = save area ptr,
     * rsp+8 = exception num, rsp+16 = CPU-stacked error and registers */
    "\ndo_save:                                                 \n\t"
200 "movq %rax, 0*8(%rcx) \n\t"
201 "popq %rax /* original rcx */ \n\t"
202 "movq %rbx, 1*8(%rcx) \n\t"
203 "movq %rax, 2*8(%rcx) \n\t"
204 "movq %rdx, 3*8(%rcx) \n\t"
205 "movq %rsi, 4*8(%rcx) \n\t"
206 "movq %rdi, 5*8(%rcx) \n\t"
207 "movq %rbp, 6*8(%rcx) \n\t"
208 "movq %r8, 8*8(%rcx) \n\t"
209 "movq %r9, 9*8(%rcx) \n\t"
210 "movq %r10, 10*8(%rcx) \n\t"
211 "movq %r11, 11*8(%rcx) \n\t"
212 "movq %r12, 12*8(%rcx) \n\t"
213 "movq %r13, 13*8(%rcx) \n\t"
214 "movq %r14, 14*8(%rcx) \n\t"
215 "movq %r15, 15*8(%rcx) \n\t"
216 "mov %fs, "XTR(OFFSETOF_FS_REG)"(%rcx) \n\t"
217 "mov %gs, "XTR(OFFSETOF_GS_REG)"(%rcx) \n\t"
218 "popq %rdi /* vector number */ \n\t"
219 "popq %rsi /* error code */ \n\t"
220 "movq %rsp, %rdx /* CPU save area */ \n\t"
221 "callq generic_handle_user_exception \n\t"
    /* a kernel fault means something bad happened, so we stack
     * everything for the debugger to use, in the GDB frame format */
    "\nkernel_fault:                                            \n\t"
    "pushq 6*8(%rsp) /* SS */                                   \n\t"
    "pushq 4*8(%rsp) /* CS */                                   \n\t"
    "pushq 6*8(%rsp) /* EFLAGS */                               \n\t"
    "pushq 5*8(%rsp) /* RIP */                                  \n\t"
    /* TODO: extend frame size and save FS/GS so we can resume afterwards */
    "pushq %r15                                                 \n\t"
    "pushq %r14                                                 \n\t"
    "pushq %r13                                                 \n\t"
    "pushq %r12                                                 \n\t"
    "pushq %r11                                                 \n\t"
    "pushq %r10                                                 \n\t"
    "pushq %r9                                                  \n\t"
    "pushq %r8                                                  \n\t"
    "pushq 17*8(%rsp) /* RSP */                                 \n\t"
    "pushq %rbp                                                 \n\t"
    "pushq %rdi                                                 \n\t"
    "pushq %rsi                                                 \n\t"
    "pushq %rdx                                                 \n\t"
    "pushq %rcx                                                 \n\t"
    "pushq %rbx                                                 \n\t"
    "pushq %rax                                                 \n\t"
    "movq 20*8(%rsp), %rdi   /* vector number */                \n\t"
    "movq 21*8(%rsp), %rsi   /* error code */                   \n\t"
    "movq %rsp, %rdx         /* save area ptr*/                 \n\t"
    "jmp generic_handle_kernel_exception                        \n\t"
);
/* (Device) interrupt. */
__asm (
    ".text                                                      \n\t"
    "   .type hwirq_common ,@function                           \n\t"
    "hwirq_common:                                              \n\t"
    /* If it happened in kernel_mode, simply make userspace runnable.
     * This is a special case, since interrupts are normally disabled when
     * entering the kernel. However, they are enabled when there is nothing
     * to do, and the kernel goes to sleep using wait_for_interrupts() */
    "testb $3, 16(%rsp) /* if CS.CPL == 0 */                    \n\t"
    "jz call_handle_irq                                         \n\t"
    /* Happened in user mode.
     * we need to save everything to the dispatcher. */
    /* decide where to save the state, either enabled or disabled save areas */
    "pushq %rdx                                                 \n\t"
    "movq dcb_current(%rip), %rdx /* rdx = dcb_current */       \n\t"
    "movq "XTR(OFFSETOF_DCB_DISP)"(%rdx), %rdx /* rdx = dcb_current->disp */\n\t"
    "cmpl $0, "XTR(OFFSETOF_DISP_DISABLED)"(%rdx) /* disp->disabled ? */\n\t"
    "jne irq_save_disabled                                      \n\t"
    "pushq %rbx                                                 \n\t"
    "movq 24(%rsp), %rbx     /* rbx = faulting IP */            \n\t"
    "cmpq "XTR(OFFSETOF_DISP_X86_64_CRIT_PC_LOW)"(%rdx), %rbx /* crit_pc_low <= rip? */\n\t"
    "jae irq_disabled_test                                      \n\t"
    "\nirq_save_enabled:                                        \n\t"
    "popq %rbx                                                  \n\t"
    "addq $"XTR(OFFSETOF_DISP_X86_64_ENABLED_AREA)", %rdx /* rdx = enabled_save_area */\n\t"
    "jmp irq_do_save                                            \n\t"
    "\nirq_disabled_test:                                       \n\t"
    "cmpq "XTR(OFFSETOF_DISP_X86_64_CRIT_PC_HIGH)"(%rdx), %rbx /* crit_pc_high > rip? */\n\t"
    "jae irq_save_enabled                                       \n\t"
    "popq %rbx                                                  \n\t"
    "\nirq_save_disabled:                                       \n\t"
    "addq $"XTR(OFFSETOF_DISP_X86_64_DISABLED_AREA)", %rdx /* disabled_save_area */\n\t"

    /* save to the save area. at this point, rdx = save area ptr,
     * rsp+8 = vector number, rsp+16 = CPU-stacked registers */
    "\nirq_do_save:                                             \n\t"
290 "movq %rax, 0*8(%rdx) \n\t"
291 "movq %rbx, 1*8(%rdx) \n\t"
292 "movq %rcx, 2*8(%rdx) \n\t"
293 "popq %rax /* original rdx */ \n\t"
294 "movq %rax, 3*8(%rdx) \n\t"
295 "movq %rsi, 4*8(%rdx) \n\t"
296 "movq %rdi, 5*8(%rdx) \n\t"
297 "movq %rbp, 6*8(%rdx) \n\t"
298 "movq %r8, 8*8(%rdx) \n\t"
299 "movq %r9, 9*8(%rdx) \n\t"
300 "movq %r10, 10*8(%rdx) \n\t"
301 "movq %r11, 11*8(%rdx) \n\t"
302 "movq %r12, 12*8(%rdx) \n\t"
303 "movq %r13, 13*8(%rdx) \n\t"
304 "movq %r14, 14*8(%rdx) \n\t"
305 "movq %r15, 15*8(%rdx) \n\t"
306 "mov %fs, "XTR(OFFSETOF_FS_REG)"(%rdx) \n\t"
307 "mov %gs, "XTR(OFFSETOF_GS_REG)"(%rdx) \n\t"
308 "popq %rdi /* vector number */ \n\t"
309 "movq %rsp, %rsi /* CPU save area */ \n\t"
310 "jmp generic_handle_irq /* NB: rdx = disp save ptr*/\n\t"
312 "\ncall_handle_irq: \n\t"
314 "callq handle_irq \n\t"
HW_EXCEPTION_NOERR(0);
HW_EXCEPTION_NOERR(1);
HW_EXCEPTION_NOERR(2);
HW_EXCEPTION_NOERR(3);
HW_EXCEPTION_NOERR(4);
HW_EXCEPTION_NOERR(5);
HW_EXCEPTION_NOERR(6);
HW_EXCEPTION_NOERR(7);
HW_EXCEPTION_ERR(8);
HW_EXCEPTION_NOERR(9);
HW_EXCEPTION_ERR(10);
HW_EXCEPTION_ERR(11);
HW_EXCEPTION_ERR(12);
HW_EXCEPTION_ERR(13);
HW_EXCEPTION_ERR(14);
HW_EXCEPTION_NOERR(16);
HW_EXCEPTION_ERR(17);
HW_EXCEPTION_NOERR(18);
HW_EXCEPTION_NOERR(19);
// Classic PIC interrupts
HW_IRQ(32);
HW_IRQ(33);
HW_IRQ(34);
HW_IRQ(35);
HW_IRQ(36);
HW_IRQ(37);
HW_IRQ(38);
HW_IRQ(39);
HW_IRQ(40);
HW_IRQ(41);
HW_IRQ(42);
HW_IRQ(43);
HW_IRQ(44);
HW_IRQ(45);
HW_IRQ(46);
HW_IRQ(47);

// Generic interrupts
HW_IRQ(48);
HW_IRQ(49);
HW_IRQ(50);
HW_IRQ(51);
HW_IRQ(52);
HW_IRQ(53);
HW_IRQ(54);
HW_IRQ(55);
HW_IRQ(56);
HW_IRQ(57);
HW_IRQ(58);
HW_IRQ(59);
HW_IRQ(60);
HW_IRQ(61);
HW_IRQ(62);
HW_IRQ(63);

// Local APIC interrupts
HW_IRQ(248);
HW_IRQ(249);
HW_IRQ(250);
HW_IRQ(251);
HW_IRQ(252);
HW_IRQ(253);
HW_IRQ(254);

// Reserved as "unhandled exception" handler
HW_EXCEPTION_NOERR(666);
#define ERR_PF_PRESENT          (1 << 0)
#define ERR_PF_READ_WRITE       (1 << 1)
#define ERR_PF_USER_SUPERVISOR  (1 << 2)
#define ERR_PF_RESERVED         (1 << 3)
#define ERR_PF_INSTRUCTION      (1 << 4)
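
/* Editor's illustration: a page-fault error code of 0x6 has
 * ERR_PF_READ_WRITE and ERR_PF_USER_SUPERVISOR set but ERR_PF_PRESENT
 * clear, i.e. a write to a not-present page from user mode; the handlers
 * below decode the bits exactly this way. */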
/**
 * \brief Interrupt Descriptor Table (IDT) for the processor this kernel
 * is running on.
 */
static struct gate_descriptor idt[NIDT] __attribute__ ((aligned (16)));
static int timer_fired = 0;

#if CONFIG_TRACE && NETWORK_STACK_TRACE
#define TRACE_ETHERSRV_MODE 1
#endif // CONFIG_TRACE && NETWORK_STACK_TRACE
/**
 * \brief Send interrupt notification to user-space listener.
 *
 * Sends an interrupt notification IDC to a local endpoint that
 * listens for IRQ notifications.
 *
 * \param irq IRQ# to send in notification.
 */
static uint32_t pkt_interrupt_count = 0;
static void send_user_interrupt(int irq)
{
    assert(irq >= 0 && irq < NDISPATCH);
    struct kcb *k = kcb_current;
    do {
        if (k->irq_dest_caps[irq].cap.type == ObjType_IRQVector) {
            break;
        }
        k = k->next;
    } while (k && k != kcb_current);
    // if k == NULL we don't need to switch as we only have a single kcb
    if (k) {
        switch_kcb(k);
    }
    // from here: kcb_current is the kcb for which the interrupt was intended
    struct capability *cap = &kcb_current->irq_dest_caps[irq].cap;

    // Return on null cap (unhandled interrupt)
    if(cap->type == ObjType_Null) {
        printk(LOG_WARN, "unhandled IRQ %d\n", irq);
        return;
    } else if (cap->type == ObjType_IRQVector && cap->u.irqvector.ep == NULL){
        printk(LOG_WARN, "unhandled IRQ (no endpoint) %d\n", irq);
        return;
    } else if (cap->type > ObjType_Num) {
        // XXX: HACK: this doesn't fix the root cause of having weird entries
        // in kcb_current->irq_dispatch[], but it allows us to test the system
        // more reliably for now. -SG
        // Also complain to SG if this gets checked in to the main tree!
        printk(LOG_WARN, "receiver type > %d, %d, assume unhandled\n", ObjType_Num, cap->type);
        return;
    }

    ++pkt_interrupt_count;
#if NETWORK_STACK_TRACE
    trace_event(TRACE_SUBSYS_NNET, TRACE_EVENT_NNET_UIRQ, pkt_interrupt_count);
#endif // NETWORK_STACK_TRACE

    // Otherwise, cap needs to be an IRQ vector with an attached endpoint
    assert(cap->type == ObjType_IRQVector);

    struct capability * ep = cap->u.irqvector.ep;

    // send empty message as notification
    errval_t err = lmp_deliver_notification(ep);
    if (err_is_fail(err)) {
        if (err_no(err) == SYS_ERR_LMP_BUF_OVERFLOW) {
            struct dispatcher_shared_generic *disp =
                get_dispatcher_shared_generic(ep->u.endpoint.listener->disp);
            printk(LOG_DEBUG, "%.*s: IRQ message buffer overflow on IRQ %d\n",
                   DISP_NAME_LEN, disp->name, irq);
        } else {
            printk(LOG_ERR, "Unexpected error delivering IRQ\n");
        }
    }

    /* XXX: run the handler dispatcher immediately
     * we shouldn't do this (we should let the scheduler decide), but because
     * our default scheduler is braindead, this is a quick hack to make sure
     * that mostly-sane things happen
     */
    dispatch(ep->u.endpoint.listener);
#if 0
    dispatch(schedule());
#endif
}
/**
 * This interface is deprecated. Use irq_table_alloc_dest_cap instead.
 */
errval_t irq_table_alloc(int *outvec)
{
    // XXX: this is O(#kcb*NDISPATCH)
    int i;
    for (i = 0; i < NDISPATCH; i++) {
        struct kcb *k = kcb_current;
        bool found_free = true;
        do {
            if (k->irq_dest_caps[i].cap.type == ObjType_IRQVector) {
                found_free = false;
                break;
            }
            k = k->next;
        } while(k && k != kcb_current);
        if (found_free) {
            break;
        }
    }
    if (i == NDISPATCH) {
        *outvec = -1;
        return SYS_ERR_IRQ_NO_FREE_VECTOR;
    } else {
        //TODO Luki: Somehow we must put here a cap in the table
        *outvec = i;
        return SYS_ERR_OK;
    }
}
errval_t irq_table_alloc_dest_cap(uint8_t dcn_vbits, capaddr_t dcn, capaddr_t out_cap_addr)
{
    errval_t err;
    int i;

    struct cte out_cap;
    memset(&out_cap, 0, sizeof(struct cte));

    // TODO Luki: Figure out why it was working with i=0 before
    for (i = 1; i < NDISPATCH; i++) {
        //struct kcb * k = kcb_current;
        assert(kcb_current->irq_dest_caps[i].cap.type == ObjType_Null ||
               kcb_current->irq_dest_caps[i].cap.type == ObjType_IRQVector);
        //TODO Luki: iterate over kcb
        if (kcb_current->irq_dest_caps[i].cap.type != ObjType_IRQVector) {
            break;
        }
    }
    if (i == NDISPATCH) {
        return SYS_ERR_IRQ_NO_FREE_VECTOR;
    }
    out_cap.cap.type = ObjType_IRQVector;

    //TODO Luki: Set the lapic_controller_id
    const uint32_t lapic_controller_id = 0;
    out_cap.cap.u.irqvector.controller = lapic_controller_id;
    out_cap.cap.u.irqvector.vector = i;

    struct cte *cn;
    err = caps_lookup_slot(&dcb_current->cspace.cap, dcn, dcn_vbits, &cn, CAPRIGHTS_WRITE);
    if(err_is_fail(err)){
        return err;
    }
    err = caps_copy_to_cnode(cn, out_cap_addr, &out_cap, 0, 0, 0);
    if(err_is_fail(err)){
        return err;
    }

    return SYS_ERR_OK;
}
errval_t irq_connect(struct capability *dest_cap, capaddr_t endpoint_adr)
{
    errval_t err;
    struct cte *endpoint;

    // Lookup & check message endpoint cap
    err = caps_lookup_slot(&dcb_current->cspace.cap, endpoint_adr,
                           CPTR_BITS, &endpoint, CAPRIGHTS_WRITE);
    if (err_is_fail(err)) {
        return err_push(err, SYS_ERR_IRQ_LOOKUP_EP);
    }

    assert(endpoint != NULL);

    // Return w/error if cap is not an endpoint
    if(endpoint->cap.type != ObjType_EndPoint) {
        return SYS_ERR_IRQ_NOT_ENDPOINT;
    }

    // Return w/error if no listener on endpoint
    if(endpoint->cap.u.endpoint.listener == NULL) {
        return SYS_ERR_IRQ_NO_LISTENER;
    }

    assert(dest_cap->type == ObjType_IRQVector);
    dest_cap->u.irqvector.ep = &endpoint->cap;

    return SYS_ERR_OK;
}
/**
 * Deprecated. Use capabilities.
 */
errval_t irq_table_set(unsigned int nidt, capaddr_t endpoint)
{
    errval_t err;
    struct cte *recv;

    err = caps_lookup_slot(&dcb_current->cspace.cap, endpoint,
                           CPTR_BITS, &recv, CAPRIGHTS_WRITE);
    if (err_is_fail(err)) {
        return err_push(err, SYS_ERR_IRQ_LOOKUP);
    }

    assert(recv != NULL);

    // Return w/error if cap is not an endpoint
    if(recv->cap.type != ObjType_EndPoint) {
        return SYS_ERR_IRQ_NOT_ENDPOINT;
    }

    // Return w/error if no listener on endpoint
    if(recv->cap.u.endpoint.listener == NULL) {
        return SYS_ERR_IRQ_NO_LISTENER;
    }

    printk(LOG_ERR, "Used deprecated irq_table_set. Not setting interrupt\n");
    return SYS_ERR_IRQ_INVALID;

#if 0
    if(nidt < NDISPATCH) {
        // check that we don't overwrite someone else's handler
        if (kcb_current->irq_dispatch[nidt].cap.type != ObjType_Null) {
            printf("kernel: installing new handler for IRQ %d\n", nidt);
        }
        err = caps_copy_to_cte(&kcb_current->irq_dispatch[nidt], recv, false, 0, 0);
        if (err_is_ok(err)) {
            return SYS_ERR_OK;
        }
    }
    return SYS_ERR_IRQ_INVALID;
#endif
}
errval_t irq_table_delete(unsigned int nidt)
{
    printk(LOG_ERR, "Used deprecated irq_table_delete. Not deleting interrupt\n");
    return SYS_ERR_IRQ_INVALID;

#if 0
    if(nidt < NDISPATCH) {
        kcb_current->irq_dispatch[nidt].cap.type = ObjType_Null;
        return SYS_ERR_OK;
    } else {
        return SYS_ERR_IRQ_INVALID;
    }
#endif
}
errval_t irq_table_notify_domains(struct kcb *kcb)
{
    //TODO Luki: Check if this stuff is correct with multiple kcbs
    uintptr_t msg[] = { 1 };
    for (int i = 0; i < NDISPATCH; i++) {
        struct capability * dest_cap = &kcb->irq_dest_caps[i].cap;
        if (dest_cap->type == ObjType_IRQVector) {
            struct capability * ep_cap = dest_cap->u.irqvector.ep;
            if (ep_cap != NULL) {
                // 1 word message as notification
                errval_t err = lmp_deliver_payload(ep_cap, NULL, msg, 1, false);
                if (err_is_fail(err)) {
                    if (err_no(err) == SYS_ERR_LMP_BUF_OVERFLOW) {
                        struct dispatcher_shared_generic *disp =
                            get_dispatcher_shared_generic(ep_cap->u.endpoint.listener->disp);
                        printk(LOG_DEBUG, "%.*s: IRQ message buffer overflow\n",
                               DISP_NAME_LEN, disp->name);
                    } else {
                        printk(LOG_ERR, "Unexpected error delivering IRQ\n");
                    }
                }
                // Remove endpoint. Domains must re-register by calling connect again.
                kcb->irq_dest_caps[i].cap.u.irqvector.ep->type = ObjType_Null;
            }
        }
    }
    return SYS_ERR_OK;
}
/**
 * \brief Handles kernel exceptions
 *
 * \param vec Vector number of exception
 * \param error Error code from CPU, or 0 for an exception without an error code
 * \param gdb_save_frame Pointer to save area for registers stacked by trap handler
 */
static __attribute__ ((used,noreturn))
void generic_handle_kernel_exception(uint64_t vec, uint64_t error,
                                     uintptr_t *gdb_save_frame)
{
    lvaddr_t fault_address;
    char *descr;

    if (vec == 666) {
        panic("unhandled kernel exception (vector 666)");
    }

    assert(vec < NEXCEPTIONS);

    printk(LOG_PANIC, "exception %d (error code 0x%lx): ", (int)vec, error);
    if (vec == ia32_vec_pf) {
        printf("%s page fault due to %s%s, while in %s mode%s\n",
               error & ERR_PF_READ_WRITE ? "write" : "read",
               error & ERR_PF_PRESENT ? "access violation" : "page not present",
               error & ERR_PF_RESERVED ? ", reserved bits set in page table"
                                       : "",
               error & ERR_PF_USER_SUPERVISOR ? "user" : "supervisor",
               error & ERR_PF_INSTRUCTION ? ", by instruction fetch" : "");

        __asm volatile("mov %%cr2, %[fault_address]"
                       : [fault_address] "=r" (fault_address));
        printf("Address that caused the fault: 0x%lx\n", fault_address);

    } else if ((descr = ia32_exc_vec_describe(vec))) {
        printf("%s\n", descr);
    } else {
        printf("unhandled exception!\n");
    }
    // Print faulting instruction pointer
    uintptr_t rip = gdb_save_frame[GDB_X86_64_RIP_REG];
    printf("Faulting instruction pointer (or next instruction): 0x%lx\n", rip);
    printf("  => i.e. unrelocated kernel address 0x%lx\n",
           rip - (uintptr_t)&_start_kernel + START_KERNEL_PHYS);
720 printf("Registers:\n");
721 printf(" rax: 0x%016lx r8 : 0x%016lx\n",
722 gdb_save_frame[GDB_X86_64_RAX_REG],
723 gdb_save_frame[GDB_X86_64_R8_REG]);
724 printf(" rbx: 0x%016lx r9 : 0x%016lx\n",
725 gdb_save_frame[GDB_X86_64_RBX_REG],
726 gdb_save_frame[GDB_X86_64_R9_REG]);
727 printf(" rcx: 0x%016lx r10: 0x%016lx\n",
728 gdb_save_frame[GDB_X86_64_RCX_REG],
729 gdb_save_frame[GDB_X86_64_R10_REG]);
730 printf(" rdx: 0x%016lx r11: 0x%016lx\n",
731 gdb_save_frame[GDB_X86_64_RDX_REG],
732 gdb_save_frame[GDB_X86_64_R11_REG]);
733 printf(" rsp: 0x%016lx r12: 0x%016lx\n",
734 gdb_save_frame[GDB_X86_64_RSP_REG],
735 gdb_save_frame[GDB_X86_64_R12_REG]);
736 printf(" rdi: 0x%016lx r13: 0x%016lx\n",
737 gdb_save_frame[GDB_X86_64_RDI_REG],
738 gdb_save_frame[GDB_X86_64_R13_REG]);
739 printf(" rsi: 0x%016lx r14: 0x%016lx\n",
740 gdb_save_frame[GDB_X86_64_RSI_REG],
741 gdb_save_frame[GDB_X86_64_R14_REG]);
742 printf(" rip: 0x%016lx r15: 0x%016lx\n",
743 gdb_save_frame[GDB_X86_64_RIP_REG],
744 gdb_save_frame[GDB_X86_64_R15_REG]);
    // Print the top 10 stack words
    printf("Top o' stack:\n");
    for(int i = 0; i < 10; i++) {
        unsigned long *p = (unsigned long *)gdb_save_frame[GDB_X86_64_RSP_REG] + i;
        printf(" %d \t 0x%016lx (%lu)\n", i, *p, *p);
    }

    // Drop to the debugger
    gdb_handle_exception(vec, gdb_save_frame);
    panic("gdb_handle_exception returned");
}
/**
 * \brief copies CPU-stacked registers to a dispatcher save area
 */
static void copy_cpu_frame_to_dispatcher(
    uintptr_t * NONNULL COUNT(X86_SAVE_AREA_SIZE) cpu_save_area,
    struct registers_x86_64 *disp_save_area)
{
    assert((cpu_save_area[X86_SAVE_EFLAGS] & USER_EFLAGS) == USER_EFLAGS);

    disp_save_area->rsp = cpu_save_area[X86_SAVE_RSP];
    disp_save_area->eflags = cpu_save_area[X86_SAVE_EFLAGS];
    disp_save_area->rip = cpu_save_area[X86_SAVE_RIP];
}
/**
 * \brief Handles user-mode exceptions
 *
 * \param vec Vector number of exception
 * \param error Error code from CPU, or 0 for an exception without an error code
 * \param cpu_save_area Pointer to save area for registers stacked by CPU
 * \param disp_save_area Pointer to save area in dispatcher
 */
static __attribute__ ((used))
void generic_handle_user_exception(int vec, uint64_t error,
                                   uintptr_t * NONNULL COUNT(X86_SAVE_AREA_SIZE) cpu_save_area,
                                   struct registers_x86_64 *disp_save_area)
{
    assert(dcb_current->disp_cte.cap.type == ObjType_Frame);
    dispatcher_handle_t handle = dcb_current->disp;
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);
    uint64_t rip = cpu_save_area[X86_SAVE_RIP];
    uint64_t rsp = cpu_save_area[X86_SAVE_RSP];
    lvaddr_t fault_address, handler = 0, param = 0;

    assert(vec < NEXCEPTIONS);
    assert((cpu_save_area[X86_SAVE_CS] & 0x3) != 0); // CS.CPL > 0

    copy_cpu_frame_to_dispatcher(cpu_save_area, disp_save_area);

    bool disabled = dispatcher_is_disabled_ip(handle, rip);
    dcb_current->disabled = disabled;
    dcb_current->faults_taken++;

#ifdef FPU_LAZY_CONTEXT_SWITCH
    // Store FPU state if it's used
    // Do this for every trap when the current domain used the FPU
    // Do it for FPU not available traps in any case (to save the last FPU user)
    // XXX: Need to reset fpu_dcb when that DCB is deleted
    if(fpu_dcb != NULL &&
       (fpu_dcb == dcb_current || vec == IDT_NM)) {
        struct dispatcher_shared_generic *dst =
            get_dispatcher_shared_generic(fpu_dcb->disp);

        // Turn FPU trap off temporarily for saving its state
        bool trap = fpu_trap_get();
        fpu_trap_off();

        if(fpu_dcb->disabled) {
            fpu_save(dispatcher_get_disabled_fpu_save_area(fpu_dcb->disp));
            dst->fpu_used = 1;
        } else {
            assert(!fpu_dcb->disabled);
            fpu_save(dispatcher_get_enabled_fpu_save_area(fpu_dcb->disp));
            dst->fpu_used = 2;
        }

        if(trap) {
            fpu_trap_on();
        }
    }
#endif
    if (vec == IDT_PF) { // Page fault
        __asm volatile("mov %%cr2, %[fault_address]"
                       : [fault_address] "=r" (fault_address));

        printk(LOG_WARN, "user page fault%s in '%.*s': addr %lx IP %lx SP %lx "
                         "error 0x%lx\n",
               disabled ? " WHILE DISABLED" : "", DISP_NAME_LEN,
               disp->name, fault_address, rip, rsp, error);

        /* sanity-check that the trap handler saved in the right place */
        assert((disabled && disp_save_area == dispatcher_get_trap_save_area(handle))
               || (!disabled && disp_save_area == dispatcher_get_enabled_save_area(handle)));
        if (disabled) {
            handler = disp->dispatcher_pagefault_disabled;
        } else {
            handler = disp->dispatcher_pagefault;
        }
        param = fault_address;
    } else if (vec == IDT_NMI) {
        printk(LOG_WARN, "NMI - ignoring\n");
        dispatch(dcb_current);
    } else if (vec == IDT_NM) { // device not available (FPU) exception
        debug(SUBSYS_DISPATCH, "FPU trap in %.*s at 0x%" PRIxPTR "\n",
              DISP_NAME_LEN, disp->name, rip);
        assert(!dcb_current->is_vm_guest);

        /* Intel system programming part 1: 2.3.1, 2.5, 11, 12.5.1
         * clear the TS flag (flag that says, that the FPU is not available)
         */
        fpu_trap_off();

        // Remember FPU-using DCB
        fpu_dcb = dcb_current;

        // Wipe FPU for protection and to initialize it in case we trapped while
        // disabled
        fpu_init();

        if(disabled) {
            // Initialize FPU (done earlier) and ignore trap
            dispatch(dcb_current);
        } else {
            // defer trap to user-space
            // FPUs are switched eagerly while disabled, there should be no trap
            assert(disp_save_area == dispatcher_get_trap_save_area(handle));
            handler = disp->dispatcher_trap;
            param = vec;
        }
    } else if (vec == IDT_MF) {
        uint16_t fpu_status;

        __asm volatile("fnstsw %0" : "=a" (fpu_status));

        printk(LOG_WARN, "FPU error%s in '%.*s': IP %" PRIxPTR " FPU status %x\n",
               disabled ? " WHILE DISABLED" : "", DISP_NAME_LEN,
               disp->name, rip, fpu_status);

        handler = disp->dispatcher_trap;
        param = vec;
    } else if (vec == IDT_MC) {
        // TODO: provide more useful information about the cause
        panic("machine check exception while in user mode");
    } else { // All other traps
        printk(LOG_WARN, "user trap #%d: %s%s in '%.*s': IP %lx, error %lx\n",
               vec, idt_descs[vec], disabled ? " WHILE DISABLED" : "",
               DISP_NAME_LEN, disp->name, rip, error);
        assert(disp_save_area == dispatcher_get_trap_save_area(handle));

        if (vec == IDT_DB) { // debug exception: just continue
            resume(dispatcher_get_trap_save_area(handle));
        } else if (disabled) {
            // can't handle a trap while disabled: nowhere safe to deliver it
            scheduler_remove(dcb_current);
            dispatch(schedule());
        } else {
            handler = disp->dispatcher_trap;
            param = vec;
        }
    }
    // Make unrunnable if it has taken too many faults
    if (dcb_current->faults_taken > 2) {
        printk(LOG_WARN, "generic_handle_user_exception: too many faults, "
               "making domain unrunnable\n");
        dcb_current->faults_taken = 0; // just in case it gets restarted
        scheduler_remove(dcb_current);
        dispatch(schedule());
    }

    /* resume user to save area */
    if (handler == 0) {
        printk(LOG_WARN, "no suitable handler for this type of fault, "
               "making domain unrunnable\n");
        scheduler_remove(dcb_current);
        dispatch(schedule());
    } else {
        cpu_save_area[X86_SAVE_RIP] = handler;
        cpu_save_area[X86_SAVE_EFLAGS] = USER_EFLAGS;
    }
    /* XXX: get GCC to load up the argument registers before returning */
    register uintptr_t arg0 __asm ("%rdi") = disp->udisp;
    register uintptr_t arg1 __asm ("%rsi") = param;
    register uintptr_t arg2 __asm ("%rdx") = error;
    register uintptr_t arg3 __asm ("%rcx") = rip;
    __asm volatile("" :: "r" (arg0), "r" (arg1), "r" (arg2), "r" (arg3));
}
static void
update_kernel_now(void)
{
    uint64_t tsc_now = rdtsc();
#ifdef CONFIG_ONESHOT_TIMER
    uint64_t ticks = tsc_now - tsc_lasttime;
    kernel_now += ticks / timing_get_tsc_per_ms();
#else // !CONFIG_ONESHOT_TIMER
    // maintain compatibility with old behaviour. Not sure if it is
    // actually needed. -AKK
    //
    // Ignore timeslice if it happens too closely (less than half
    // of the TSC ticks that are supposed to pass) to the last.
    // In that case we have just synced timers and see a spurious
    // APIC timer interrupt.
    if(tsc_now - tsc_lasttime >
       (kernel_timeslice * timing_get_tsc_per_ms()) / 2) {
        kernel_now += kernel_timeslice;
    }
#endif // CONFIG_ONESHOT_TIMER
    tsc_lasttime = tsc_now;
}
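
/* Editor's illustration of the arithmetic above (assuming a 2 GHz TSC, so
 * timing_get_tsc_per_ms() is roughly 2,000,000): 10,000,000 elapsed TSC
 * ticks advance kernel_now by 5 ms under CONFIG_ONESHOT_TIMER; in the other
 * configuration a whole timeslice is credited, but only if more than half
 * a timeslice's worth of TSC ticks has passed since the last update. */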
/// Handle an IRQ that arrived, either while in user or kernel mode (HLT)
static __attribute__ ((used)) void handle_irq(int vector)
{
    int irq = vector - NEXCEPTIONS;
    debug(SUBSYS_DISPATCH, "IRQ vector %d (irq %d) while %s\n", vector, irq,
          dcb_current ? (dcb_current->disabled ? "disabled": "enabled") : "in kernel");

    // if we were in wait_for_interrupt(), unmask timer before running userspace
    if (dcb_current == NULL && kernel_ticks_enabled) {
        apic_unmask_timer();
    }
#if TRACE_ETHERSRV_MODE
    trace_event(TRACE_SUBSYS_NNET, TRACE_EVENT_NNET_IRQ, vector);
#endif // TRACE_ETHERSRV_MODE

    // APIC timer interrupt: handle in kernel and reschedule
    if (vector == APIC_TIMER_INTERRUPT_VECTOR) {
        timer_fired++;

        // switch kcb every other timeslice
        if (!kcb_sched_suspended && timer_fired % 2 == 0 && kcb_current->next) {
            //printk(LOG_NOTE, "switching from kcb(%p) to kcb(%p)\n", kcb_current, kcb_current->next);
            switch_kcb(kcb_current->next);
        }

        apic_eoi();
        assert(kernel_ticks_enabled);
        update_kernel_now();
        trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_TIMER, kernel_now);
        wakeup_check(kernel_now+kcb_current->kernel_off);
    } else if (vector == APIC_PERFORMANCE_INTERRUPT_VECTOR) {
        // Handle performance counter overflow
        perfmon_measure_reset();
        if(dcb_current!=NULL) {
            // Get faulting instruction pointer
            struct registers_x86_64 *disp_save_area = dcb_current->disabled ?
                dispatcher_get_disabled_save_area(dcb_current->disp) :
                dispatcher_get_enabled_save_area(dcb_current->disp);
            struct dispatcher_shared_generic *disp =
                get_dispatcher_shared_generic(dcb_current->disp);

            // Setup data structure for LMP transfer to user level handler
            struct perfmon_overflow_data data = {
                .ip = disp_save_area->rip
            };
            strncpy(data.name, disp->name, PERFMON_DISP_NAME_LEN);

            // Call overflow handler represented by endpoint
            extern struct capability perfmon_callback_ep;
            size_t payload_len = sizeof(struct perfmon_overflow_data)/ sizeof(uintptr_t)+1;
            errval_t err = lmp_deliver_payload(&perfmon_callback_ep,
                                               NULL,
                                               (uintptr_t *)&data,
                                               payload_len,
                                               false);

            // Make sure delivery was okay. SYS_ERR_LMP_BUF_OVERFLOW is okay for now
            assert(err_is_ok(err) || err_no(err)==SYS_ERR_LMP_BUF_OVERFLOW);
        } else {
            // This should never happen, as interrupts are disabled in kernel
            printf("Performance counter overflow interrupt from "
                   "apic in kernel level\n");
        }
        apic_eoi();
    } else if (vector == APIC_ERROR_INTERRUPT_VECTOR) {
        printk(LOG_ERR, "APIC error interrupt fired!\n");
        xapic_esr_t esr = apic_get_esr();
        char str[256];
        xapic_esr_prtval(str, 256, esr);
        printf("%s\n", str);
        apic_eoi();
    } else if (vector == APIC_INTER_CORE_VECTOR) {
        apic_eoi();
        ipi_handle_notify();
    } else if (vector == APIC_INTER_HALT_VECTOR) {
        apic_eoi();
        // Update kernel_off for all KCBs
        struct kcb *k = kcb_current;
        do {
            k->kernel_off = kernel_now;
            k = k->next;
        } while(k && k!=kcb_current);
        // Stop the core
        halt();
    } else if (vector == APIC_SPURIOUS_INTERRUPT_VECTOR) {
        printk(LOG_DEBUG, "spurious interrupt\n");
    }
    else if (irq >= 0 && irq <= 15) { // classic PIC device interrupt
        printk(LOG_NOTE, "got interrupt %d!\n", irq);

        apic_eoi();

        // only handle PIC interrupts on the BSP core
        if (apic_is_bsp()) {
            if (pic_have_interrupt(irq)) {
                pic_eoi(irq);
                send_user_interrupt(irq);
            } else { // no interrupt pending, check for a different one (!)
                irq = pic_pending_interrupt();
                if (irq == -1) { // really nothing pending
                    printk(LOG_NOTE, "spurious interrupt (IRQ %d)\n", irq);
                } else { // why does this happen?! -AB
                    printk(LOG_NOTE, "IRQ %d reported on wrong vector (%d)\n",
                           irq, vector - NEXCEPTIONS);
                    pic_eoi(irq);
                    send_user_interrupt(irq);
                }
            }
        }
    }
    else { // APIC device interrupt (or IPI)
        //printk(LOG_NOTE, "interrupt %d vector %d!\n", irq, vector);
        apic_eoi();
        send_user_interrupt(irq);
    }
    // reschedule (because the runnable processes may have changed) and dispatch
    /* FIXME: the round-robin scheduler doesn't do the best thing here:
     * it always picks the next task, but we only really want to do that on
     * a timer tick
     */
    dispatch(schedule());
    panic("dispatch() returned");
}
/**
 * \brief Handles device interrupts that arrive while in user mode
 *
 * \param vector Vector number
 * \param cpu_save_area Pointer to save area for registers stacked by CPU
 * \param disp_save_area Pointer to save area in dispatcher
 */
static __attribute__ ((used, noreturn)) void
generic_handle_irq(int vector,
                   uintptr_t * NONNULL COUNT(X86_SAVE_AREA_SIZE) cpu_save_area,
                   struct registers_x86_64 *disp_save_area)
{
    assert(dcb_current->disp_cte.cap.type == ObjType_Frame);
    dispatcher_handle_t handle = dcb_current->disp;
    uint64_t rip = cpu_save_area[X86_SAVE_RIP];
    assert(vector < NIDT && vector >= NEXCEPTIONS);
    // Copy CPU-saved registers to dispatcher save area
    copy_cpu_frame_to_dispatcher(cpu_save_area, disp_save_area);

    /* sanity-check that the trap handler saved in the right place,
     * and update disabled flag in DCB */
    if (disp_save_area == dispatcher_get_disabled_save_area(handle)) {
        assert(dispatcher_is_disabled_ip(handle, rip));
        dcb_current->disabled = true;
    } else {
        assert(disp_save_area == dispatcher_get_enabled_save_area(handle));
        assert(!dispatcher_is_disabled_ip(handle, rip));
        dcb_current->disabled = false;
    }

    handle_irq(vector);
    resume(disp_save_area);
}
/* Utility function for code below; initialises a gate_descriptor */
static void setgd(struct gate_descriptor *gd, void (* handler)(void),
                  int ist, int type, int dpl, int selector)
{
    memset(gd, 0, sizeof(struct gate_descriptor));
    gd->gd_looffset = (uintptr_t)handler & ((1UL << 16) - 1);
    gd->gd_hioffset = (uintptr_t)handler >> 16;
    gd->gd_selector = selector;
    gd->gd_ist = ist;
    gd->gd_type = type;
    gd->gd_dpl = dpl;
    gd->gd_p = 1;
}
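
/* Editor's illustration: for a hypothetical handler at 0xffffffff80123456,
 * setgd() stores gd_looffset = 0x3456 (bits 0-15) and gd_hioffset =
 * 0xffffffff8012 (bits 16-63), matching the hardware's split gate-offset
 * layout. */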
/**
 * \brief Sets up the default IDT for current CPU.
 */
void setup_default_idt(void)
{
    struct region_descriptor region = {         // set default IDT
        .rd_limit = NIDT * sizeof(idt[0]) - 1,
        .rd_base = (uint64_t)&idt
    };
    int i;

    memset((void *)&idt, 0, NIDT * sizeof(idt[0]));

    // initialize IDT with default generic handlers
    for (i = 0; i < NIDT; i++)
        setgd(&idt[i], hwexc_666, 0, SDT_SYSIGT, SEL_KPL,
              GSEL(KCODE_SEL, SEL_KPL));
    /* Setup exception handlers */
    setgd(&idt[0], hwexc_0, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[1], hwexc_1, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[2], hwexc_2, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[3], hwexc_3, 0, SDT_SYSIGT, SEL_UPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[4], hwexc_4, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[5], hwexc_5, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[6], hwexc_6, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[7], hwexc_7, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[8], hwexc_8, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[9], hwexc_9, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[10], hwexc_10, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[11], hwexc_11, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[12], hwexc_12, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[13], hwexc_13, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[14], hwexc_14, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    // Interrupt 15 is undefined
    setgd(&idt[16], hwexc_16, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[17], hwexc_17, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[18], hwexc_18, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[19], hwexc_19, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    // Interrupts 20 - 31 are reserved
    /* Setup classic PIC interrupt handlers */
    setgd(&idt[32], hwirq_32, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[33], hwirq_33, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[34], hwirq_34, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[35], hwirq_35, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[36], hwirq_36, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[37], hwirq_37, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[38], hwirq_38, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[39], hwirq_39, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[40], hwirq_40, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[41], hwirq_41, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[42], hwirq_42, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[43], hwirq_43, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[44], hwirq_44, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[45], hwirq_45, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[46], hwirq_46, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[47], hwirq_47, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    // Setup generic interrupt handlers
    setgd(&idt[48], hwirq_48, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[49], hwirq_49, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[50], hwirq_50, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[51], hwirq_51, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[52], hwirq_52, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[53], hwirq_53, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[54], hwirq_54, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[55], hwirq_55, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[56], hwirq_56, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[57], hwirq_57, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[58], hwirq_58, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[59], hwirq_59, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[60], hwirq_60, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[61], hwirq_61, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    // XXX Interrupts used for TRACE IPIs
    setgd(&idt[62], hwirq_62, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[63], hwirq_63, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    // Setup local APIC interrupt handlers
    setgd(&idt[248], hwirq_248, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[249], hwirq_249, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[250], hwirq_250, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[251], hwirq_251, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[252], hwirq_252, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[253], hwirq_253, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[254], hwirq_254, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    /* Load IDT register */
    __asm volatile("lidt %0" :: "m" (region));
}