/**
 * \brief x86-64 interrupt/exception handling utility functions
 *
 * Copyright (c) 2007, 2008, 2009, 2010, 2011, 2013, ETH Zurich.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
 */
/*********************************************************************
 *
 * Copyright (C) 2003-2004, Karlsruhe University
 *
 * File path:    glue/v4-amd64/hwirq.h
 * Description:  Macros to define interrupt handler stubs for AMD64
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: hwirq.h,v 1.3 2006/10/19 22:57:35 ud3 Exp $
 *
 ********************************************************************/
#include <arch_gdb_stub.h>
#include <arch/x86/perfmon.h>
#include <arch/x86/barrelfish_kpi/perfmon.h>
#include <arch/x86/pic.h>
#include <arch/x86/apic.h>
#include <barrelfish_kpi/dispatcher_shared_target.h>
#include <asmoffsets.h>
#include <trace/trace.h>
#include <trace_definitions/trace_defs.h>
#include <arch/x86/timing.h>
#include <arch/x86/syscall.h>
#include <arch/x86/ipi_notify.h>
#include <barrelfish_kpi/cpu_arch.h>
#include <mdb/mdb_tree.h>
#include <sys_debug.h>
#include <dev/ia32_dev.h>
#ifdef FPU_LAZY_CONTEXT_SWITCH
#  include <fpu.h>
#endif
#ifdef __k1om__
#  define START_KERNEL_PHYS K1OM_START_KERNEL_PHYS
#else
#  define START_KERNEL_PHYS X86_64_START_KERNEL_PHYS
#endif
static const char *idt_descs[] =
{
    [IDT_DE]    = "#DE: Divide Error",
    [IDT_DB]    = "#DB: Debug",
    [IDT_NMI]   = "Nonmaskable External Interrupt",
    [IDT_BP]    = "#BP: Breakpoint",
    [IDT_OF]    = "#OF: Overflow",
    [IDT_BR]    = "#BR: Bound Range Exceeded",
    [IDT_UD]    = "#UD: Undefined/Invalid Opcode",
    [IDT_NM]    = "#NM: No Math Coprocessor",
    [IDT_DF]    = "#DF: Double Fault",
    [IDT_FPUGP] = "Coprocessor Segment Overrun",
    [IDT_TS]    = "#TS: Invalid TSS",
    [IDT_NP]    = "#NP: Segment Not Present",
    [IDT_SS]    = "#SS: Stack Segment Fault",
    [IDT_GP]    = "#GP: General Protection Fault",
    [IDT_PF]    = "#PF: Page Fault",
    [IDT_MF]    = "#MF: FPU Floating-Point Error",
    [IDT_AC]    = "#AC: Alignment Check",
    [IDT_MC]    = "#MC: Machine Check",
    [IDT_XF]    = "#XF: SIMD Floating-Point Exception",
};
/**
 * \brief Define IRQ handler number 'num'.
 *
 * This defines an interrupt handler for vector \#num. The way this is done is
 * quite tricky: A block of assembly is emitted, with a label pointing to
 * the beginning of that block. The label is made known as a symbol by
 * having a C function _declaration_ directly in front of the block. The
 * symbol has to be defined extern, so it is global, but its ELF visibility
 * is set "hidden", so that the symbol does not end up in the GOT. This is
 * very important for keeping the code position-independent.
 *
 * The NOERR/ERR variants depend on whether the hardware delivers an error code.
 */
#define HW_EXCEPTION_NOERR(num)                                         \
    void __attribute__ ((visibility ("hidden"))) hwexc_##num(void);     \
    __asm (                                                             \
        "\t.type hwexc_"#num",@function                 \n\t"           \
        "hwexc_"#num":                                  \n\t"           \
        "pushq $0         /* dummy error code */        \n\t"           \
        "pushq $"#num"    /* vector number */           \n\t"           \
        "jmp hwexc_common /* common stuff */            \n\t"           \
    )
#define HW_EXCEPTION_ERR(num)                                           \
    void __attribute__ ((visibility ("hidden"))) hwexc_##num(void);     \
    __asm (                                                             \
        "\t.type hwexc_"#num",@function                 \n\t"           \
        "hwexc_"#num":                                  \n\t"           \
        "pushq $"#num"    /* vector number */           \n\t"           \
        "jmp hwexc_common /* common stuff */            \n\t"           \
    )
#define XHW_IRQ(num)                                                    \
    void __attribute__ ((visibility ("hidden"))) hwirq_##num(void);     \
    __asm (                                                             \
        "\t.type hwirq_"#num",@function                 \n\t"           \
        "hwirq_"#num":                                  \n\t"           \
        "pushq $"#num"    /* vector number */           \n\t"           \
        "jmp hwirq_common /* common stuff */            \n\t"           \
    )
/// Noop wrapper for HW_IRQ to deal with CPP stringification problems
#define HW_IRQ(num) XHW_IRQ(num)

#define STR(x) #x
#define XTR(x) STR(x)
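/* The two-level STR/XTR indirection makes the preprocessor expand a macro
 * argument *before* stringifying it. For example, assuming asmoffsets.h
 * defined OFFSETOF_DCB_DISP as 16 (an illustrative value, not the real
 * offset):
 *
 *   XTR(OFFSETOF_DCB_DISP)  ->  STR(16)  ->  "16"
 *
 * whereas STR(OFFSETOF_DCB_DISP) alone would produce the useless string
 * "OFFSETOF_DCB_DISP". The inline assembly below relies on this to splice
 * numeric structure offsets into instruction operands. */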
159 " .type hwexc_common ,@function \n\t"
161 "testb $3, 24(%rsp) /* if CS.CPL == 0 */ \n\t"
162 "jz kernel_fault \n\t"
164 /* User exception: save full state and return to the user.
165 * This path could be optimized by only saving the non-volatile
166 * registers (since the kernel's C path will maintain them), and
167 * having the user-mode code save them if needed. Since the
168 * current user code always does need them, we just save the full
171 /* decide where to save the state, the options are:
172 * pagefault and enabled -> enabled save area
173 * pagefault while disabled or any other trap -> trap save area
176 "movq dcb_current(%rip), %rcx /* rcx = dcb_current */ \n\t"
177 "movq "XTR(OFFSETOF_DCB_DISP)"(%rcx), %rcx /* rcx = dcb_current->disp */\n\t"
178 "cmpq $14, 8(%rsp) /* is pagefault? */ \n\t"
180 "cmpl $0, "XTR(OFFSETOF_DISP_DISABLED)"(%rcx) /* disp->disabled ? */\n\t"
183 "movq 4*8(%rsp), %rbx /* rbx = faulting IP */ \n\t"
184 "cmpq "XTR(OFFSETOF_DISP_X86_64_CRIT_PC_LOW)"(%rcx), %rbx /* crit_pc_low <= rip? */\n\t"
185 "jae disabled_test \n\t"
186 "\nsave_enabled: \n\t"
188 "addq $"XTR(OFFSETOF_DISP_X86_64_ENABLED_AREA)", %rcx /* rcx = enabled_save_area */\n\t"
190 "\ndisabled_test: \n\t"
191 "cmpq "XTR(OFFSETOF_DISP_X86_64_CRIT_PC_HIGH)"(%rcx), %rbx /* crit_pc_high > rip? */\n\t"
192 "jae save_enabled \n\t"
195 "addq $"XTR(OFFSETOF_DISP_X86_64_TRAP_AREA)", %rcx /* trap_save_area */\n\t"
197 /* save to the save area. at this point, rcx = save area ptr,
198 * rsp+8 = exception num, rsp+16 = CPU-stacked error and registers */
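    /* For reference, a sketch of the stack layout at save_common (top of
     * stack first); the CPU-pushed frame is as architecturally defined:
     *   rsp+ 0: caller's %rcx (pushed above, reloaded below)
     *   rsp+ 8: vector number (pushed by the hwexc_<num> stub)
     *   rsp+16: error code (CPU-pushed, or the stub's dummy 0)
     *   rsp+24: RIP    rsp+32: CS    rsp+40: RFLAGS
     *   rsp+48: RSP    rsp+56: SS
     */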
200 "movq %rax, 0*8(%rcx) \n\t"
201 "popq %rax /* original rcx */ \n\t"
202 "movq %rbx, 1*8(%rcx) \n\t"
203 "movq %rax, 2*8(%rcx) \n\t"
204 "movq %rdx, 3*8(%rcx) \n\t"
205 "movq %rsi, 4*8(%rcx) \n\t"
206 "movq %rdi, 5*8(%rcx) \n\t"
207 "movq %rbp, 6*8(%rcx) \n\t"
208 "movq %r8, 8*8(%rcx) \n\t"
209 "movq %r9, 9*8(%rcx) \n\t"
210 "movq %r10, 10*8(%rcx) \n\t"
211 "movq %r11, 11*8(%rcx) \n\t"
212 "movq %r12, 12*8(%rcx) \n\t"
213 "movq %r13, 13*8(%rcx) \n\t"
214 "movq %r14, 14*8(%rcx) \n\t"
215 "movq %r15, 15*8(%rcx) \n\t"
216 "mov %fs, "XTR(OFFSETOF_FS_REG)"(%rcx) \n\t"
217 "mov %gs, "XTR(OFFSETOF_GS_REG)"(%rcx) \n\t"
218 "popq %rdi /* vector number */ \n\t"
219 "popq %rsi /* error code */ \n\t"
220 "movq %rsp, %rdx /* CPU save area */ \n\t"
221 "callq generic_handle_user_exception \n\t"
    /* a kernel fault means something bad happened, so we stack
     * everything for the debugger to use, in the GDB frame format */
    "\nkernel_fault:                                        \n\t"
    "pushq 6*8(%rsp) /* SS */                               \n\t"
    "pushq 4*8(%rsp) /* CS */                               \n\t"
    "pushq 6*8(%rsp) /* EFLAGS */                           \n\t"
    "pushq 5*8(%rsp) /* RIP */                              \n\t"
    /* TODO: extend frame size and save FS/GS so we can resume afterwards */
    "pushq %r15                                             \n\t"
    "pushq %r14                                             \n\t"
    "pushq %r13                                             \n\t"
    "pushq %r12                                             \n\t"
    "pushq %r11                                             \n\t"
    "pushq %r10                                             \n\t"
    "pushq %r9                                              \n\t"
    "pushq %r8                                              \n\t"
    "pushq 17*8(%rsp) /* RSP */                             \n\t"
    "pushq %rbp                                             \n\t"
    "pushq %rdi                                             \n\t"
    "pushq %rsi                                             \n\t"
    "pushq %rdx                                             \n\t"
    "pushq %rcx                                             \n\t"
    "pushq %rbx                                             \n\t"
    "pushq %rax                                             \n\t"
    "movq 20*8(%rsp), %rdi  /* vector number */             \n\t"
    "movq 21*8(%rsp), %rsi  /* error code */                \n\t"
    "movq %rsp, %rdx        /* save area ptr */             \n\t"
    "jmp generic_handle_kernel_exception                    \n\t"
    /* (Device) interrupt. */
    "   .type hwirq_common ,@function                       \n\t"
    "hwirq_common:                                          \n\t"
    /* If it happened in kernel_mode, simply make userspace runnable.
     * This is a special case, since interrupts are normally disabled when
     * entering the kernel. However, they are enabled when there is nothing
     * to do, and the kernel goes to sleep using wait_for_interrupts() */
    "testb $3, 16(%rsp) /* if CS.CPL == 0 */                \n\t"
    "jz call_handle_irq                                     \n\t"

    /* Happened in user mode.
     * we need to save everything to the dispatcher. */
    /* decide where to save the state, either enabled or disabled save areas */
    "pushq %rdx                                             \n\t"
    "movq dcb_current(%rip), %rdx /* rdx = dcb_current */   \n\t"
    "movq "XTR(OFFSETOF_DCB_DISP)"(%rdx), %rdx /* rdx = dcb_current->disp */\n\t"
    "cmpl $0, "XTR(OFFSETOF_DISP_DISABLED)"(%rdx) /* disp->disabled ? */\n\t"
    "jne irq_save_disabled                                  \n\t"
    "pushq %rbx                                             \n\t"
    "movq 24(%rsp), %rbx  /* rbx = faulting IP */           \n\t"
    "cmpq "XTR(OFFSETOF_DISP_X86_64_CRIT_PC_LOW)"(%rdx), %rbx /* crit_pc_low <= rip? */\n\t"
    "jae irq_disabled_test                                  \n\t"
    "\nirq_save_enabled:                                    \n\t"
    "popq %rbx                                              \n\t"
    "addq $"XTR(OFFSETOF_DISP_X86_64_ENABLED_AREA)", %rdx /* rdx = enabled_save_area */\n\t"
    "jmp irq_do_save                                        \n\t"
    "\nirq_disabled_test:                                   \n\t"
    "cmpq "XTR(OFFSETOF_DISP_X86_64_CRIT_PC_HIGH)"(%rdx), %rbx /* crit_pc_high > rip? */\n\t"
    "jae irq_save_enabled                                   \n\t"
    "popq %rbx                                              \n\t"
    "\nirq_save_disabled:                                   \n\t"
    "addq $"XTR(OFFSETOF_DISP_X86_64_DISABLED_AREA)", %rdx /* disabled_save_area */\n\t"

    /* save to the save area. at this point, rdx = save area ptr,
     * rsp+8 = vector number, rsp+16 = CPU-stacked registers */
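    /* Sketch of the stack as irq_do_save is entered (top of stack first);
     * hardware interrupts push no error code, so the CPU frame starts at
     * rsp+16:
     *   rsp+ 0: caller's %rdx (pushed above, reloaded below)
     *   rsp+ 8: vector number (pushed by the hwirq_<num> stub)
     *   rsp+16: RIP    rsp+24: CS    rsp+32: RFLAGS
     *   rsp+40: RSP    rsp+48: SS
     */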
289 "\nirq_do_save: \n\t"
290 "movq %rax, 0*8(%rdx) \n\t"
291 "movq %rbx, 1*8(%rdx) \n\t"
292 "movq %rcx, 2*8(%rdx) \n\t"
293 "popq %rax /* original rdx */ \n\t"
294 "movq %rax, 3*8(%rdx) \n\t"
295 "movq %rsi, 4*8(%rdx) \n\t"
296 "movq %rdi, 5*8(%rdx) \n\t"
297 "movq %rbp, 6*8(%rdx) \n\t"
298 "movq %r8, 8*8(%rdx) \n\t"
299 "movq %r9, 9*8(%rdx) \n\t"
300 "movq %r10, 10*8(%rdx) \n\t"
301 "movq %r11, 11*8(%rdx) \n\t"
302 "movq %r12, 12*8(%rdx) \n\t"
303 "movq %r13, 13*8(%rdx) \n\t"
304 "movq %r14, 14*8(%rdx) \n\t"
305 "movq %r15, 15*8(%rdx) \n\t"
306 "mov %fs, "XTR(OFFSETOF_FS_REG)"(%rdx) \n\t"
307 "mov %gs, "XTR(OFFSETOF_GS_REG)"(%rdx) \n\t"
308 "popq %rdi /* vector number */ \n\t"
309 "movq %rsp, %rsi /* CPU save area */ \n\t"
310 "jmp generic_handle_irq /* NB: rdx = disp save ptr*/\n\t"
312 "\ncall_handle_irq: \n\t"
314 "callq handle_irq \n\t"
HW_EXCEPTION_NOERR(0);
HW_EXCEPTION_NOERR(1);
HW_EXCEPTION_NOERR(2);
HW_EXCEPTION_NOERR(3);
HW_EXCEPTION_NOERR(4);
HW_EXCEPTION_NOERR(5);
HW_EXCEPTION_NOERR(6);
HW_EXCEPTION_NOERR(7);
HW_EXCEPTION_ERR(8);
HW_EXCEPTION_NOERR(9);
HW_EXCEPTION_ERR(10);
HW_EXCEPTION_ERR(11);
HW_EXCEPTION_ERR(12);
HW_EXCEPTION_ERR(13);
HW_EXCEPTION_ERR(14);
HW_EXCEPTION_NOERR(16);
HW_EXCEPTION_ERR(17);
HW_EXCEPTION_NOERR(18);
HW_EXCEPTION_NOERR(19);
// Classic PIC interrupts
HW_IRQ(32);
HW_IRQ(33);
HW_IRQ(34);
HW_IRQ(35);
HW_IRQ(36);
HW_IRQ(37);
HW_IRQ(38);
HW_IRQ(39);
HW_IRQ(40);
HW_IRQ(41);
HW_IRQ(42);
HW_IRQ(43);
HW_IRQ(44);
HW_IRQ(45);
HW_IRQ(46);
HW_IRQ(47);

// Generic interrupts
HW_IRQ(48);
HW_IRQ(49);
HW_IRQ(50);
HW_IRQ(51);
HW_IRQ(52);
HW_IRQ(53);
HW_IRQ(54);
HW_IRQ(55);
HW_IRQ(56);
HW_IRQ(57);
HW_IRQ(58);
HW_IRQ(59);
HW_IRQ(60);
HW_IRQ(61);
HW_IRQ(62);
HW_IRQ(63);

// Local APIC interrupts
HW_IRQ(248);
HW_IRQ(249);
HW_IRQ(250);
HW_IRQ(251);
HW_IRQ(252);
HW_IRQ(253);
HW_IRQ(254);
// Reserved as "unhandled exception" handler
HW_EXCEPTION_NOERR(666);
#define ERR_PF_PRESENT         (1 << 0)
#define ERR_PF_READ_WRITE      (1 << 1)
#define ERR_PF_USER_SUPERVISOR (1 << 2)
#define ERR_PF_RESERVED        (1 << 3)
#define ERR_PF_INSTRUCTION     (1 << 4)
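/* Worked example of the bits above: a page-fault error code of 0x6 =
 * ERR_PF_READ_WRITE | ERR_PF_USER_SUPERVISOR (with ERR_PF_PRESENT clear)
 * means a user-mode write hit a not-present page; 0x11 = ERR_PF_PRESENT |
 * ERR_PF_INSTRUCTION means an instruction fetch violated the permissions
 * of a present mapping. generic_handle_kernel_exception() below decodes
 * the code exactly this way. */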
/**
 * \brief Interrupt Descriptor Table (IDT) for the processor this kernel is
 *        running on.
 */
static struct gate_descriptor idt[NIDT] __attribute__ ((aligned (16)));
static int timer_fired = 0;

#if CONFIG_TRACE && NETWORK_STACK_TRACE
#define TRACE_ETHERSRV_MODE 1
#endif // CONFIG_TRACE && NETWORK_STACK_TRACE
/**
 * \brief Send interrupt notification to user-space listener.
 *
 * Sends an interrupt notification IDC to a local endpoint that
 * listens for IRQ notifications.
 *
 * \param irq     IRQ# to send in notification.
 */
static uint32_t pkt_interrupt_count = 0;
static void send_user_interrupt(int irq)
{
    assert(irq >= 0 && irq < NDISPATCH);
    struct kcb *k = kcb_current;
    do {
        if (k->irq_dest_caps[irq] != NULL) {
            assert(k->irq_dest_caps[irq]->cap.type == ObjType_IRQVector);
            break;
        }
        k = k->next;
    } while (k && k != kcb_current);
    // if k == NULL we don't need to switch as we only have a single kcb
    if (k) {
        switch_kcb(k);
    }
    // from here: kcb_current is the kcb for which the interrupt was intended
    struct capability *cap = &kcb_current->irq_dest_caps[irq]->cap;
    // Return on null cap (unhandled interrupt)
    if(cap->type == ObjType_Null) {
        printk(LOG_WARN, "unhandled IRQ %d\n", irq);
        return;
    } else if (cap->type == ObjType_IRQVector && cap->u.irqvector.ep == NULL){
        printk(LOG_WARN, "unhandled IRQ (no endpoint) %d\n", irq);
        return;
    } else if (cap->type > ObjType_Num) {
        // XXX: HACK: this doesn't fix the root cause of having weird entries
        // in kcb_current->irq_dispatch[], but it allows us to test the system
        // more reliably for now. -SG
        // Also complain to SG if this gets checked in to the main tree!
        printk(LOG_WARN, "receiver type > %d, %d, assume unhandled\n", ObjType_Num, cap->type);
        return;
    }
    ++pkt_interrupt_count;
#if NETWORK_STACK_TRACE
    trace_event(TRACE_SUBSYS_NNET, TRACE_EVENT_NNET_UIRQ, pkt_interrupt_count);
#endif // NETWORK_STACK_TRACE
    // Otherwise, cap needs to be an endpoint
    assert(cap->type == ObjType_IRQVector);

    struct capability * ep = cap->u.irqvector.ep;

    // send empty message as notification
    errval_t err = lmp_deliver_notification(ep);
    if (err_is_fail(err)) {
        if (err_no(err) == SYS_ERR_LMP_BUF_OVERFLOW) {
            struct dispatcher_shared_generic *disp =
                get_dispatcher_shared_generic(ep->u.endpoint.listener->disp);
            printk(LOG_DEBUG, "%.*s: IRQ message buffer overflow on IRQ %d\n",
                   DISP_NAME_LEN, disp->name, irq);
        } else {
            printk(LOG_ERR, "Unexpected error delivering IRQ\n");
        }
    }
    /* XXX: run the handler dispatcher immediately
     * we shouldn't do this (we should let the scheduler decide), but because
     * our default scheduler is braindead, this is a quick hack to make sure
     * that mostly-sane things happen
     */
    dispatch(cap->u.endpoint.listener);
    dispatch(schedule());
}
/**
 * This interface is deprecated. Use irq_table_alloc_dest_caps
 */
errval_t irq_table_alloc(int *outvec)
{
    int i;
    // XXX: this is O(#kcb*NDISPATCH)
    for (i = 0; i < NDISPATCH; i++) {
        struct kcb *k = kcb_current;
        bool found_free = true;
        do {
            if (k->irq_dest_caps[i] != NULL) {
                found_free = false;
                break;
            }
            k = k->next;
        } while(k && k != kcb_current);
        if (found_free) {
            break;
        }
    }
    if (i == NDISPATCH) {
        *outvec = -1;
        return SYS_ERR_IRQ_NO_FREE_VECTOR;
    }
    *outvec = i;
    return SYS_ERR_OK;
}
errval_t irq_debug_create_src_cap(uint8_t dcn_vbits, capaddr_t dcn, capaddr_t out_cap_addr, uint16_t gsi)
{
    // This method is a hack to forge an IRQ src cap for the given GSI targeting the ioapic
    errval_t err;
    struct cte out_cap;
    memset(&out_cap, 0, sizeof(struct cte));

    out_cap.cap.type = ObjType_IRQ;
    out_cap.cap.u.irq.line = gsi;
    const uint32_t ioapic_controller_id = 2;
    out_cap.cap.u.irq.controller = ioapic_controller_id;

    struct cte * cn;
    err = caps_lookup_slot(&dcb_current->cspace.cap, dcn, dcn_vbits, &cn, CAPRIGHTS_WRITE);
    if(err_is_fail(err)){
        return err;
    }
    err = caps_copy_to_cnode(cn, out_cap_addr, &out_cap, 0, 0, 0);
    if(err_is_fail(err)){
        return err;
    }

    return SYS_ERR_OK;
}
errval_t irq_table_alloc_dest_cap(uint8_t dcn_vbits, capaddr_t dcn, capaddr_t out_cap_addr)
{
    errval_t err;
    struct cte out_cap;
    memset(&out_cap, 0, sizeof(struct cte));

    int i;
    // TODO Luki: Figure out why it was working with i=0 before
    for (i = 1; i < NDISPATCH; i++) {
        //struct kcb * k = kcb_current;
        assert(kcb_current->irq_dest_caps[i] == NULL ||
               kcb_current->irq_dest_caps[i]->cap.type == ObjType_IRQVector);
        //TODO Luki: iterate over kcb
        if (kcb_current->irq_dest_caps[i] == NULL) {
            break;
        }
    }
    if (i == NDISPATCH) {
        return SYS_ERR_IRQ_NO_FREE_VECTOR;
    }
    out_cap.cap.type = ObjType_IRQVector;

    //TODO Luki: Set the lapic_controller_id
    const uint32_t lapic_controller_id = 0;
    out_cap.cap.u.irqvector.controller = lapic_controller_id;
    out_cap.cap.u.irqvector.vector = i;

    struct cte * cn;
    err = caps_lookup_slot(&dcb_current->cspace.cap, dcn, dcn_vbits, &cn, CAPRIGHTS_WRITE);
    if(err_is_fail(err)){
        return err;
    }
    // The following lines are equivalent to
    // caps_copy_to_cnode(cn, out_cap_addr, &out_cap, 0, 0, 0);
    assert(cn->cap.type == ObjType_CNode);
    struct cte *dest_cte;
    dest_cte = caps_locate_slot(cn->cap.u.cnode.cnode, out_cap_addr);
    err = caps_copy_to_cte(dest_cte, &out_cap, 0, 0, 0);
    if(err_is_fail(err)){
        return err;
    }

    kcb_current->irq_dest_caps[i] = dest_cte;

    return SYS_ERR_OK;
}
errval_t irq_connect(struct capability *dest_cap, capaddr_t endpoint_adr)
{
    errval_t err;
    struct cte *endpoint;

    printk(LOG_ERR, "Entering irq_connect\n");

    // Lookup & check message endpoint cap
    err = caps_lookup_slot(&dcb_current->cspace.cap, endpoint_adr,
                           CPTR_BITS, &endpoint, CAPRIGHTS_WRITE);
    if (err_is_fail(err)) {
        return err_push(err, SYS_ERR_IRQ_LOOKUP_EP);
    }

    assert(endpoint != NULL);

    // Return w/error if cap is not an endpoint
    if(endpoint->cap.type != ObjType_EndPoint) {
        return SYS_ERR_IRQ_NOT_ENDPOINT;
    }

    // Return w/error if no listener on endpoint
    if(endpoint->cap.u.endpoint.listener == NULL) {
        return SYS_ERR_IRQ_NO_LISTENER;
    }

    assert(dest_cap->type == ObjType_IRQVector);
    dest_cap->u.irqvector.ep = &endpoint->cap;

    return SYS_ERR_OK;
}
/**
 * Deprecated. Use capabilities.
 */
errval_t irq_table_set(unsigned int nidt, capaddr_t endpoint)
{
    errval_t err;
    struct cte *recv;

    err = caps_lookup_slot(&dcb_current->cspace.cap, endpoint,
                           CPTR_BITS, &recv, CAPRIGHTS_WRITE);
    if (err_is_fail(err)) {
        return err_push(err, SYS_ERR_IRQ_LOOKUP);
    }

    assert(recv != NULL);

    // Return w/error if cap is not an endpoint
    if(recv->cap.type != ObjType_EndPoint) {
        return SYS_ERR_IRQ_NOT_ENDPOINT;
    }

    // Return w/error if no listener on endpoint
    if(recv->cap.u.endpoint.listener == NULL) {
        return SYS_ERR_IRQ_NO_LISTENER;
    }

    printk(LOG_ERR, "Used deprecated irq_table_set. Not setting interrupt\n");
    return SYS_ERR_IRQ_INVALID;

    if(nidt < NDISPATCH) {
        // check that we don't overwrite someone else's handler
        if (kcb_current->irq_dispatch[nidt].cap.type != ObjType_Null) {
            printf("kernel: installing new handler for IRQ %d\n", nidt);
        }
        err = caps_copy_to_cte(&kcb_current->irq_dispatch[nidt], recv, false, 0, 0);
        return err;
    }
    return SYS_ERR_IRQ_INVALID;
}
errval_t irq_table_delete(unsigned int nidt)
{
    printk(LOG_ERR, "Used deprecated irq_table_delete. Not deleting interrupt\n");
    return SYS_ERR_IRQ_INVALID;

    if(nidt < NDISPATCH) {
        kcb_current->irq_dispatch[nidt].cap.type = ObjType_Null;
        return SYS_ERR_OK;
    }
    return SYS_ERR_IRQ_INVALID;
}
errval_t irq_table_notify_domains(struct kcb *kcb)
{
    //TODO Luki: Check if this stuff is correct with multiple kcbs
    uintptr_t msg[] = { 1 };
    for (int i = 0; i < NDISPATCH; i++) {
        struct capability * dest_cap = &(kcb->irq_dest_caps[i]->cap);
        if (dest_cap->type == ObjType_IRQVector) {
            struct capability * ep_cap = dest_cap->u.irqvector.ep;
            if (ep_cap != NULL) {
                // 1 word message as notification
                errval_t err = lmp_deliver_payload(ep_cap, NULL, msg, 1, false);
                if (err_is_fail(err)) {
                    if (err_no(err) == SYS_ERR_LMP_BUF_OVERFLOW) {
                        struct dispatcher_shared_generic *disp =
                            get_dispatcher_shared_generic(ep_cap->u.endpoint.listener->disp);
                        printk(LOG_DEBUG, "%.*s: IRQ message buffer overflow\n",
                               DISP_NAME_LEN, disp->name);
                    } else {
                        printk(LOG_ERR, "Unexpected error delivering IRQ\n");
                    }
                }
                // Remove endpoint. Domains must re-register by calling connect again.
                kcb->irq_dest_caps[i]->cap.u.irqvector.ep->type = ObjType_Null;
            }
        }
    }
    return SYS_ERR_OK;
}
/**
 * \brief Handles kernel exceptions
 *
 * \param vec            Vector number of exception
 * \param error          Error code from CPU, or 0 for an exception without an error code
 * \param gdb_save_frame Pointer to save area for registers stacked by trap handler
 */
static __attribute__ ((used,noreturn))
void generic_handle_kernel_exception(uint64_t vec, uint64_t error,
                                     uintptr_t *gdb_save_frame)
{
    lvaddr_t fault_address;
    char *descr;

    if (vec == 666) {
        panic("unhandled kernel exception (vector 666)");
    }

    assert(vec < NEXCEPTIONS);
    printk(LOG_PANIC, "exception %d (error code 0x%lx): ", (int)vec, error);

    if (vec == ia32_vec_pf) {
        printf("%s page fault due to %s%s, while in %s mode%s\n",
               error & ERR_PF_READ_WRITE ? "write" : "read",
               error & ERR_PF_PRESENT ? "access violation" : "page not present",
               error & ERR_PF_RESERVED ? ", reserved bits set in page table"
                                       : "",
               error & ERR_PF_USER_SUPERVISOR ? "user" : "supervisor",
               error & ERR_PF_INSTRUCTION ? ", by instruction fetch" : "");

        __asm volatile("mov %%cr2, %[fault_address]"
                       : [fault_address] "=r" (fault_address));
        printf("Address that caused the fault: 0x%lx\n", fault_address);

    } else if ((descr = ia32_exc_vec_describe(vec))) {
        printf("%s\n", descr);
    } else {
        printf("unhandled exception!\n");
    }
    // Print faulting instruction pointer
    uintptr_t rip = gdb_save_frame[GDB_X86_64_RIP_REG];
    printf("Faulting instruction pointer (or next instruction): 0x%lx\n", rip);
    printf("  => i.e. unrelocated kernel address 0x%lx\n",
           rip - (uintptr_t)&_start_kernel + START_KERNEL_PHYS);
    printf("Registers:\n");
    printf(" rax: 0x%016lx  r8 : 0x%016lx\n",
           gdb_save_frame[GDB_X86_64_RAX_REG],
           gdb_save_frame[GDB_X86_64_R8_REG]);
    printf(" rbx: 0x%016lx  r9 : 0x%016lx\n",
           gdb_save_frame[GDB_X86_64_RBX_REG],
           gdb_save_frame[GDB_X86_64_R9_REG]);
    printf(" rcx: 0x%016lx  r10: 0x%016lx\n",
           gdb_save_frame[GDB_X86_64_RCX_REG],
           gdb_save_frame[GDB_X86_64_R10_REG]);
    printf(" rdx: 0x%016lx  r11: 0x%016lx\n",
           gdb_save_frame[GDB_X86_64_RDX_REG],
           gdb_save_frame[GDB_X86_64_R11_REG]);
    printf(" rsp: 0x%016lx  r12: 0x%016lx\n",
           gdb_save_frame[GDB_X86_64_RSP_REG],
           gdb_save_frame[GDB_X86_64_R12_REG]);
    printf(" rdi: 0x%016lx  r13: 0x%016lx\n",
           gdb_save_frame[GDB_X86_64_RDI_REG],
           gdb_save_frame[GDB_X86_64_R13_REG]);
    printf(" rsi: 0x%016lx  r14: 0x%016lx\n",
           gdb_save_frame[GDB_X86_64_RSI_REG],
           gdb_save_frame[GDB_X86_64_R14_REG]);
    printf(" rip: 0x%016lx  r15: 0x%016lx\n",
           gdb_save_frame[GDB_X86_64_RIP_REG],
           gdb_save_frame[GDB_X86_64_R15_REG]);
    // Print the top 10 stack words
    printf("Top o' stack:\n");
    for(int i = 0; i < 10; i++) {
        unsigned long *p = (unsigned long *)gdb_save_frame[GDB_X86_64_RSP_REG] + i;
        printf(" %d \t 0x%016lx (%lu)\n", i, *p, *p);
    }

    // Drop to the debugger
    gdb_handle_exception(vec, gdb_save_frame);
    panic("gdb_handle_exception returned");
}
/**
 * \brief copies CPU-stacked registers to a dispatcher save area
 */
static void copy_cpu_frame_to_dispatcher(
    uintptr_t * NONNULL COUNT(X86_SAVE_AREA_SIZE) cpu_save_area,
    struct registers_x86_64 *disp_save_area)
{
    assert((cpu_save_area[X86_SAVE_EFLAGS] & USER_EFLAGS) == USER_EFLAGS);

    disp_save_area->rsp = cpu_save_area[X86_SAVE_RSP];
    disp_save_area->eflags = cpu_save_area[X86_SAVE_EFLAGS];
    disp_save_area->rip = cpu_save_area[X86_SAVE_RIP];
}
/**
 * \brief Handles user-mode exceptions
 *
 * \param vec            Vector number of exception
 * \param error          Error code from CPU, or 0 for an exception without an error code
 * \param cpu_save_area  Pointer to save area for registers stacked by CPU
 * \param disp_save_area Pointer to save area in dispatcher
 */
static __attribute__ ((used))
void generic_handle_user_exception(int vec, uint64_t error,
                                   uintptr_t * NONNULL COUNT(X86_SAVE_AREA_SIZE) cpu_save_area,
                                   struct registers_x86_64 *disp_save_area)
{
    assert(dcb_current->disp_cte.cap.type == ObjType_Frame);
    dispatcher_handle_t handle = dcb_current->disp;
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);
    uint64_t rip = cpu_save_area[X86_SAVE_RIP];
    uint64_t rsp = cpu_save_area[X86_SAVE_RSP];
    lvaddr_t fault_address, handler = 0, param = 0;

    assert(vec < NEXCEPTIONS);
    assert((cpu_save_area[X86_SAVE_CS] & 0x3) != 0); // CS.CPL > 0

    copy_cpu_frame_to_dispatcher(cpu_save_area, disp_save_area);

    bool disabled = dispatcher_is_disabled_ip(handle, rip);
    dcb_current->disabled = disabled;
    dcb_current->faults_taken++;

    // Store FPU state if it's used
    // Do this for every trap when the current domain used the FPU
    // Do it for FPU not available traps in any case (to save the last FPU user)
    // XXX: Need to reset fpu_dcb when that DCB is deleted
    if(fpu_dcb != NULL &&
       (fpu_dcb == dcb_current || vec == IDT_NM)) {
        struct dispatcher_shared_generic *dst =
            get_dispatcher_shared_generic(fpu_dcb->disp);

        // Turn FPU trap off temporarily for saving its state
        bool trap = fpu_trap_get();
        fpu_trap_off();

        if(fpu_dcb->disabled) {
            fpu_save(dispatcher_get_disabled_fpu_save_area(fpu_dcb->disp));
            dst->fpu_used = 1;
        } else {
            assert(!fpu_dcb->disabled);
            fpu_save(dispatcher_get_enabled_fpu_save_area(fpu_dcb->disp));
            dst->fpu_used = 2;
        }

        if(trap) {
            fpu_trap_on();
        }
    }
    if (vec == IDT_PF) { // Page fault
        __asm volatile("mov %%cr2, %[fault_address]"
                       : [fault_address] "=r" (fault_address));

        printk(LOG_WARN, "user page fault%s in '%.*s': addr %lx IP %lx SP %lx "
                         "error 0x%lx\n",
               disabled ? " WHILE DISABLED" : "", DISP_NAME_LEN,
               disp->name, fault_address, rip, rsp, error);

        /* sanity-check that the trap handler saved in the right place */
        assert((disabled && disp_save_area == dispatcher_get_trap_save_area(handle))
               || (!disabled && disp_save_area == dispatcher_get_enabled_save_area(handle)));
        if (disabled) {
            handler = disp->dispatcher_pagefault_disabled;
        } else {
            handler = disp->dispatcher_pagefault;
        }
        param = fault_address;
    } else if (vec == IDT_NMI) {
        printk(LOG_WARN, "NMI - ignoring\n");
        dispatch(dcb_current);
    } else if (vec == IDT_NM) { // device not available (FPU) exception
        debug(SUBSYS_DISPATCH, "FPU trap in %.*s at 0x%" PRIxPTR "\n",
              DISP_NAME_LEN, disp->name, rip);
        assert(!dcb_current->is_vm_guest);

        /* Intel system programming part 1: 2.3.1, 2.5, 11, 12.5.1
         * clear the TS flag (flag that says, that the FPU is not available)
         */
        fpu_trap_off();

        // Remember FPU-using DCB
        fpu_dcb = dcb_current;

        // Wipe FPU for protection and to initialize it in case we trapped while
        // disabled
        fpu_init();

        if (disabled) {
            // Initialize FPU (done earlier) and ignore trap
            dispatch(dcb_current);
        } else {
            // defer trap to user-space
            // FPUs are switched eagerly while disabled, there should be no trap
            assert(disp_save_area == dispatcher_get_trap_save_area(handle));
            handler = disp->dispatcher_trap;
        }
    } else if (vec == IDT_MF) {
        uint16_t fpu_status;

        __asm volatile("fnstsw %0" : "=a" (fpu_status));

        printk(LOG_WARN, "FPU error%s in '%.*s': IP %" PRIxPTR " FPU status %x\n",
               disabled ? " WHILE DISABLED" : "", DISP_NAME_LEN,
               disp->name, rip, fpu_status);

        handler = disp->dispatcher_trap;
    } else if (vec == IDT_MC) {
        // TODO: provide more useful information about the cause
        panic("machine check exception while in user mode");
    } else { // All other traps
        printk(LOG_WARN, "user trap #%d: %s%s in '%.*s': IP %lx, error %lx\n",
               vec, idt_descs[vec], disabled ? " WHILE DISABLED" : "",
               DISP_NAME_LEN, disp->name, rip, error);
        assert(disp_save_area == dispatcher_get_trap_save_area(handle));

        if (vec == IDT_DB) { // debug exception: just continue
            resume(dispatcher_get_trap_save_area(handle));
        } else if (disabled) {
            // can't handle a trap while disabled: nowhere safe to deliver it
            scheduler_remove(dcb_current);
            dispatch(schedule());
        } else {
            handler = disp->dispatcher_trap;
        }
    }
    // Make unrunnable if it has taken too many faults
    if (dcb_current->faults_taken > 2) {
        printk(LOG_WARN, "generic_handle_user_exception: too many faults, "
               "making domain unrunnable\n");
        dcb_current->faults_taken = 0; // just in case it gets restarted
        scheduler_remove(dcb_current);
        dispatch(schedule());
    }
    /* resume user to save area */
    disp->disabled = true;
    if (handler == 0) {
        printk(LOG_WARN, "no suitable handler for this type of fault, "
               "making domain unrunnable\n");
        scheduler_remove(dcb_current);
        dispatch(schedule());
    } else {
        cpu_save_area[X86_SAVE_RIP] = handler;
        cpu_save_area[X86_SAVE_EFLAGS] = USER_EFLAGS;
    }
    /* XXX: get GCC to load up the argument registers before returning */
    register uintptr_t arg0 __asm ("%rdi") = disp->udisp;
    register uintptr_t arg1 __asm ("%rsi") = param;
    register uintptr_t arg2 __asm ("%rdx") = error;
    register uintptr_t arg3 __asm ("%rcx") = rip;
    __asm volatile("" :: "r" (arg0), "r" (arg1), "r" (arg2), "r" (arg3));
}
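/* The register-pinning at the end of generic_handle_user_exception() is a
 * general GCC idiom rather than anything Barrelfish-specific. A minimal
 * standalone sketch of the same trick (hypothetical names):
 *
 *   register uintptr_t a0 __asm ("%rdi") = some_value;
 *   __asm volatile("" :: "r" (a0));   // a0 must live in %rdi here
 *
 * The empty asm consumes the variable, so the compiler is forced to have
 * materialised it in the named register before control passes the asm, and
 * therefore before the function returns into the iretq path above. */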
void
update_kernel_now(void)
{
    uint64_t tsc_now = rdtsc();
#ifdef CONFIG_ONESHOT_TIMER
    uint64_t ticks = tsc_now - tsc_lasttime;
    kernel_now += ticks / timing_get_tsc_per_ms();
#else // !CONFIG_ONESHOT_TIMER
    // maintain compatibility with old behaviour. Not sure if it is
    // actually needed. -AKK
    //
    // Ignore timeslice if it happens too closely (less than half
    // of the TSC ticks that are supposed to pass) to the last.
    // In that case we have just synced timers and see a spurious
    // APIC timer interrupt.
    if(tsc_now - tsc_lasttime >
       (kernel_timeslice * timing_get_tsc_per_ms()) / 2) {
        kernel_now += kernel_timeslice;
    }
#endif // CONFIG_ONESHOT_TIMER
    tsc_lasttime = tsc_now;
}
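/* Illustrative arithmetic for the conversion above (made-up numbers): with
 * an invariant TSC at 2.4 GHz, timing_get_tsc_per_ms() is roughly 2,400,000.
 * If 12,000,000 cycles elapsed since tsc_lasttime, kernel_now advances by
 * 12,000,000 / 2,400,000 = 5 ms. */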
/// Handle an IRQ that arrived, either while in user or kernel mode (HLT)
static __attribute__ ((used)) void handle_irq(int vector)
{
    int irq = vector - NEXCEPTIONS;
    debug(SUBSYS_DISPATCH, "IRQ vector %d (irq %d) while %s\n", vector, irq,
          dcb_current ? (dcb_current->disabled ? "disabled": "enabled") : "in kernel");

    // if we were in wait_for_interrupt(), unmask timer before running userspace
    if (dcb_current == NULL && kernel_ticks_enabled) {
        apic_unmask_timer();
    }

#if TRACE_ETHERSRV_MODE
    trace_event(TRACE_SUBSYS_NNET, TRACE_EVENT_NNET_IRQ, vector);
#endif // TRACE_ETHERSRV_MODE

    // APIC timer interrupt: handle in kernel and reschedule
    if (vector == APIC_TIMER_INTERRUPT_VECTOR) {
        // count time slices
        timer_fired++;

        // switch kcb every other timeslice
        if (!kcb_sched_suspended && timer_fired % 2 == 0 && kcb_current->next) {
            //printk(LOG_NOTE, "switching from kcb(%p) to kcb(%p)\n", kcb_current, kcb_current->next);
            switch_kcb(kcb_current->next);
        }

        apic_eoi();
        assert(kernel_ticks_enabled);
        update_kernel_now();
        trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_TIMER, kernel_now);
        wakeup_check(kernel_now+kcb_current->kernel_off);
    } else if (vector == APIC_PERFORMANCE_INTERRUPT_VECTOR) {
        // Handle performance counter overflow
        perfmon_measure_reset();
        if(dcb_current!=NULL) {
            // Get faulting instruction pointer
            struct registers_x86_64 *disp_save_area = dcb_current->disabled ?
                dispatcher_get_disabled_save_area(dcb_current->disp) :
                dispatcher_get_enabled_save_area(dcb_current->disp);
            struct dispatcher_shared_generic *disp =
                get_dispatcher_shared_generic(dcb_current->disp);

            // Setup data structure for LMP transfer to user level handler
            struct perfmon_overflow_data data = {
                .ip = disp_save_area->rip
            };
            strncpy(data.name, disp->name, PERFMON_DISP_NAME_LEN);

            // Call overflow handler represented by endpoint
            extern struct capability perfmon_callback_ep;
            size_t payload_len = sizeof(struct perfmon_overflow_data) / sizeof(uintptr_t) + 1;
            errval_t err = lmp_deliver_payload(&perfmon_callback_ep,
                                               NULL,
                                               (uintptr_t *) &data,
                                               payload_len,
                                               false);

            // Make sure delivery was okay. SYS_ERR_LMP_BUF_OVERFLOW is okay for now
            assert(err_is_ok(err) || err_no(err)==SYS_ERR_LMP_BUF_OVERFLOW);
        } else {
            // This should never happen, as interrupts are disabled in kernel
            printf("Performance counter overflow interrupt from "
                   "apic in kernel level\n");
        }
        apic_eoi();
    } else if (vector == APIC_ERROR_INTERRUPT_VECTOR) {
        printk(LOG_ERR, "APIC error interrupt fired!\n");
        xapic_esr_t esr = apic_get_esr();
        char str[256];
        xapic_esr_prtval(str, 256, esr);
        printf("%s\n", str);
        apic_eoi();
    } else if (vector == APIC_INTER_CORE_VECTOR) {
        apic_eoi();
        ipi_handle_notify();
    } else if (vector == APIC_INTER_HALT_VECTOR) {
        apic_eoi();
        // Update kernel_off for all KCBs
        struct kcb *k = kcb_current;
        do {
            k->kernel_off = kernel_now;
            k = k->next;
        } while(k && k!=kcb_current);
        // Stop the core
        halt();
    } else if (vector == APIC_SPURIOUS_INTERRUPT_VECTOR) {
        // ignore; spurious interrupts must not be acknowledged
        printk(LOG_DEBUG, "spurious interrupt\n");
    }
    else if (irq >= 0 && irq <= 15) { // classic PIC device interrupt
        printk(LOG_NOTE, "got interrupt %d!\n", irq);

        apic_eoi();

        // only handle PIC interrupts on the BSP core
        if (apic_is_bsp()) {
            if (pic_have_interrupt(irq)) {
                pic_eoi(irq);
                send_user_interrupt(irq);
            } else { // no interrupt pending, check for a different one (!)
                irq = pic_pending_interrupt();
                if (irq == -1) { // really nothing pending
                    printk(LOG_NOTE, "spurious interrupt (IRQ %d)\n", irq);
                } else { // why does this happen?! -AB
                    printk(LOG_NOTE, "IRQ %d reported on wrong vector (%d)\n",
                           irq, vector - NEXCEPTIONS);
                    pic_eoi(irq);
                    send_user_interrupt(irq);
                }
            }
        }
    }
    else { // APIC device interrupt (or IPI)
        //printk(LOG_NOTE, "interrupt %d vector %d!\n", irq, vector);
        apic_eoi();
        send_user_interrupt(irq);
    }

    // reschedule (because the runnable processes may have changed) and dispatch
    /* FIXME: the round-robin scheduler doesn't do the best thing here:
     * it always picks the next task, but we only really want to do that on
     * a timer tick
     */
    dispatch(schedule());
    panic("dispatch() returned");
}
/**
 * \brief Handles device interrupts that arrive while in user mode
 *
 * \param vector         Vector number
 * \param cpu_save_area  Pointer to save area for registers stacked by CPU
 * \param disp_save_area Pointer to save area in dispatcher
 */
static __attribute__ ((used, noreturn)) void
generic_handle_irq(int vector,
                   uintptr_t * NONNULL COUNT(X86_SAVE_AREA_SIZE) cpu_save_area,
                   struct registers_x86_64 *disp_save_area)
{
    assert(dcb_current->disp_cte.cap.type == ObjType_Frame);
    dispatcher_handle_t handle = dcb_current->disp;
    uint64_t rip = cpu_save_area[X86_SAVE_RIP];
    assert(vector < NIDT && vector >= NEXCEPTIONS);

    // Copy CPU-saved registers to dispatcher save area
    copy_cpu_frame_to_dispatcher(cpu_save_area, disp_save_area);

    /* sanity-check that the trap handler saved in the right place,
     * and update disabled flag in DCB */
    if (disp_save_area == dispatcher_get_disabled_save_area(handle)) {
        assert(dispatcher_is_disabled_ip(handle, rip));
        dcb_current->disabled = true;
    } else {
        assert(disp_save_area == dispatcher_get_enabled_save_area(handle));
        assert(!dispatcher_is_disabled_ip(handle, rip));
        dcb_current->disabled = false;
    }

    handle_irq(vector);
    resume(disp_save_area);
}
/* Utility function for code below; initialises a gate_descriptor */
static void setgd(struct gate_descriptor *gd, void (* handler)(void),
                  int ist, int type, int dpl, int selector)
{
    memset(gd, 0, sizeof(struct gate_descriptor));
    gd->gd_looffset = (uintptr_t)handler & ((1UL << 16) - 1);
    gd->gd_hioffset = (uintptr_t)handler >> 16;
    gd->gd_selector = selector;
    gd->gd_ist = ist;
    gd->gd_type = type;
    gd->gd_dpl = dpl;
    gd->gd_p = 1;
}
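/* Example of the address split performed by setgd() (hypothetical handler
 * address, for illustration only): for handler == 0xffffffff8100abcd,
 *   gd_looffset = 0xabcd           (low 16 bits)
 *   gd_hioffset = 0xffffffff8100   (remaining upper 48 bits)
 * The CPU reassembles the 64-bit entry point from the two fields when the
 * gate is taken. */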
/**
 * \brief Sets up the default IDT for current CPU.
 */
void setup_default_idt(void)
{
    struct region_descriptor region = {         // set default IDT
        .rd_limit = NIDT * sizeof(idt[0]) - 1,
        .rd_base = (uint64_t)&idt
    };
    int i;

    memset((void *)&idt, 0, NIDT * sizeof(idt[0]));

    // initialize IDT with default generic handlers
    for (i = 0; i < NIDT; i++)
        setgd(&idt[i], hwexc_666, 0, SDT_SYSIGT, SEL_KPL,
              GSEL(KCODE_SEL, SEL_KPL));
    /* Setup exception handlers */
    setgd(&idt[0], hwexc_0, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[1], hwexc_1, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[2], hwexc_2, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[3], hwexc_3, 0, SDT_SYSIGT, SEL_UPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[4], hwexc_4, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[5], hwexc_5, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[6], hwexc_6, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[7], hwexc_7, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[8], hwexc_8, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[9], hwexc_9, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[10], hwexc_10, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[11], hwexc_11, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[12], hwexc_12, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[13], hwexc_13, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[14], hwexc_14, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    // Interrupt 15 is undefined
    setgd(&idt[16], hwexc_16, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[17], hwexc_17, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[18], hwexc_18, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[19], hwexc_19, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    // Interrupts 20 - 31 are reserved

    /* Setup classic PIC interrupt handlers */
    setgd(&idt[32], hwirq_32, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[33], hwirq_33, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[34], hwirq_34, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[35], hwirq_35, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[36], hwirq_36, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[37], hwirq_37, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[38], hwirq_38, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[39], hwirq_39, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[40], hwirq_40, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[41], hwirq_41, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[42], hwirq_42, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[43], hwirq_43, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[44], hwirq_44, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[45], hwirq_45, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[46], hwirq_46, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[47], hwirq_47, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    // Setup generic interrupt handlers
    setgd(&idt[48], hwirq_48, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[49], hwirq_49, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[50], hwirq_50, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[51], hwirq_51, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[52], hwirq_52, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[53], hwirq_53, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[54], hwirq_54, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[55], hwirq_55, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[56], hwirq_56, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[57], hwirq_57, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[58], hwirq_58, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[59], hwirq_59, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[60], hwirq_60, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[61], hwirq_61, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));

    // XXX Interrupts used for TRACE IPIs
    setgd(&idt[62], hwirq_62, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[63], hwirq_63, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    // Setup local APIC interrupt handlers
    setgd(&idt[248], hwirq_248, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[249], hwirq_249, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[250], hwirq_250, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[251], hwirq_251, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[252], hwirq_252, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[253], hwirq_253, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    setgd(&idt[254], hwirq_254, 0, SDT_SYSIGT, SEL_KPL, GSEL(KCODE_SEL, SEL_KPL));
    /* Load IDT register */
    __asm volatile("lidt %0" :: "m" (region));
}