Str "-mno-sse4a",
Str "-mno-3dnow",
-- specific Xeon Phi architecture
- Str "Wa,-march=k1om",
- Str "Wa,-mtune=k1om",
+ Str "-Wa,-march=k1om",
+ Str "-Wa,-mtune=k1om",
Str "-D__x86__" ]
cFlags = ArchDefaults.commonCFlags
--- /dev/null
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+alias errval uint64;
+alias cycles uint64;
--- /dev/null
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+alias genpaddr uint64;
+alias genvaddr uint64;
+alias rsrcid uint32;
--- /dev/null
+/**
+ * \file
+ * \brief Architecture specific dispatcher structure private to the user
+ */
+
+/*
+ * Copyright (c) 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef TARGET_X86_64_BARRELFISH_DISPATCHER_H
+#define TARGET_X86_64_BARRELFISH_DISPATCHER_H
+
+#include <barrelfish_kpi/dispatcher_shared.h>
+#include <barrelfish_kpi/dispatcher_shared_arch.h>
+#include <barrelfish/dispatcher.h>
+
+/// Dispatcher structure (including data accessed only by user code)
+struct dispatcher_x86_64 {
+ struct dispatcher_shared_x86_64 d; ///< Shared (user/kernel) data. Must be first.
+ struct dispatcher_generic generic; ///< User private data
+
+ uint16_t disp_seg_selector; ///< Dispatcher segment selector
+ /// Dummy segment to which disp_seg_selector refers; see ldt_init_disabled()
+ uintptr_t dummyseg[2];
+
+ /* Incoming LMP endpoints (buffers and receive cap pointers) follow */
+};
+
+static inline struct dispatcher_generic*
+get_dispatcher_generic_x86_64(dispatcher_handle_t handle)
+{
+ struct dispatcher_x86_64 *disp = (struct dispatcher_x86_64*)handle;
+ return &disp->generic;
+}
+
+static inline struct dispatcher_x86_64 *
+get_dispatcher_x86_64(dispatcher_handle_t handle)
+{
+ return (struct dispatcher_x86_64*)handle;
+}
+
+#endif // TARGET_X86_64_BARRELFISH_DISPATCHER_H
--- /dev/null
+/**
+ * \file
+ * \brief Arch-specific declarations that can be included by others
+ */
+
+/*
+ * Copyright (c) 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef TARGET_X86_64_BARRELFISH_PMAP_H
+#define TARGET_X86_64_BARRELFISH_PMAP_H
+
+#include <target/x86/barrelfish/pmap_target.h>
+
+struct pmap_dump_info {
+ size_t pml4_index, pdpt_index, pdir_index, pt_index;
+ vregion_flags_t flags;
+ struct capref cap;
+ genvaddr_t offset;
+};
+#define PRIfmtPTIDX "%zd.%zd.%zd.%zd"
+#define GET_PTIDX(dump_info) (dump_info)->pml4_index, (dump_info)->pdpt_index, \
+ (dump_info)->pdir_index, (dump_info)->pt_index
+
+errval_t pmap_x86_64_init(struct pmap *pmap, struct vspace *vspace,
+ struct capref vnode,
+ struct slot_allocator *opt_slot_alloc);
+errval_t pmap_x86_64_current_init(bool);
+
+#endif // TARGET_X86_64_BARRELFISH_PMAP_H
--- /dev/null
+/**
+ * \file
+ * \brief Arch specific CPU declarations
+ */
+
+/*
+ * Copyright (c) 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+/*-
+ * Copyright (c) 1989, 1990 William F. Jolitz
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)segments.h 7.1 (Berkeley) 5/9/91
+ * $FreeBSD$
+ */
+
+#ifndef TARGET_X86_64_BARRELFISH_KPI_CPU_H
+#define TARGET_X86_64_BARRELFISH_KPI_CPU_H
+
+/*
+ * AMD64 Segmentation Data Structures and definitions
+ */
+
+/**
+ * \brief Segment descriptor
+ */
+union segment_descriptor { // FIXME: rename to x86_64_segment_descriptor
+ uint64_t raw;
+ struct {
+ uint64_t lo_limit:16;
+ uint64_t lo_base:24;
+ uint64_t type:4;
+ uint64_t system_desc:1; // S
+ uint64_t privilege_level:2; // DPL
+ uint64_t present:1; // P
+ uint64_t hi_limit:4;
+ uint64_t available:1; // AVL
+ uint64_t long_mode:1; // L
+ uint64_t operation_size:1; // D/B
+ uint64_t granularity:1; // G
+ uint64_t hi_base:8;
+ } d;
+ struct {
+ uint64_t lo_limit:16;
+ uint64_t lo_base:24;
+ uint64_t type:4;
+ uint64_t always0:1;
+ uint64_t privilege_level:2;
+ uint64_t present:1;
+ uint64_t hi_limit:4;
+ uint64_t available:1;
+ uint64_t always0_1:2;
+ uint64_t granularity:1;
+ uint64_t hi_base:8;
+ } sys_lo; ///< low part of system descriptor (TSS, LDT, etc.)
+ struct {
+ uint64_t base:32;
+ uint64_t reserved:8;
+ uint64_t always0:5;
+ uint64_t reserved2:19;
+ } sys_hi; ///< high part of system descriptor (TSS, LDT, etc.)
+};
+
+/// Constructs a segment selector in the LDT with user privilege
+#define X86_64_LDT_SELECTOR(s) (((s)<<3) | 7)
+
+/// Return the index of a segment selector
+#define X86_64_SELECTOR_IDX(s) ((s)>>3)
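+
+/*
+ * Illustrative sketch (not part of the interface): a selector encodes
+ * (index << 3) | TI | RPL, where TI=4 selects the LDT and RPL=3 is user
+ * privilege. Hence X86_64_LDT_SELECTOR(2) == (2 << 3) | 7 == 0x17, and
+ * X86_64_SELECTOR_IDX(0x17) == 2 recovers the table index.
+ */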
+
+#endif
--- /dev/null
+/**
+ * \file
+ * \brief Architecture specific dispatcher struct shared between kernel and user
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, 2011, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef TARGET_X86_64_BARRELFISH_KPI_DISPATCHER_SHARED_H
+#define TARGET_X86_64_BARRELFISH_KPI_DISPATCHER_SHARED_H
+
+#include <barrelfish_kpi/dispatcher_shared.h>
+
+/// Architecture-specific kernel/user shared dispatcher struct
+struct dispatcher_shared_x86_64 {
+ struct dispatcher_shared_generic d; ///< Generic portion
+
+ lvaddr_t crit_pc_low; ///< Critical section lower PC bound
+ lvaddr_t crit_pc_high; ///< Critical section upper PC bound
+
+ lvaddr_t ldt_base; ///< Base address of local descriptor table (LDT)
+ size_t ldt_npages; ///< Size of local descriptor table (# 4k pages)
+
+ struct registers_x86_64 enabled_save_area; ///< Enabled register save area
+ struct registers_x86_64 disabled_save_area; ///< Disabled register save area
+ struct registers_x86_64 trap_save_area; ///< Trap register save area
+ struct registers_fpu_x86_64 enabled_fpu_state; ///< FPU register save area
+ struct registers_fpu_x86_64 disabled_fpu_state; ///< FPU register save area
+};
+
+static inline struct dispatcher_shared_x86_64*
+get_dispatcher_shared_x86_64(dispatcher_handle_t handle)
+{
+ return (struct dispatcher_shared_x86_64*)handle;
+}
+
+static inline struct registers_x86_64*
+dispatcher_x86_64_get_enabled_save_area(dispatcher_handle_t handle)
+{
+ struct dispatcher_shared_x86_64 *disp =
+ get_dispatcher_shared_x86_64(handle);
+ return &disp->enabled_save_area;
+}
+
+static inline struct registers_x86_64*
+dispatcher_x86_64_get_disabled_save_area(dispatcher_handle_t handle)
+{
+ struct dispatcher_shared_x86_64 *disp =
+ get_dispatcher_shared_x86_64(handle);
+ return &disp->disabled_save_area;
+}
+
+static inline struct registers_x86_64*
+dispatcher_x86_64_get_trap_save_area(dispatcher_handle_t handle)
+{
+ struct dispatcher_shared_x86_64 *disp =
+ get_dispatcher_shared_x86_64(handle);
+ return &disp->trap_save_area;
+}
+
+static inline struct registers_fpu_x86_64*
+dispatcher_x86_64_get_enabled_fpu_save_area(dispatcher_handle_t handle)
+{
+ struct dispatcher_shared_x86_64 *disp =
+ get_dispatcher_shared_x86_64(handle);
+ return &disp->enabled_fpu_state;
+}
+
+static inline struct registers_fpu_x86_64*
+dispatcher_x86_64_get_disabled_fpu_save_area(dispatcher_handle_t handle)
+{
+ struct dispatcher_shared_x86_64 *disp =
+ get_dispatcher_shared_x86_64(handle);
+ return &disp->disabled_fpu_state;
+}
+
+#endif // TARGET_X86_64_BARRELFISH_KPI_DISPATCHER_SHARED_H
--- /dev/null
+/**
+ * \file
+ * \brief Arch specific definitions, can be included by others.
+ */
+
+/*
+ * Copyright (c) 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef TARGET_X86_64_BARRELFISH_KPI_PAGING_H
+#define TARGET_X86_64_BARRELFISH_KPI_PAGING_H
+
+#ifndef __ASSEMBLER__
+typedef uint64_t paging_x86_64_flags_t;
+#endif
+
+/** The system's base page size is 4kB */
+#define X86_64_BASE_PAGE_BITS 12
+#define X86_64_BASE_PAGE_SIZE 0x1000
+#define X86_64_BASE_PAGE_MASK (X86_64_BASE_PAGE_SIZE - 1)
+#define X86_64_BASE_PAGE_OFFSET(a) ((a) & X86_64_BASE_PAGE_MASK)
+
+/** The system's large page size is 2MB */
+#define X86_64_LARGE_PAGE_BITS 21
+#define X86_64_LARGE_PAGE_SIZE 0x200000
+#define X86_64_LARGE_PAGE_MASK (X86_64_LARGE_PAGE_SIZE - 1)
+#define X86_64_LARGE_PAGE_OFFSET(a) ((a) & X86_64_LARGE_PAGE_MASK)
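+
+/*
+ * Worked example (illustration only): for the address 0x201234,
+ * X86_64_BASE_PAGE_OFFSET(0x201234) == 0x234 is the offset within its
+ * 4kB page, and X86_64_LARGE_PAGE_OFFSET(0x201234) == 0x1234 the offset
+ * within its 2MB page.
+ */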
+
+/**
+ * Bits within the various page directories and tables.
+ */
+#define X86_64_PTABLE_EXECUTE_DISABLE (((paging_x86_64_flags_t)1) << 63)
+#define X86_64_PTABLE_GLOBAL_PAGE (((paging_x86_64_flags_t)1) << 8)
+#define X86_64_PTABLE_ATTR_INDEX (((paging_x86_64_flags_t)1) << 7)
+#define X86_64_PTABLE_DIRTY (((paging_x86_64_flags_t)1) << 6)
+#define X86_64_PTABLE_ACCESSED (((paging_x86_64_flags_t)1) << 5)
+#define X86_64_PTABLE_CACHE_DISABLED (((paging_x86_64_flags_t)1) << 4)
+#define X86_64_PTABLE_WRITE_THROUGH (((paging_x86_64_flags_t)1) << 3)
+#define X86_64_PTABLE_USER_SUPERVISOR (((paging_x86_64_flags_t)1) << 2)
+#define X86_64_PTABLE_READ_WRITE (((paging_x86_64_flags_t)1) << 1)
+#define X86_64_PTABLE_PRESENT (((paging_x86_64_flags_t)1) << 0)
+
+#define X86_64_PTABLE_SIZE 512 /**< Page directory/table size */
+#define X86_64_PTABLE_MASK 0x1ff /**< Page dir/table address mask */
+#define X86_64_PTABLE_CLEAR 0 /**< Bitmap of a clear table entry */
+
+#define X86_64_PTABLE_ENTRY_SIZE sizeof(union x86_64_pdir_entry)
+
+/// Default access is read/write, but not execute
+#define X86_64_PTABLE_ACCESS_DEFAULT \
+ (X86_64_PTABLE_EXECUTE_DISABLE | X86_64_PTABLE_USER_SUPERVISOR | \
+ X86_64_PTABLE_READ_WRITE)
+#define X86_64_PTABLE_ACCESS_READONLY \
+ (X86_64_PTABLE_EXECUTE_DISABLE | X86_64_PTABLE_USER_SUPERVISOR)
+
+/* Macros to compute the corresponding portions of the vaddr */
+#define X86_64_PML4_BASE(base) (((uint64_t)(base) >> 39) & X86_64_PTABLE_MASK)
+#define X86_64_PDPT_BASE(base) (((uint64_t)(base) >> 30) & X86_64_PTABLE_MASK)
+#define X86_64_PDIR_BASE(base) (((uint64_t)(base) >> 21) & X86_64_PTABLE_MASK)
+#define X86_64_PTABLE_BASE(base) (((uint64_t)(base) >> 12) & X86_64_PTABLE_MASK)
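+
+/*
+ * Worked example (illustration only): the address 0x200000 (2MB)
+ * decomposes into table indices
+ *
+ *   X86_64_PML4_BASE(0x200000)   == 0
+ *   X86_64_PDPT_BASE(0x200000)   == 0
+ *   X86_64_PDIR_BASE(0x200000)   == 1
+ *   X86_64_PTABLE_BASE(0x200000) == 0
+ *
+ * i.e. the second 2MB region of the lowest GByte.
+ */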
+
+#endif // TARGET_X86_64_BARRELFISH_KPI_PAGING_H
--- /dev/null
+/**
+ * \file
+ * \brief Arch specific definition of the registers, can be included by anyone.
+ * Definitions shared by kernel and user
+ */
+
+/*
+ * Copyright (c) 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef TARGET_X86_64_BARRELFISH_KPI_REGISTERS_H
+#define TARGET_X86_64_BARRELFISH_KPI_REGISTERS_H
+
+#include <barrelfish_kpi/eflags_arch.h> // for USER_EFLAGS
+#include <barrelfish_kpi/types.h> // for lvaddr_t
+
+/** \brief Number of registers to be saved for this architecture
+ *
+ * This is the same as ARCH_NUMREGS, but it is used by assembler stubs, so needs
+ * to be defined here as a constant.
+ */
+#define X86_64_NUM_REGS 20
+
+#ifndef __ASSEMBLER__
+
+// Warning: both the GDB stubs and any number of asm fragments depend
+// on the order of these fields. Don't change them without due care!
+struct registers_x86_64 {
+ uint64_t rax, rbx, rcx, rdx, rsi, rdi, rbp, rsp,
+ r8, r9, r10, r11, r12, r13, r14, r15, rip, eflags;
+    uint16_t fs, gs; ///< The only segment selectors meaningful in 64-bit mode
+};
+
+struct registers_fpu_x86_64 {
+ // Should be aligned at 16-byte boundary, according to Intel
+ // description of FXRSTOR instruction.
+ uint8_t registers[512 + 16] __attribute__ ((aligned (16)));
+};
+
+static inline void
+registers_x86_64_set_entry(struct registers_x86_64 *regs, lvaddr_t entry)
+{
+ regs->rip = entry;
+ regs->eflags = USER_EFLAGS;
+ regs->fs = 0;
+ regs->gs = 0;
+}
+
+static inline void
+registers_x86_64_set_param(struct registers_x86_64 *regs, uint64_t param)
+{
+ regs->rax = param;
+}
+
+static inline void
+registers_x86_64_get_param(struct registers_x86_64 *regs, uint64_t *param)
+{
+ *param = regs->rax;
+}
+
+static inline uint64_t
+registers_x86_64_get_ip(struct registers_x86_64 *regs)
+{
+ return regs->rip;
+}
+
+static inline uint64_t
+registers_x86_64_get_sp(struct registers_x86_64 *regs)
+{
+ return regs->rsp;
+}
+
+#endif // __ASSEMBLER__
+#endif // TARGET_X86_64_BARRELFISH_KPI_REGISTERS_H
conio_cls();
serial_console_init();
+ panic("Hello World!\n");
+
void __attribute__ ((noreturn)) (*reloc_text_init)(void) =
(void *)local_phys_to_mem((lpaddr_t)text_init);
struct Elf64_Shdr *rela, *symtab;
--- /dev/null
+/**
+ * \file
+ * \brief Header for x86-specific GDB stub code.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <x86.h>
+
+/**
+ * \brief X86_64 register set
+ *
+ * As defined by GDB.
+ */
+enum gdb_x86_64_register_nums {
+ GDB_X86_64_RAX_REG, GDB_X86_64_RBX_REG, GDB_X86_64_RCX_REG, GDB_X86_64_RDX_REG,
+ GDB_X86_64_RSI_REG, GDB_X86_64_RDI_REG, GDB_X86_64_RBP_REG, GDB_X86_64_RSP_REG,
+ GDB_X86_64_R8_REG, GDB_X86_64_R9_REG, GDB_X86_64_R10_REG, GDB_X86_64_R11_REG,
+ GDB_X86_64_R12_REG, GDB_X86_64_R13_REG, GDB_X86_64_R14_REG, GDB_X86_64_R15_REG,
+ GDB_X86_64_RIP_REG, GDB_X86_64_EFLAGS_REG, GDB_X86_64_CS_REG, GDB_X86_64_SS_REG,
+
+/* these are not saved/used in 64-bit mode, and currently avoided
+ DS_REG, ES_REG, FS_REG, GS_REG,
+*/
+
+/* these are not used yet:
+ ST0_REG, ST1_REG, ST2_REG, ST3_REG, ST4_REG, ST5_REG, ST6_REG, ST7_REG,
+
+ FCTRL_REG, FSTAT_REG, FTAG_REG, FISEG_REG,
+ FIOFF_REG, FOSEG_REG, FOOFF_REG, FOP_REG,
+
+ XMM0_REG, XMM1_REG, XMM2_REG, XMM3_REG, XMM4_REG, XMM5_REG,
+ XMM6_REG, XMM7_REG, XMM8_REG, XMM9_REG, XMM10_REG, XMM11_REG,
+ XMM12_REG, XMM13_REG, XMM14_REG, XMM15_REG,
+ MXCSR_REG
+*/
+
+ GDB_X86_64_NUM_REGS /* not a real register; must be last! */
+};
+
+extern uintptr_t *gdb_arch_registers;
+
+/** Address of saved registers as void * */
+#define GDB_ARCH_REGADDR ((void*)gdb_arch_registers)
+
+/** Number of bytes saved in GDB frame */
+#define GDB_ARCH_REGBYTES (sizeof(uintptr_t) * GDB_X86_64_NUM_REGS)
--- /dev/null
+/**
+ * \file
+ * \brief Address-space support for Mackerel CPUID device definitions
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef CPUID_SPACES_H
+#define CPUID_SPACES_H
+
+// CPUID writes all of eax/ebx/ecx/edx, so declare all four as outputs;
+// naming only one would leave the others silently clobbered behind the
+// compiler's back.
+#define CPUID(offset, eax, ebx, ecx, edx)                               \
+    __asm volatile("cpuid"                                              \
+                   : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)     \
+                   : "a" (offset))
+
+static inline uint32_t cpuid_eax_read_32(cpuid_t *dev, size_t offset)
+{
+    uint32_t eax, ebx, ecx, edx;
+    CPUID(offset, eax, ebx, ecx, edx);
+    (void)ebx; (void)ecx; (void)edx;
+    return eax;
+}
+
+static inline uint32_t cpuid_ebx_read_32(cpuid_t *dev, size_t offset)
+{
+    uint32_t eax, ebx, ecx, edx;
+    CPUID(offset, eax, ebx, ecx, edx);
+    (void)eax; (void)ecx; (void)edx;
+    return ebx;
+}
+
+static inline uint32_t cpuid_ecx_read_32(cpuid_t *dev, size_t offset)
+{
+    uint32_t eax, ebx, ecx, edx;
+    CPUID(offset, eax, ebx, ecx, edx);
+    (void)eax; (void)ebx; (void)edx;
+    return ecx;
+}
+
+static inline uint32_t cpuid_edx_read_32(cpuid_t *dev, size_t offset)
+{
+    uint32_t eax, ebx, ecx, edx;
+    CPUID(offset, eax, ebx, ecx, edx);
+    (void)eax; (void)ebx; (void)ecx;
+    return edx;
+}
+
+static inline uint32_t cpuid_dcpa_read_32(cpuid_t *dev, size_t offset)
+{
+ // Unimplemented
+ return 0;
+}
+
+static inline uint32_t cpuid_dcpb_read_32(cpuid_t *dev, size_t offset)
+{
+ return 0;
+}
+
+static inline uint32_t cpuid_dcpc_read_32(cpuid_t *dev, size_t offset)
+{
+ return 0;
+}
+
+// CPUID registers cannot be written; these stubs exist only to satisfy
+// the Mackerel accessor interface.
+
+static inline void cpuid_eax_write_32(cpuid_t *dev, size_t offset,
+ uint32_t value)
+{
+}
+
+static inline void cpuid_ebx_write_32(cpuid_t *dev, size_t offset,
+ uint32_t value)
+{
+}
+
+static inline void cpuid_ecx_write_32(cpuid_t *dev, size_t offset,
+ uint32_t value)
+{
+}
+
+static inline void cpuid_edx_write_32(cpuid_t *dev, size_t offset,
+ uint32_t value)
+{
+}
+
+static inline void cpuid_dcpa_write_32(cpuid_t *dev, size_t offset,
+ uint32_t value)
+{
+}
+
+static inline void cpuid_dcpb_write_32(cpuid_t *dev, size_t offset,
+ uint32_t value)
+{
+}
+
+static inline void cpuid_dcpc_write_32(cpuid_t *dev, size_t offset,
+ uint32_t value)
+{
+}
+
+#undef CPUID
+
+#endif // CPUID_SPACES_H
--- /dev/null
+/**
+ * \file
+ * \brief FPU lazy context switch support
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2010, 2011, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef __FPU_H
+#define __FPU_H
+
+static inline void fpu_trap_on(void)
+{
+ uint64_t cr0;
+ __asm volatile("mov %%cr0, %%rax" : "=a" (cr0));
+ cr0 |= (1 << 3); // Set TS
+ __asm volatile("mov %%rax,%%cr0" :: "a" (cr0));
+}
+
+static inline bool fpu_trap_get(void)
+{
+ uint64_t cr0;
+ __asm volatile("mov %%cr0, %%rax" : "=a" (cr0));
+ return cr0 & (1 << 3);
+}
+
+static inline void fpu_trap_off(void)
+{
+ clts();
+}
+
+#endif
--- /dev/null
+/**
+ * \file
+ * \brief x86-64 architecture initialization
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef INIT_H
+#define INIT_H
+
+/// Magic value passed when kernel is booted by itself (and not bootloader)
+#define KERNEL_BOOT_MAGIC 0x33e1f154
+
+#ifndef __ASSEMBLER__
+
+extern bool idt_initialized;
+
+void arch_init(uint64_t magic, void *pointer) __attribute__ ((noreturn));
+
+/**
+ * Fast system call entry point (in Assembler).
+ */
+extern void syscall_entry(void);
+
+#endif
+
+#endif
--- /dev/null
+/**
+ * \file
+ * \brief x86-64 interrupt/exception handling
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+/*-
+ * Copyright (c) 1989, 1990 William F. Jolitz
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)segments.h 7.1 (Berkeley) 5/9/91
+ * $FreeBSD$
+ */
+
+#ifndef KERNEL_IRQ_H
+#define KERNEL_IRQ_H
+
+/*
+ * AMD64 Segmentation Data Structures and definitions
+ */
+
+/*
+ * Selectors
+ */
+
+#define SEL_RPL_MASK 3 /* requester priv level */
+#define ISPL(s) ((s)&3) /* what is the priority level of a selector */
+#define SEL_KPL 0 /* kernel priority level */
+#define SEL_UPL 3 /* user priority level */
+#define ISLDT(s) ((s)&SEL_LDT) /* is it local or global */
+#define SEL_LDT 4 /* local descriptor table */
+#define IDXSEL(s) (((s)>>3) & 0x1fff) /* index of selector */
+#define LSEL(s,r) (((s)<<3) | SEL_LDT | (r)) /* a local selector */
+#define GSEL(s,r) (((s)<<3) | (r)) /* a global selector */
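+
+/*
+ * Example (illustration only): with the GDT layout defined below,
+ * GSEL(KCODE_SEL, SEL_KPL) == (1 << 3) | 0 == 0x08 is the kernel code
+ * selector, and GSEL(UCODE_SEL, SEL_UPL) == (4 << 3) | 3 == 0x23 the
+ * user code selector.
+ */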
+
+/**
+ * Gate descriptors (indirect descriptors such as call, trap and interrupt
+ * gates; 128 bits on x86-64). Only interrupt and trap gates have gd_ist.
+ */
+struct gate_descriptor {
+ uint64_t gd_looffset:16; /* gate offset (lsb) */
+ uint64_t gd_selector:16; /* gate segment selector */
+ uint64_t gd_ist:3; /* IST table index */
+ uint64_t gd_xx:5; /* unused */
+ uint64_t gd_type:5; /* segment type */
+ uint64_t gd_dpl:2; /* segment descriptor priority level */
+ uint64_t gd_p:1; /* segment descriptor present */
+ uint64_t gd_hioffset:48; /* gate offset (msb) */
+ uint64_t sd_xx1:32;
+} __attribute__((packed));
+
+/* system segments and gate types */
+#define SDT_SYSNULL 0 /* system null */
+#define SDT_SYSLDT 2 /* system 64 bit local descriptor table */
+#define SDT_SYSTSS 9 /* system available 64 bit TSS */
+#define SDT_SYSBSY 11 /* system busy 64 bit TSS */
+#define SDT_SYSCGT 12 /* system 64 bit call gate */
+#define SDT_SYSIGT 14 /* system 64 bit interrupt gate */
+#define SDT_SYSTGT 15 /* system 64 bit trap gate */
+
+/* memory segment types */
+#define SDT_MEMRO 16 /* memory read only */
+#define SDT_MEMROA 17 /* memory read only accessed */
+#define SDT_MEMRW 18 /* memory read write */
+#define SDT_MEMRWA 19 /* memory read write accessed */
+#define SDT_MEMROD 20 /* memory read only expand dwn limit */
+#define SDT_MEMRODA 21 /* memory read only expand dwn limit accessed */
+#define SDT_MEMRWD 22 /* memory read write expand dwn limit */
+#define SDT_MEMRWDA 23 /* memory read write expand dwn limit accessed */
+#define SDT_MEME 24 /* memory execute only */
+#define SDT_MEMEA 25 /* memory execute only accessed */
+#define SDT_MEMER 26 /* memory execute read */
+#define SDT_MEMERA 27 /* memory execute read accessed */
+#define SDT_MEMEC 28 /* memory execute only conforming */
+#define SDT_MEMEAC 29 /* memory execute only accessed conforming */
+#define SDT_MEMERC 30 /* memory execute read conforming */
+#define SDT_MEMERAC 31 /* memory execute read accessed conforming */
+
+/*
+ * Size of IDT table
+ */
+#define NIDT 256 /* 32 reserved, 16 h/w, 0 s/w, linux's 0x80 */
+
+/*
+ * Entries in the Global Descriptor Table (GDT)
+ */
+#define NULL_SEL 0 /**< Null descriptor */
+#define KCODE_SEL 1 /**< Kernel code descriptor */
+#define KSTACK_SEL 2 /**< Shared user/kernel stack descriptor */
+#define USTACK_SEL 3 /**< User stack descriptor */
+#define UCODE_SEL 4 /**< User code descriptor */
+#define TSS_LO_SEL 5 /**< Task State Segment (TSS) -- low 64bit */
+#define TSS_HI_SEL 6 /**< Task State Segment (TSS) -- high 64bit */
+#define LDT_LO_SEL 7 /**< Local descriptor table (LDT) -- low */
+#define LDT_HI_SEL 8 /**< Local descriptor table (LDT) -- high */
+#define NGDT_MEM 9 /**< Number of descriptors */
+
+/**
+ * Region descriptors, used to load the GDT/IDT tables before the segments
+ * they describe exist.
+ */
+struct region_descriptor {
+ uint16_t rd_limit; /**< segment extent */
+ uint64_t rd_base; /**< base address */
+} __attribute__((packed));
+
+struct task_state_segment {
+ uint32_t reserved;
+ uint64_t rsp[3];
+ uint64_t reserved2;
+ uint64_t ist[7];
+ uint64_t reserved3;
+ uint16_t reserved4;
+ uint16_t iomap_base;
+} __attribute__ ((packed));
+
+void setup_default_idt(void);
+
+errval_t irq_table_set(unsigned int nidt, capaddr_t endpoint);
+errval_t irq_table_delete(unsigned int nidt);
+
+#endif
--- /dev/null
+/**
+ * \file
+ * \brief Relay header for multiboot structures and kernel-specific
+ * function definitions.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef KERNEL_MULTIBOOT_H
+#define KERNEL_MULTIBOOT_H
+
+#include <multiboot.h>
+
+/**
+ * Convert a 32bit address from the Multiboot header to a native virtual
+ * address as a char pointer.
+ */
+#define MBADDR_ASSTRING(vaddr) (char * NTS)TC((uintptr_t)(local_phys_to_mem(vaddr)))
+
+void multiboot_info_print(struct multiboot_info *mb);
+struct multiboot_modinfo *multiboot_find_module(const char *basename);
+uintptr_t multiboot_end_addr(struct multiboot_info *mi);
+
+#endif // KERNEL_MULTIBOOT_H
--- /dev/null
+/**
+ * \file
+ * \brief Relay header for kernel console output (kputchar)
+ */
+
+/*
+ * Copyright (c) 2008, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <arch/x86/kputchar.h>
--- /dev/null
+/**
+ * \file
+ * \brief Miscellaneous architecture-specific functions
+ */
+
+/*
+ * Copyright (c) 2008, 2009, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_MISC_H
+#define ARCH_MISC_H
+
+#include <x86.h>
+#include <irq.h>
+
+void maybe_reload_ldt(struct dcb *dcb, bool force_reload);
+
+/**
+ * \brief Set thread-local-storage register.
+ */
+static inline void arch_set_thread_register(uintptr_t val)
+{
+ panic("shouldn't be called for x64 -AB");
+#if 0
+ curdisp->d.lo_base = val & ((1 << 24) - 1);
+ curdisp->d.hi_base = val >> 24;
+ __asm volatile("mov %[fs], %%fs" :: [fs] "r" (GSEL(DISP_SEL, SEL_UPL)));
+#else
+ wrmsr(MSR_IA32_FSBASE, val);
+#endif
+}
+
+#define arch_get_cycle_count() rdtscp()
+
+#endif
--- /dev/null
+/**
+ * \file
+ * \brief Architecture-specific address-space offsets
+ */
+
+/*
+ * Copyright (c) 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef OFFSETS_ARCH_H
+#define OFFSETS_ARCH_H
+
+#include <target/x86_64/offsets_target.h>
+
+#define PADDR_SPACE_SIZE X86_64_PADDR_SPACE_SIZE
+#define PADDR_SPACE_LIMIT X86_64_PADDR_SPACE_LIMIT
+
+#define REAL_MODE_LINEAR_OFFSET X86_64_REAL_MODE_LINEAR_OFFSET
+#define REAL_MODE_SEGMENT X86_64_REAL_MODE_SEGMENT
+#define REAL_MODE_OFFSET X86_64_REAL_MODE_OFFSET
+
+#define REAL_MODE_SEGMENT_TO_REAL_MODE_PAGE(seg) X86_64_REAL_MODE_SEGMENT_TO_REAL_MODE_PAGE(seg)
+#define REAL_MODE_ADDR_TO_REAL_MODE_VECTOR(seg,off) X86_64_REAL_MODE_ADDR_TO_REAL_MODE_VECTOR(seg,off)
+
+#ifndef __ASSEMBLER__
+
+/**
+ * Takes absolute physical address addr and returns corresponding
+ * "physical memory address space" address.
+ *
+ * \param addr Absolute physical address
+ *
+ * \return Corresponding "physical memory address space" address.
+ */
+static inline lvaddr_t local_phys_to_mem(lpaddr_t addr)
+{
+ assert(addr < X86_64_PADDR_SPACE_LIMIT);
+ return (lvaddr_t)(addr + (lvaddr_t)X86_64_MEMORY_OFFSET);
+}
+
+/**
+ * Takes "physical memory address space" address and returns
+ * corresponding physical address.
+ *
+ * \param addr Absolute physical address
+ *
+ * \return Corresponding "physical memory address space" address.
+ */
+static inline lpaddr_t mem_to_local_phys(lvaddr_t addr)
+{
+ assert(addr >= X86_64_MEMORY_OFFSET);
+ return (lpaddr_t)(addr - (lpaddr_t)X86_64_MEMORY_OFFSET);
+}
+
+static inline lpaddr_t gen_phys_to_local_phys(genpaddr_t addr)
+{
+ return (lpaddr_t)addr;
+}
+
+static inline genpaddr_t local_phys_to_gen_phys(lpaddr_t addr)
+{
+ return (genpaddr_t)addr;
+}
+
+/**
+ * Symbol: Start of kernel image. This symbol points to the start
+ * address of the kernel image.
+ */
+extern char _start_kernel;
+
+/**
+ * Symbol: End of kernel image. This symbol points to the end address
+ * of the kernel image.
+ */
+extern char _end_kernel;
+
+/**
+ * The size of the whole kernel image.
+ */
+#define SIZE_KERNEL_IMAGE \
+ (size_t)(&_end_kernel - &_start_kernel)
+
+#endif // __ASSEMBLER__
+
+#endif // OFFSETS_ARCH_H
--- /dev/null
+/**
+ * \file
+ * \brief Architecture specific kernel page table definitions
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef KERNEL_ARCH_X86_64_PAGING_H
+#define KERNEL_ARCH_X86_64_PAGING_H
+
+#include <target/x86_64/paging_kernel_target.h>
+#include <paging_kernel_helper.h>
+
+/** Physical memory page size is 2 MBytes */
+#define X86_64_MEM_PAGE_SIZE X86_64_LARGE_PAGE_SIZE
+
+/** Mask for physical memory page */
+#define X86_64_MEM_PAGE_MASK 0x1fffff
+
+/**
+ * Resolves to required number of entries in page directory pointer table to map
+ * 'limit' number of bytes.
+ */
+#define X86_64_PDPT_ENTRIES(limit) (X86_64_PML4_BASE((limit) - 1) + 1)
+
+/**
+ * Resolves to required number of entries in page directory to map 'limit'
+ * number of bytes.
+ */
+#define X86_64_PDIR_ENTRIES(limit) (X86_64_PDPT_BASE((limit) - 1) + 1)
+
+/**
+ * Resolves to required number of entries in page table to map 'limit' number
+ * of bytes.
+ */
+#define X86_64_PTABLE_ENTRIES(limit) (X86_64_PDIR_BASE((limit) - 1) + 1)
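+
+/*
+ * Worked example (illustration only): for the default 128 GByte
+ * X86_64_PADDR_SPACE_LIMIT ((genpaddr_t)1 << 37):
+ *
+ *   X86_64_PDPT_ENTRIES(limit)   == 1      (one PML4 slot)
+ *   X86_64_PDIR_ENTRIES(limit)   == 128    (128 GByte in 1 GByte steps)
+ *   X86_64_PTABLE_ENTRIES(limit) == 65536  (128 GByte in 2 MByte steps)
+ */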
+
+/**
+ * \brief Switch context.
+ *
+ * Assigns given physical base address to the CR3 register,
+ * effectively switching context to new address space. Be
+ * cautious that you only switch to "good" page tables.
+ *
+ * \param addr Physical base address of page table.
+ */
+static inline void paging_context_switch(lpaddr_t addr)
+{
+ paging_x86_64_context_switch(addr);
+}
+
+static inline lvaddr_t paging_map_device(lpaddr_t base, size_t size)
+{
+ return paging_x86_64_map_device(base, size);
+}
+
+static inline bool is_root_pt(enum objtype type) {
+ return type == ObjType_VNode_x86_64_pml4;
+}
+
+static inline size_t get_pte_size(void) {
+ return sizeof(union x86_64_ptable_entry);
+}
+
+static inline void do_one_tlb_flush(genvaddr_t vaddr)
+{
+ __asm__ __volatile__("invlpg %0" : : "m" (*(char *)vaddr));
+}
+
+static inline void do_selective_tlb_flush(genvaddr_t vaddr, genvaddr_t vend)
+{
+ for (genvaddr_t addr = vaddr; addr < vend; addr += X86_64_BASE_PAGE_SIZE) {
+ __asm__ __volatile__("invlpg %0" : : "m" (*(char *)addr));
+ }
+}
+
+static inline void do_full_tlb_flush(void) {
+ // XXX: FIXME: Going to reload cr3 to flush the entire TLB.
+ // This is inefficient.
+ // The current implementation is also not multicore safe.
+ // We should only invalidate the affected entry using invlpg
+ // and figure out which remote tlbs to flush.
+ uint64_t cr3;
+ __asm__ __volatile__("mov %%cr3,%0" : "=a" (cr3) : );
+ __asm__ __volatile__("mov %0,%%cr3" : : "a" (cr3));
+}
+
+
+#endif // KERNEL_ARCH_X86_64_PAGING_H
--- /dev/null
+/**
+ * \file
+ * \brief VMKit Kernel interface.
+ */
+
+/*
+ * Copyright (c) 2009, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef VMKIT_H
+#define VMKIT_H
+
+#define VMKIT_ERR_OK 0
+#define VMKIT_ERR_UNAVAIL (-1)
+
+errval_t vmkit_enable_virtualization (void);
+int vmkit_disable_virtualization (void);
+void __attribute__ ((noreturn)) vmkit_vmenter (struct dcb *dcb);
+
+#endif // VMKIT_H
--- /dev/null
+/**
+ * \file
+ * \brief X86 inline asm utilities and defines
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, 2011, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef __X86_H
+#define __X86_H
+
+#include <barrelfish_kpi/types.h>
+#include <barrelfish_kpi/cpu.h>
+#include <arch/x86/x86.h>
+#include <barrelfish_kpi/cpu_arch.h>
+
+/***** RFLAGS flags *****/
+
+/* Fixed flags */
+#define RFLAGS_ALWAYS1 (1 << 1)
+
+/* Status/Control flags */
+#define RFLAGS_CF (1 << 0)
+#define RFLAGS_PF (1 << 2)
+#define RFLAGS_AF (1 << 4)
+#define RFLAGS_ZF (1 << 6)
+#define RFLAGS_SF (1 << 7)
+#define RFLAGS_DF (1 << 10)
+#define RFLAGS_OF (1 << 11)
+
+/* System flags */
+#define RFLAGS_TF (1 << 8)
+#define RFLAGS_IF (1 << 9)
+#define RFLAGS_NT (1 << 14)
+#define RFLAGS_RF (1 << 16)
+#define RFLAGS_VM (1 << 17)
+#define RFLAGS_AC (1 << 18)
+#define RFLAGS_VIF (1 << 19)
+#define RFLAGS_VIP (1 << 20)
+#define RFLAGS_ID (1 << 21)
+
+/* I/O privilege flags */
+#define RFLAGS_IOPL0 (0 << 12)
+#define RFLAGS_IOPL1 (1 << 12)
+#define RFLAGS_IOPL2 (2 << 12)
+#define RFLAGS_IOPL3 (3 << 12)
+
+/**
+ * State of RFLAGS when executing a user-space program: Enable interrupts
+ */
+#define USER_RFLAGS (RFLAGS_ALWAYS1 | RFLAGS_IF)
+
+/**
+ * Allowed RFLAGS in user-space. Used when resuming programs.
+ */
+#define USER_RFLAGS_MASK \
+ (RFLAGS_CF | RFLAGS_PF | RFLAGS_AF | RFLAGS_ZF | RFLAGS_SF | RFLAGS_DF | \
+ RFLAGS_OF)
+
+
+#ifndef __ASSEMBLER__
+
+/**
+ * Registers automatically saved on kernel stack by CPU
+ */
+enum x86_64_cpu_save_registers {
+ X86_SAVE_RIP, X86_SAVE_CS, X86_SAVE_EFLAGS, X86_SAVE_RSP, X86_SAVE_SS,
+ X86_SAVE_AREA_SIZE
+};
+
+/** \brief Enable FPU */
+static inline void enable_fpu(void)
+{
+ uint64_t cr0, cr4;
+ __asm__ __volatile__("mov %%cr0, %%rax" : "=a" (cr0) : );
+ //clear EM
+ cr0 &= ~(1 << 2);
+ //set MP
+ cr0 |= (1 << 1);
+ //set NE
+ cr0 |= (1 << 5);
+#ifdef FPU_LAZY_CONTEXT_SWITCH
+ //set TS
+ cr0 |= (1 << 3);
+#else
+ //clear TS
+ cr0 &= ~(1 << 3);
+#endif
+ __asm__ __volatile__("mov %%rax,%%cr0" : : "a" (cr0));
+ //set OSFXSR
+ __asm__ __volatile__("mov %%cr4, %%rax" : "=a" (cr4) : );
+ cr4 |= (1 << 9);
+ __asm__ __volatile__("mov %%rax,%%cr4" : : "a" (cr4));
+
+#ifndef FPU_LAZY_CONTEXT_SWITCH
+ __asm volatile ("finit");
+#endif
+}
+
+#endif //__ASSEMBLER__
+
+#endif //__X86_H
--- /dev/null
+/**
+ * \file
+ * \brief X86-64 address space sizes and offsets
+ *
+ * The layout of the x86-64 virtual address space can be summarized as
+ * follows:
+ *
+ *<pre>
+ * +----------------------------------------------------+-----------------+
+ * | User-space | Physical memory |
+ * | PML4 entries: 0 1 2 3 4 ... 510 | 511 |
+ * +----------------------------------------------------+-----------------+</pre>
+ *
+ * User-space maps user-space programs. Physical memory maps all
+ * available physical memory (up to PADDR_SPACE_LIMIT).
+ *
+ * This partition is static and can only be changed at compile-time.
+ *
+ * Physical memory can grow downwards, towards user-space, although it
+ * is expected to stay within PML4 entry 511 for quite some time (one
+ * PML4 entry can map 512 GBytes). The rest of the address space can
+ * be freely mapped by (possibly multiple) user-space programs.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, 2011, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef KERNEL_TARGET_X86_64_OFFSETS_H
+#define KERNEL_TARGET_X86_64_OFFSETS_H
+
+/**
+ * Absolute size of virtual address space. This is 48-bit on x86-64
+ * currently, which equals 256 TBytes and allows for 512 PML4 slots,
+ * each of which can map 512 GBytes.
+ */
+#define X86_64_VADDR_SPACE_SIZE ((genpaddr_t)1 << 48)
+
+/**
+ * Absolute size of physical address space. This is also 48-bit.
+ */
+#define X86_64_PADDR_SPACE_SIZE ((genpaddr_t)1 << 48)
+
+/**
+ * Start address of kernel image in physical memory. This is passed to
+ * the linker also. The bootloader will load us there.
+ */
+#define X86_64_START_KERNEL_PHYS 0x100000
+
+/**
+ * Kernel stack size -- 16KB
+ */
+#define X86_64_KERNEL_STACK_SIZE 0x4000
+
+/**
+ * Maximum physical address space mappable by the kernel. Adjust this
+ * for a bigger physical address space. We set this to 37-bit,
+ * i.e. 128 GBytes.
+ */
+#define X86_64_PADDR_SPACE_LIMIT ((genpaddr_t)1 << 37)
+
+/**
+ * Static address space limit for the init user-space domain. The
+ * static space is used to map in code and static data of the init
+ * module, as well as all loaded multiboot modules. init can freely
+ * allocate dynamic memory as soon as it is running. This is 32 MBytes
+ * right now.
+ *
+ * You should make this constant a multiple of #BASE_PAGE_SIZE *
+ * #PTABLE_SIZE or you'll restrict init's static address space
+ * unnecessarily. init's lowest segment should also be based at these
+ * multiples or it restricts itself.
+ */
+#define X86_64_INIT_SPACE_LIMIT (32 * 1024 * 1024)
+
+/**
+ * Base address of init address space in virtual memory. init should
+ * start at 4 MByte. The kernel maps in important structures at 2
+ * MByte. This address should be page-table size aligned (i.e. with 4
+ * KByte pages, a page table maps 2 MBytes. Thus, align it to
+ * multiples of 2 MBytes).
+ */
+#define X86_64_INIT_VBASE 0x200000
+
+/**
+ * Initial amount of physical memory to map during bootup. The low
+ * 1MByte of memory is always expected to be there and has to be
+ * specified here at minimum. If you need more during bootup, increase
+ * this value. This value is also the amount of memory you _expect_ to
+ * be in the system during bootup, or the kernel will crash!
+ */
+#define X86_64_KERNEL_INIT_MEMORY (1 * 1024 * 1024)
+
+/**
+ * Aligns an address to the nearest PML4 entry by masking out lower 39
+ * bits.
+ */
+#define X86_64_PML4_ALIGN(addr) ((addr) & ((genpaddr_t)0x1ffffff << 39))
+
+/**
+ * Absolute offset of mapped physical memory within virtual address
+ * space. This occupies one or more (usually one) PML4 slots directly
+ * before the kernel. This needs to be aligned to PADDR_SPACE_LIMIT.
+ *
+ * Change VSPACE_END in lib/barrelfish if you change this.
+ */
+#define X86_64_MEMORY_OFFSET X86_64_PML4_ALIGN(-X86_64_PADDR_SPACE_LIMIT)
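+
+/*
+ * Illustration: with the 37-bit X86_64_PADDR_SPACE_LIMIT above, this
+ * evaluates to 0xffffff8000000000, i.e. PML4 slot 511, matching the
+ * address-space diagram in the header comment of this file.
+ */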
+
+/**
+ * The real-mode addresses
+ */
+
+#define X86_64_REAL_MODE_SEGMENT 0x0600 /**< The real-mode segment */
+#define X86_64_REAL_MODE_OFFSET 0x0000 /**< The real-mode offset _has to be_ 0000!! */
+
+#define X86_64_REAL_MODE_LINEAR_OFFSET \
+ (X86_64_REAL_MODE_SEGMENT << 4) /**< The linear offset
+ of the real-mode
+ segment */
+
+#define X86_64_REAL_MODE_SEGMENT_TO_REAL_MODE_PAGE(seg) ((uint8_t)((seg) >> 8))
+#define X86_64_REAL_MODE_ADDR_TO_REAL_MODE_VECTOR(seg,off) ((uint32_t)((seg) << 16) | (off))
+
+#ifndef __ASSEMBLER__
+
+/**
+ * \brief The kernel stack.
+ */
+extern uintptr_t x86_64_kernel_stack[X86_64_KERNEL_STACK_SIZE/sizeof(uintptr_t)];
+
+#endif
+
+#endif // KERNEL_TARGET_X86_64_OFFSETS_H
--- /dev/null
+/**
+ * \file
+ * \brief x86-64 kernel page-table structures.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef KERNEL_TARGET_X86_64_PAGING_H
+#define KERNEL_TARGET_X86_64_PAGING_H
+
+#include <capabilities.h>
+#include <barrelfish_kpi/paging_arch.h>
+
+// Functions defined elsewhere. Move the declarations to appropriate includes.
+int paging_x86_64_map_memory(lpaddr_t base, size_t size);
+lvaddr_t paging_x86_64_map_device(lpaddr_t base, size_t size);
+void paging_x86_64_reset(void);
+void paging_x86_64_make_good_pml4(lpaddr_t base);
+
+/// All flags valid for page access protection from user-space
+#define X86_64_PTABLE_ACCESS_MASK \
+ (X86_64_PTABLE_EXECUTE_DISABLE | X86_64_PTABLE_USER_SUPERVISOR | \
+ X86_64_PTABLE_READ_WRITE)
+
+/// All arch-specific flags valid to be set from user-space
+#define X86_64_PTABLE_FLAGS_MASK \
+ (X86_64_PTABLE_GLOBAL_PAGE | X86_64_PTABLE_ATTR_INDEX | \
+ X86_64_PTABLE_DIRTY | X86_64_PTABLE_ACCESSED | \
+ X86_64_PTABLE_CACHE_DISABLED | X86_64_PTABLE_WRITE_THROUGH)
+
+/// Mask out all arch-specific flags except those valid from user-space
+#define X86_64_PTABLE_FLAGS(flags) (flags & X86_64_PTABLE_FLAGS_MASK)
+
+/// Mask out all flags except those for access protection
+#define X86_64_PTABLE_ACCESS(flags) (flags & X86_64_PTABLE_ACCESS_MASK)
+
+/** True if page entry is present in memory */
+#define X86_64_IS_PRESENT(entry) \
+ ((*(uint64_t *)(entry)) & X86_64_PTABLE_PRESENT)
+
+/**
+ * A page directory entry.
+ */
+union x86_64_pdir_entry {
+ uint64_t raw;
+ struct {
+ uint64_t present :1;
+ uint64_t read_write :1;
+ uint64_t user_supervisor :1;
+ uint64_t write_through :1;
+ uint64_t cache_disabled :1;
+ uint64_t accessed :1;
+ uint64_t reserved :3;
+ uint64_t available :3;
+ uint64_t base_addr :28;
+ uint64_t reserved2 :12;
+ uint64_t available2 :11;
+ uint64_t execute_disable :1;
+ } d;
+};
+
+/**
+ * A page table entry.
+ */
+union x86_64_ptable_entry {
+ uint64_t raw;
+ struct {
+ uint64_t present :1;
+ uint64_t read_write :1;
+ uint64_t user_supervisor :1;
+ uint64_t write_through :1;
+ uint64_t cache_disabled :1;
+ uint64_t accessed :1;
+ uint64_t dirty :1;
+ uint64_t always1 :1;
+ uint64_t global :1;
+ uint64_t available :3;
+ uint64_t attr_index :1;
+ uint64_t reserved :8;
+ uint64_t base_addr :19;
+ uint64_t reserved2 :12;
+ uint64_t available2 :11;
+ uint64_t execute_disable :1;
+ } large;
+ struct {
+ uint64_t present :1;
+ uint64_t read_write :1;
+ uint64_t user_supervisor :1;
+ uint64_t write_through :1;
+ uint64_t cache_disabled :1;
+ uint64_t accessed :1;
+ uint64_t dirty :1;
+ uint64_t attr_index :1;
+ uint64_t global :1;
+ uint64_t available :3;
+ uint64_t base_addr :28;
+ uint64_t reserved2 :12;
+ uint64_t available2 :11;
+ uint64_t execute_disable :1;
+ } base;
+};
+
+/**
+ * \brief Clear page directory.
+ *
+ * Clears page directory pointed to by 'p'.
+ *
+ * \param p Pointer to page directory to clear.
+ */
+static inline void paging_x86_64_clear_pdir(union x86_64_pdir_entry * COUNT(X86_64_PTABLE_SIZE)
+ NONNULL p)
+{
+ for (int i = 0; i < X86_64_PTABLE_SIZE; i++) {
+ p[i].raw = X86_64_PTABLE_CLEAR;
+ }
+}
+
+/**
+ * \brief Clear page table.
+ *
+ * Clears page table pointed to by 'p'.
+ *
+ * \param p Pointer to page table to clear.
+ */
+static inline void paging_x86_64_clear_ptable(union x86_64_ptable_entry * COUNT(X86_64_PTABLE_SIZE)
+ NONNULL p)
+{
+ for (int i = 0; i < X86_64_PTABLE_SIZE; i++) {
+ p[i].raw = X86_64_PTABLE_CLEAR;
+ }
+}
+
+/**
+ * \brief Maps from page directory entry to page directory/table.
+ *
+ * Maps page directory or table, based at 'base', from page directory entry
+ * pointed to by 'entry'.
+ *
+ * \param entry Pointer to page directory entry to point from.
+ * \param base Base virtual address of page directory/table to point to.
+ */
+static inline void paging_x86_64_map_table(union x86_64_pdir_entry *entry,
+ lpaddr_t base)
+{
+ union x86_64_pdir_entry tmp;
+ tmp.raw = X86_64_PTABLE_CLEAR;
+
+ tmp.d.present = 1;
+ tmp.d.read_write = 1;
+ tmp.d.user_supervisor = 1;
+ tmp.d.base_addr = base >> 12;
+
+ *entry = tmp;
+}
+
+/**
+ * \brief Maps a large page.
+ *
+ * From large page table entry, pointed to by 'entry', maps physical address
+ * 'base' with page attribute bitmap 'bitmap'.
+ *
+ * \param entry Pointer to page table entry to map from.
+ * \param base Physical address to map to (will be page-aligned).
+ * \param bitmap Bitmap to apply to page attributes.
+ */
+static inline void paging_x86_64_map_large(union x86_64_ptable_entry *entry,
+ lpaddr_t base, uint64_t bitmap)
+{
+ union x86_64_ptable_entry tmp;
+ tmp.raw = X86_64_PTABLE_CLEAR;
+
+ tmp.large.present = bitmap & X86_64_PTABLE_PRESENT ? 1 : 0;
+ tmp.large.read_write = bitmap & X86_64_PTABLE_READ_WRITE ? 1 : 0;
+ tmp.large.user_supervisor = bitmap & X86_64_PTABLE_USER_SUPERVISOR ? 1 : 0;
+ tmp.large.write_through = bitmap & X86_64_PTABLE_WRITE_THROUGH ? 1 : 0;
+ tmp.large.cache_disabled = bitmap & X86_64_PTABLE_CACHE_DISABLED ? 1 : 0;
+ tmp.large.global = bitmap & X86_64_PTABLE_GLOBAL_PAGE ? 1 : 0;
+ tmp.large.attr_index = bitmap & X86_64_PTABLE_ATTR_INDEX ? 1 : 0;
+ tmp.large.execute_disable = bitmap & X86_64_PTABLE_EXECUTE_DISABLE ? 1 : 0;
+ tmp.large.always1 = 1;
+ tmp.large.base_addr = base >> 21;
+
+ *entry = tmp;
+}
+
+/**
+ * \brief Maps a normal (small) page.
+ *
+ * From small page table entry, pointed to by 'entry', maps physical address
+ * 'base' with page attribute bitmap 'bitmap'.
+ *
+ * \param entry Pointer to page table entry to map from.
+ * \param base Physical address to map to (will be page-aligned).
+ * \param bitmap Bitmap to apply to page attributes.
+ */
+static inline void paging_x86_64_map(union x86_64_ptable_entry * NONNULL entry,
+ lpaddr_t base, uint64_t bitmap)
+{
+ union x86_64_ptable_entry tmp;
+ tmp.raw = X86_64_PTABLE_CLEAR;
+
+ tmp.base.present = bitmap & X86_64_PTABLE_PRESENT ? 1 : 0;
+ tmp.base.read_write = bitmap & X86_64_PTABLE_READ_WRITE ? 1 : 0;
+ tmp.base.user_supervisor = bitmap & X86_64_PTABLE_USER_SUPERVISOR ? 1 : 0;
+ tmp.base.write_through = bitmap & X86_64_PTABLE_WRITE_THROUGH ? 1 : 0;
+ tmp.base.cache_disabled = bitmap & X86_64_PTABLE_CACHE_DISABLED ? 1 : 0;
+ tmp.base.attr_index = bitmap & X86_64_PTABLE_ATTR_INDEX ? 1 : 0;
+ tmp.base.global = bitmap & X86_64_PTABLE_GLOBAL_PAGE ? 1 : 0;
+ tmp.base.execute_disable = bitmap & X86_64_PTABLE_EXECUTE_DISABLE ? 1 : 0;
+ tmp.base.base_addr = base >> 12;
+
+ *entry = tmp;
+}
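+
+/*
+ * Usage sketch (hypothetical variables, illustration only):
+ *
+ *   union x86_64_ptable_entry *pt = ...;  // the region's page table
+ *   paging_x86_64_map(&pt[X86_64_PTABLE_BASE(vaddr)], paddr,
+ *                     X86_64_PTABLE_PRESENT | X86_64_PTABLE_ACCESS_DEFAULT);
+ *
+ * installs a user-accessible, writable, non-executable 4kB mapping of
+ * the frame at 'paddr' for virtual address 'vaddr'.
+ */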
+
+/**
+ * \brief Modify flags of a normal (small) page.
+ *
+ * Changes the attributes of the small page table entry pointed to by
+ * 'entry' to the page attribute bitmap 'bitmap', leaving the mapped
+ * address intact.
+ *
+ * \param entry  Pointer to page table entry to modify.
+ * \param bitmap Bitmap to apply to page attributes.
+ */
+static inline void paging_x86_64_modify_flags(union x86_64_ptable_entry * NONNULL entry,
+ uint64_t bitmap)
+{
+ union x86_64_ptable_entry tmp = *entry;
+
+ tmp.base.present = bitmap & X86_64_PTABLE_PRESENT ? 1 : 0;
+ tmp.base.read_write = bitmap & X86_64_PTABLE_READ_WRITE ? 1 : 0;
+ tmp.base.user_supervisor = bitmap & X86_64_PTABLE_USER_SUPERVISOR ? 1 : 0;
+ tmp.base.write_through = bitmap & X86_64_PTABLE_WRITE_THROUGH ? 1 : 0;
+ tmp.base.cache_disabled = bitmap & X86_64_PTABLE_CACHE_DISABLED ? 1 : 0;
+ tmp.base.attr_index = bitmap & X86_64_PTABLE_ATTR_INDEX ? 1 : 0;
+ tmp.base.global = bitmap & X86_64_PTABLE_GLOBAL_PAGE ? 1 : 0;
+ tmp.base.execute_disable = bitmap & X86_64_PTABLE_EXECUTE_DISABLE ? 1 : 0;
+
+ *entry = tmp;
+}
+
+static inline void paging_unmap(union x86_64_ptable_entry * NONNULL entry)
+{
+ entry->raw = X86_64_PTABLE_CLEAR;
+}
+
+/**
+ * \brief Convert Capability access rights to X86-64 page flags.
+ *
+ * Returns corresponding X86-64 page flags to given capability access rights
+ * mask 'rights'.
+ *
+ * \param rights Capability rightsmask.
+ *
+ * \return X86-64 page flags.
+ */
+static inline uint64_t paging_x86_64_cap_to_page_flags(CapRights rights)
+{
+ uint64_t pageflags = 0;
+
+ // Sanity-check given flags
+ if(!(rights & CAPRIGHTS_READ) &&
+ (rights & CAPRIGHTS_WRITE || rights & CAPRIGHTS_EXECUTE)) {
+        printk(LOG_ERR, "Page mapped writable and/or executable, but not "
+               "readable. Impossible on x86! Will map the page without any "
+               "user access instead.\n");
+ }
+
+ // Convert flags
+ pageflags |= rights & CAPRIGHTS_READ ? X86_64_PTABLE_USER_SUPERVISOR : 0;
+ pageflags |= rights & CAPRIGHTS_WRITE ? X86_64_PTABLE_READ_WRITE : 0;
+ pageflags |= rights & CAPRIGHTS_EXECUTE ? 0 : X86_64_PTABLE_EXECUTE_DISABLE;
+
+ return pageflags;
+}
+
+/**
+ * \brief Switch context.
+ *
+ * Assigns given physical base address of PML4 'pml4' to the CR3
+ * register, effectively switching context to new address space. Be
+ * cautious that you only switch to "good" (as set up by
+ * paging_x86_64_make_good_pml4()) PML4s!
+ *
+ * \param pml4 Physical base address of PML4 table.
+ */
+static inline void paging_x86_64_context_switch(lpaddr_t pml4)
+{
+ __asm volatile("mov %[pml4], %%cr3"
+ : /* No output */
+ :
+ [pml4] "r" (pml4)
+ );
+}
+
+/**
+ * \brief Mask out page attributes.
+ *
+ * Masks out all attributes and access rights from 'attr' according to
+ * 'mask'. This is architecture-specific. On x86-64, except for the
+ * execute disable attribute, rights are given by setting a
+ * corresponding bit. Thus, setting that bit within 'mask' to zero,
+ * masks out the right. For the execute disable bit, the right is
+ * masked out when the bit is set, so the mask works the other way
+ * around in this case: When the bit is set in 'mask', but not set in
+ * 'attr', it will be set in the return value, so mask-out behavior is
+ * preserved.
+ *
+ * \param attr The page attributes to mask.
+ * \param mask Mask for the page attributes.
+ *
+ * \return Masked version of 'attr'.
+ */
+static inline uint64_t paging_x86_64_mask_attrs(uint64_t attr, uint64_t mask)
+{
+ // First, mask out all "bit-sets-enabled" attributes
+ attr &= mask | X86_64_PTABLE_EXECUTE_DISABLE;
+
+ // Now, mask out all "bit-sets-disabled" attributes
+ attr |= mask & X86_64_PTABLE_EXECUTE_DISABLE;
+
+ return attr;
+}
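+
+/*
+ * Worked example (illustration only): masking
+ *
+ *   attr = X86_64_PTABLE_PRESENT | X86_64_PTABLE_READ_WRITE
+ *   mask = X86_64_PTABLE_PRESENT | X86_64_PTABLE_EXECUTE_DISABLE
+ *
+ * clears READ_WRITE (its mask bit is zero) and sets EXECUTE_DISABLE (its
+ * mask bit is one), yielding a present, read-only, non-executable
+ * attribute, preserving mask-out semantics for both bit polarities.
+ */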
+
+#endif // KERNEL_TARGET_X86_64_PAGING_H
--- /dev/null
+/**
+ * \file
+ * \brief FPU accessor functions.
+ */
+
+/*
+ * Copyright (c) 2010, 2011, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef X86_64_FPU_H
+#define X86_64_FPU_H
+
+/// Exception number for FPU (device) not available
+#define FPU_UNAVAILABLE_TRAP 7
+
+static inline void fpu_copy(struct registers_fpu_x86_64 *dst,
+ struct registers_fpu_x86_64 *src)
+{
+ // XXX: Round to next 16-byte boundary
+ uint8_t *dregs = dst->registers, *sregs = src->registers;
+ dregs += 16 - ((uintptr_t)dregs % 16);
+ sregs += 16 - ((uintptr_t)sregs % 16);
+
+ memcpy(dregs, sregs, 512);
+}
+
+#endif
--- /dev/null
+/**
+ * \file
+ * \brief Local descriptor table (LDT) management
+ */
+
+/*
+ * Copyright (c) 2011, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef LIBBARRELFISH_ARCH_LDT_H
+#define LIBBARRELFISH_ARCH_LDT_H
+
+#include <barrelfish/ldt.h>
+
+void ldt_init_disabled(dispatcher_handle_t handle);
+errval_t ldt_alloc_segment_disabled(dispatcher_handle_t handle, void *segbase,
+ uint16_t *ret_selector);
+errval_t ldt_free_segment_ondisp(dispatcher_handle_t handle, uint16_t selector);
+
+#endif // LIBBARRELFISH_ARCH_LDT_H
--- /dev/null
+/**
+ * \file
+ * \brief Arch independent accessor functions for use in generic code.
+ * Generic include for userland
+ */
+
+/*
+ * Copyright (c) 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_X86_64_BARRELFISH_REGISTERS_H
+#define ARCH_X86_64_BARRELFISH_REGISTERS_H
+
+#include <barrelfish_kpi/registers_arch.h>
+#include <target/x86_64/registers_target.h>
+
+static inline void
+registers_set_initial(arch_registers_state_t *regs, struct thread *thread,
+ lvaddr_t entry, lvaddr_t stack, uint64_t arg1,
+ uint64_t arg2, uint64_t arg3, uint64_t arg4)
+{
+ registers_x86_64_set_initial(regs, thread, entry, stack, arg1, arg2, arg3, arg4);
+}
+
+#endif // ARCH_X86_64_BARRELFISH_REGISTERS_H
--- /dev/null
+/**
+ * \file
+ * \brief Threads architecture-specific code
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef LIBBARRELFISH_ARCH_THREADS_H
+#define LIBBARRELFISH_ARCH_THREADS_H
+
+#include <barrelfish/syscall_arch.h>
+
+/* this is a label defined in the assembler code that implements cap_invoke() */
+extern void barrelfish_cap_invoke_post_syscall_instr(void);
+extern void barrelfish_lrpc_post_syscall_instr(void);
+
+#if 0 // Using the old format of dispatcher_frame and not called by anyone -akhi
+/**
+ * Returns true iff the thread with the given save area has successfully
+ * performed a syscall. Used for the thread_invoke_cap_and_exit() hack.
+ */
+static inline bool thread_check_syscall_succeeded(uintptr_t *save_area)
+{
+ return ((save_area[RIP_REG] == (lvaddr_t)barrelfish_cap_invoke_post_syscall_instr
+ || save_area[RIP_REG] == (lvaddr_t)barrelfish_lrpc_post_syscall_instr)
+ && save_area[RAX_REG] == 0);
+}
+#endif
+
+/**
+ * \brief Enable FPU trap.
+ */
+static inline void fpu_trap_on(void)
+{
+ errval_t err = sys_x86_fpu_trap_on();
+ assert_disabled(err_is_ok(err));
+}
+
+#endif // LIBBARRELFISH_ARCH_THREADS_H
"backends/net/stack_allocator.c",
"backends/local/control_channel.c"
],
- flounderDefs = [ "bulk_ctrl" ],
- flounderBindings = [ "bulk_ctrl", "net_ports", "net_ARP", "e10k" ],
- flounderExtraBindings = [ ("net_ports", ["rpcclient"]),
+ flounderDefs = [ "bulk_ctrl" ],
+ flounderBindings = [ "bulk_ctrl", "net_ports", "net_ARP", "e10k" ],
+ flounderExtraBindings = [ ("net_ports", ["rpcclient"]),
("net_ARP", ["rpcclient"]) ],
-
- mackerelDevices = [ "e10k", "e10k_q" ],
- addLibraries = [ "pci" ]
-
+ mackerelDevices = [ "e10k", "e10k_q" ],
+ addLibraries = [ "pci" ]
}
]
--- /dev/null
+/**
+ * \file
+ * \brief libc startup code.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/dispatch.h>
+#include <asmoffsets.h>
+
+ .text
+ .globl _start, _start_init
+
+_start:
+ mov $0, %rsi
+
+_start_generic: // Assumption: rdi = dispatcher
+ // Initialize stack for setup code
+ movq %rdi, %rsp
+ addq $(OFFSETOF_DISP_PRIV_STACK_LIMIT), %rsp
+
+ // Call barrelfish_init_disabled (doesn't return)
+ jmp barrelfish_init_disabled
+
+/* special entry point for init domain */
+_start_init:
+ mov $1, %rsi
+ jmp _start_generic
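+
+/*
+ * Note (illustrative, assuming the usual asmoffsets generation scheme):
+ * OFFSETOF_DISP_PRIV_STACK_LIMIT is the byte offset of the top of the
+ * dispatcher's private stack. Since x86 stacks grow downwards, setting
+ * %rsp to dispatcher + limit yields an empty, ready-to-use stack. The
+ * constant would be derived from C along the lines of:
+ *
+ *   offsetof(struct dispatcher_generic, stack)
+ *       + sizeof(((struct dispatcher_generic *)0)->stack)
+ *
+ * (field names hypothetical; see asmoffsets.h for the generated value)
+ */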
"-mno-sse3", "-mno-ssse3", "-mno-sse4.1",
"-mno-sse4.2", "-mno-sse4", "-mno-sse4a",
"-mno-3dnow" ],
- architectures = [ "x86_64", "x86_32", "scc" ]
+ architectures = [ "x86_64", "x86_32", "scc", "k1om" ]
}
]
lrpc_words = 0
}
-all_archs = [x86_64, x86_32, arm]
+-- Settings for the Xeon Phi (k1om). TODO: verify these values.
+k1om = Arch {
+ archname = "k1om",
+ wordsize = 64,
+ ptrsize = 64,
+ sizesize = 64,
+ enum_type = Int32,
+ lmp_words = 10,
+ lrpc_words = 4
+}
+
+all_archs = [x86_64, x86_32, arm, k1om]
-- for option parsing: find the matching arch info
parse_arch :: String -> Maybe Arch
--- /dev/null
+/**
+ * \file
+ * \brief Capability invocations specific to the monitors
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef INVOCATIONS_H
+#define INVOCATIONS_H
+
+#include <barrelfish/syscall_arch.h>
+#include <barrelfish/caddr.h>
+#include <barrelfish/invocations_arch.h>
+
+/**
+ * \brief Spawn a new core.
+ *
+ * \param core_id APIC ID of the core to try booting
+ * \param cpu_type Type of core to boot
+ * \param entry Kernel entry point in physical memory
+ */
+static inline errval_t
+invoke_monitor_spawn_core(coreid_t core_id, enum cpu_type cpu_type,
+ forvaddr_t entry)
+{
+ return cap_invoke4(cap_kernel, KernelCmd_Spawn_core, core_id, cpu_type,
+ entry).error;
+}
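+
+/*
+ * Usage sketch (illustrative, not from the original commit):
+ *
+ * \code
+ * // hypothetical caller in the monitor; 'entry' is the physical address
+ * // of the new kernel's entry point, CPU_X86_64 the enum cpu_type member
+ * errval_t err = invoke_monitor_spawn_core(target_apic_id, CPU_X86_64, entry);
+ * \endcode
+ */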
+
+static inline errval_t
+invoke_monitor_identify_cap(capaddr_t cap, int bits, struct capability *out)
+{
+ return cap_invoke4(cap_kernel, KernelCmd_Identify_cap, cap, bits,
+ (uintptr_t)out).error;
+}
+
+static inline errval_t
+invoke_monitor_identify_domains_cap(capaddr_t root_cap, int root_bits,
+ capaddr_t cap, int bits,
+ struct capability *out)
+{
+ return cap_invoke6(cap_kernel, KernelCmd_Identify_domains_cap,
+ root_cap, root_bits, cap, bits, (uintptr_t)out).error;
+}
+
+static inline errval_t
+invoke_monitor_nullify_cap(capaddr_t cap, int bits)
+{
+ return cap_invoke3(cap_kernel, KernelCmd_Nullify_cap, cap, bits).error;
+}
+
+static inline errval_t
+invoke_monitor_cap_remote(capaddr_t cap, int bits, bool is_remote,
+ bool * has_descendents)
+{
+ struct sysret r = cap_invoke4(cap_kernel, KernelCmd_Remote_cap, cap, bits,
+ is_remote);
+ if (err_is_ok(r.error)) {
+ *has_descendents = r.value;
+ }
+ return r.error;
+}
+
+static inline errval_t
+invoke_monitor_create_cap(uint64_t *raw, capaddr_t caddr, int bits, capaddr_t slot)
+{
+ assert(sizeof(struct capability) % sizeof(uint64_t) == 0);
+ assert(sizeof(struct capability) / sizeof(uint64_t) == 4);
+ return cap_invoke8(cap_kernel, KernelCmd_Create_cap,
+ raw[0], raw[1], raw[2], raw[3],
+ caddr, bits, slot).error;
+}
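+
+/*
+ * Sketch (illustrative): the asserts above pin down the marshalling
+ * scheme; a capability travels as four 64-bit words in registers. A
+ * hypothetical caller would flatten it first:
+ *
+ * \code
+ * uint64_t raw[4];
+ * memcpy(raw, &capability_to_send, sizeof(raw));
+ * err = invoke_monitor_create_cap(raw, cnode_addr, cnode_bits, slot);
+ * \endcode
+ */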
+
+static inline errval_t
+invoke_monitor_register(struct capref ep)
+{
+ return cap_invoke2(cap_kernel, KernelCmd_Register, get_cap_addr(ep)).error;
+}
+
+static inline errval_t
+invoke_monitor_identify_cnode_get_cap(uint64_t *cnode_raw, capaddr_t slot,
+ struct capability *out)
+{
+ return cap_invoke4(cap_kernel, KernelCmd_Iden_cnode_get_cap,
+ (uintptr_t)cnode_raw, slot, (uintptr_t)out).error;
+}
+
+static inline errval_t
+invoke_monitor_remote_cap_retype(capaddr_t rootcap_addr, uint8_t rootcap_vbits,
+ capaddr_t src, enum objtype newtype,
+ int objbits, capaddr_t to, capaddr_t slot,
+ int bits) {
+ return cap_invoke9(cap_kernel, MonitorCmd_Retype, rootcap_addr,
+ rootcap_vbits, src, newtype, objbits, to, slot,
+ bits).error;
+}
+
+static inline errval_t
+invoke_monitor_remote_cap_delete(capaddr_t rootcap_addr, uint8_t rootcap_vbits,
+ capaddr_t src, int bits) {
+ return cap_invoke5(cap_kernel, MonitorCmd_Delete, rootcap_addr,
+ rootcap_vbits, src, bits).error;
+}
+
+static inline errval_t
+invoke_monitor_remote_cap_revoke(capaddr_t rootcap_addr, uint8_t rootcap_vbits,
+ capaddr_t src, int bits) {
+ return cap_invoke5(cap_kernel, MonitorCmd_Revoke, rootcap_addr,
+ rootcap_vbits, src, bits).error;
+}
+
+/**
+ * \brief Set up tracing in the kernel.
+ */
+static inline errval_t
+invoke_trace_setup(struct capref cap)
+{
+ return cap_invoke2(cap_kernel, KernelCmd_Setup_trace,
+ get_cap_addr(cap)).error;
+}
+
+static inline errval_t
+invoke_domain_id(struct capref cap, domainid_t domain_id)
+{
+ return cap_invoke3(cap_kernel, KernelCmd_Domain_Id, get_cap_addr(cap),
+ domain_id).error;
+}
+
+static inline errval_t invoke_monitor_sync_timer(uint64_t synctime)
+{
+ return cap_invoke2(cap_kernel, KernelCmd_Sync_timer, synctime).error;
+}
+
+static inline errval_t
+invoke_monitor_ipi_register(struct capref ep, int chanid)
+{
+ return cap_invoke3(cap_kernel, KernelCmd_IPI_Register, get_cap_addr(ep),
+ chanid).error;
+}
+
+static inline errval_t
+invoke_monitor_ipi_delete(int chanid)
+{
+ return cap_invoke2(cap_kernel, KernelCmd_IPI_Delete, chanid).error;
+}
+
+static inline errval_t
+invoke_monitor_get_arch_id(uintptr_t *arch_id)
+{
+ assert(arch_id != NULL);
+
+ struct sysret sysret = cap_invoke1(cap_kernel, KernelCmd_Get_arch_id);
+ if (err_is_ok(sysret.error)) {
+ *arch_id = sysret.value;
+ }
+ return sysret.error;
+}
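+
+/*
+ * Usage sketch (illustrative): on x86 the returned value is the APIC ID
+ * of the calling core.
+ *
+ * \code
+ * uintptr_t apic_id;
+ * errval_t err = invoke_monitor_get_arch_id(&apic_id);
+ * if (err_is_ok(err)) {
+ *     // apic_id now holds this core's hardware ID
+ * }
+ * \endcode
+ */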
+
+#endif
[ build application { target = "bulk_mini",
cFiles = [ "bulk_mini.c" ],
- addLibraries = libDeps [ "bulk_transfer" ]
+ addLibraries = [ "bulk_transfer" ]
},
build application { target = "bulk_netproxy",
cFiles = [ "bulk_netproxy.c", "sleep.c" ],
- addLibraries = libDeps [ "bulk_transfer", "lwip",
+ addLibraries = [ "bulk_transfer", "lwip",
"bench" ]
},
build application { target = "bulk_nettrans",
cFiles = [ "bulk_nettrans.c" ],
- addLibraries = libDeps [ "bulk_transfer" ]
+ addLibraries = [ "bulk_transfer" ]
},
build application { target = "bulk_shm",
cFiles = [ "bulk_shm.c" ],
- addLibraries = libDeps [ "bulk_transfer" ]
+ addLibraries = [ "bulk_transfer" ]
}
]