--- /dev/null
+--------------------------------------------------------------------------
+-- Copyright (c) 2007-2010, ETH Zurich.
+-- All rights reserved.
+--
+-- This file is distributed under the terms in the attached LICENSE file.
+-- If you do not find this file, copies can be found by writing to:
+-- ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+--
+-- Architectural definitions for Barrelfish on the ARMv8 (AArch64) ISA.
+--
+-- The build uses the aarch64-apm-linux-gnu toolchain and targets the
+-- Cortex-A57 as the default CPU.
+--
+--------------------------------------------------------------------------
+
+module ARMv8 where
+
+import HakeTypes
+import Path
+import qualified Config
+import qualified ArchDefaults
+
+-------------------------------------------------------------------------
+--
+-- Architecture specific definitions for ARMv8
+--
+-------------------------------------------------------------------------
+
+arch = "armv8"
+archFamily = "aarch64"
+
+compiler = "aarch64-apm-linux-gnu-gcc"
+objcopy = "aarch64-apm-linux-gnu-objcopy"
+objdump = "aarch64-apm-linux-gnu-objdump"
+ar = "aarch64-apm-linux-gnu-ar"
+ranlib = "aarch64-apm-linux-gnu-ranlib"
+cxxcompiler = "aarch64-apm-linux-gnu-g++"
+
+ourCommonFlags = [ Str "-fno-unwind-tables",
+ Str "-Wno-packed-bitfield-compat",
+ Str "-fno-stack-protector",
+ Str "-mcpu=cortex-a57",
+ Str "-march=armv8-a",
+ Str "-mabi=lp64",
+ Str "-DPIC_REGISTER=X10",
+ Str "-fPIE",
+ Str "-ffixed-r9",
+ Str "-DTHREAD_REGISTER=X9",
+ Str "-D__ARM_CORTEX__",
+ Str "-D__ARM_ARCH_8A__",
+ Str "-Wno-unused-but-set-variable",
+ Str "-Wno-format"
+ ]
+
+cFlags = ArchDefaults.commonCFlags
+ ++ ArchDefaults.commonFlags
+ ++ ourCommonFlags
+
+cxxFlags = ArchDefaults.commonCxxFlags
+ ++ ArchDefaults.commonFlags
+ ++ ourCommonFlags
+
+cDefines = ArchDefaults.cDefines options
+
+ourLdFlags = [ Str "-Wl,-section-start,.text=0x400000",
+ Str "-Wl,-section-start,.data=0x600000",
+ Str "-Wl,--build-id=none" ]
+
+ldFlags = ArchDefaults.ldFlags arch ++ ourLdFlags
+ldCxxFlags = ArchDefaults.ldCxxFlags arch ++ ourLdFlags
+
+stdLibs = ArchDefaults.stdLibs arch ++ [ Str "-lgcc" ]
+
+options = (ArchDefaults.options arch archFamily) {
+ optFlags = cFlags,
+ optCxxFlags = cxxFlags,
+ optDefines = cDefines,
+ optDependencies =
+ [ PreDep InstallTree arch "/include/trace_definitions/trace_defs.h",
+ PreDep InstallTree arch "/include/errors/errno.h",
+ PreDep InstallTree arch "/include/barrelfish_kpi/capbits.h",
+ PreDep InstallTree arch "/include/asmoffsets.h"
+ ],
+ optLdFlags = ldFlags,
+ optLdCxxFlags = ldCxxFlags,
+ optLibs = stdLibs,
+ optInterconnectDrivers = ["lmp", "ump"],
+ optFlounderBackends = ["lmp", "ump"]
+ }
+
+--
+-- Compilers
+--
+cCompiler = ArchDefaults.cCompiler arch compiler
+cxxCompiler = ArchDefaults.cxxCompiler arch cxxcompiler
+makeDepend = ArchDefaults.makeDepend arch compiler
+makeCxxDepend = ArchDefaults.makeCxxDepend arch cxxcompiler
+cToAssembler = ArchDefaults.cToAssembler arch compiler
+assembler = ArchDefaults.assembler arch compiler
+archive = ArchDefaults.archive arch
+linker = ArchDefaults.linker arch compiler
+cxxlinker = ArchDefaults.cxxlinker arch cxxcompiler
+
+
+--
+-- The kernel is "different"
+--
+
+kernelCFlags = [ Str s | s <- [ "-fno-builtin",
+ "-fno-unwind-tables",
+ "-nostdinc",
+ "-std=c99",
+ "-mcpu=cortex-a57",
+ "-march=armv8-a",
+ "-mabi=lp64",
+ "-fPIE",
+ "-U__linux__",
+ "-Wall",
+ "-Wshadow",
+ "-Wstrict-prototypes",
+ "-Wold-style-definition",
+ "-Wmissing-prototypes",
+ "-Wmissing-declarations",
+ "-Wmissing-field-initializers",
+ "-Wredundant-decls",
+ "-Werror",
+ "-imacros deputy/nodeputy.h",
+ "-fno-stack-check",
+ "-ffreestanding",
+ "-fomit-frame-pointer",
+ "-Wmissing-noreturn",
+ "-DPIC_REGISTER=X10",
+ "-ffixed-r9",
+ "-DTHREAD_REGISTER=X9",
+ "-D__ARM_CORTEX__",
+ "-D__ARM_ARCH_8A__",
+ "-Wno-unused-but-set-variable",
+ "-Wno-format"
+ ]]
+
+kernelLdFlags = [ Str "-Wl,-N",
+ Str "-fno-builtin",
+ Str "-nostdlib",
+ Str "-pie",
+ Str "-Wl,--fatal-warnings"
+ ]
+
+
+--
+-- Link the kernel (CPU Driver)
+--
+linkKernel :: Options -> [String] -> [String] -> String -> HRule
+linkKernel opts objs libs name =
+ let linkscript = "/kernel/" ++ name ++ ".lds"
+ kernelmap = "/kernel/" ++ name ++ ".map"
+ kasmdump = "/kernel/" ++ name ++ ".asm"
+ kbinary = "/sbin/" ++ name
+ kbootable = kbinary ++ ".bin"
+ in
+ Rules [ Rule ([ Str compiler, Str Config.cOptFlags,
+ NStr "-T", In BuildTree arch linkscript,
+ Str "-o", Out arch kbinary,
+ NStr "-Wl,-Map,", Out arch kernelmap
+ ]
+ ++ (optLdFlags opts)
+ ++
+ [ In BuildTree arch o | o <- objs ]
+ ++
+ [ In BuildTree arch l | l <- libs ]
+ ++
+ [ Str "-lgcc" ]
+ ),
+ -- Generate kernel assembly dump
+ Rule [ Str objdump,
+ Str "-d",
+ Str "-M reg-names-raw",
+ In BuildTree arch kbinary,
+ Str ">", Out arch kasmdump ],
+ Rule [ Str "cpp",
+ NStr "-I", NoDep SrcTree "src" "/kernel/include/arch/armv8",
+ Str "-D__ASSEMBLER__",
+ Str "-P", In SrcTree "src" "/kernel/arch/armv8/linker.lds.in",
+ Out arch linkscript
+ ]
+ ]
architectures = allArchitectures
}
-allArchitectures = [ "x86_64", "x86_32", "armv5", "arm11mp", "scc", "xscale", "armv7", "armv7-m", "k1om" ]
+allArchitectures = [ "x86_64", "x86_32", "armv5", "arm11mp", "scc", "xscale", "armv7", "armv7-m", "armv8", "k1om" ]
allArchitectureFamilies = [ "x86_64", "x86_32", "arm", "scc", "k1om" ]
-- architectures that currently support THC
thcArchitectures = ["x86_64", "x86_32", "scc"]
import qualified XScale
import qualified ARMv7
import qualified ARMv7_M
+import qualified ARMv8
import HakeTypes
import qualified Args
import qualified Config
options "xscale" = XScale.options
options "armv7" = ARMv7.options
options "armv7-m" = ARMv7_M.options
+options "armv8" = ARMv8.options
kernelCFlags "x86_64" = X86_64.kernelCFlags
kernelCFlags "k1om" = K1om.kernelCFlags
kernelCFlags "xscale" = XScale.kernelCFlags
kernelCFlags "armv7" = ARMv7.kernelCFlags
kernelCFlags "armv7-m" = ARMv7_M.kernelCFlags
+kernelCFlags "armv8" = ARMv8.kernelCFlags
kernelLdFlags "x86_64" = X86_64.kernelLdFlags
kernelLdFlags "k1om" = K1om.kernelLdFlags
kernelLdFlags "xscale" = XScale.kernelLdFlags
kernelLdFlags "armv7" = ARMv7.kernelLdFlags
kernelLdFlags "armv7-m" = ARMv7_M.kernelLdFlags
+kernelLdFlags "armv8" = ARMv8.kernelLdFlags
archFamily :: String -> String
archFamily arch = optArchFamily (options arch)
| optArch opts == "xscale" = XScale.cCompiler opts phase src obj
| optArch opts == "armv7" = ARMv7.cCompiler opts phase src obj
| optArch opts == "armv7-m" = ARMv7_M.cCompiler opts phase src obj
+ | optArch opts == "armv8" = ARMv8.cCompiler opts phase src obj
| otherwise = [ ErrorMsg ("no C compiler for " ++ (optArch opts)) ]
cPreprocessor :: Options -> String -> String -> String -> [ RuleToken ]
ARMv7.makeDepend opts phase src obj depfile
| optArch opts == "armv7-m" =
ARMv7_M.makeDepend opts phase src obj depfile
+ | optArch opts == "armv8" =
+ ARMv8.makeDepend opts phase src obj depfile
| otherwise = [ ErrorMsg ("no dependency generator for " ++ (optArch opts)) ]
makeCxxDepend :: Options -> String -> String -> String -> String -> [ RuleToken ]
| optArch opts == "xscale" = XScale.cToAssembler opts phase src afile objdepfile
| optArch opts == "armv7" = ARMv7.cToAssembler opts phase src afile objdepfile
| optArch opts == "armv7-m" = ARMv7_M.cToAssembler opts phase src afile objdepfile
+ | optArch opts == "armv8" = ARMv8.cToAssembler opts phase src afile objdepfile
| otherwise = [ ErrorMsg ("no C compiler for " ++ (optArch opts)) ]
--
| optArch opts == "xscale" = XScale.assembler opts src obj
| optArch opts == "armv7" = ARMv7.assembler opts src obj
| optArch opts == "armv7-m" = ARMv7_M.assembler opts src obj
+ | optArch opts == "armv8" = ARMv8.assembler opts src obj
| otherwise = [ ErrorMsg ("no assembler for " ++ (optArch opts)) ]
archive :: Options -> [String] -> [String] -> String -> String -> [ RuleToken ]
| optArch opts == "xscale" = XScale.archive opts objs libs name libname
| optArch opts == "armv7" = ARMv7.archive opts objs libs name libname
| optArch opts == "armv7-m" = ARMv7_M.archive opts objs libs name libname
+ | optArch opts == "armv8" = ARMv8.archive opts objs libs name libname
| otherwise = [ ErrorMsg ("Can't build a library for " ++ (optArch opts)) ]
linker :: Options -> [String] -> [String] -> String -> [RuleToken]
| optArch opts == "xscale" = XScale.linker opts objs libs bin
| optArch opts == "armv7" = ARMv7.linker opts objs libs bin
| optArch opts == "armv7-m" = ARMv7_M.linker opts objs libs bin
+ | optArch opts == "armv8" = ARMv8.linker opts objs libs bin
| otherwise = [ ErrorMsg ("Can't link executables for " ++ (optArch opts)) ]
cxxlinker :: Options -> [String] -> [String] -> String -> [RuleToken]
| optArch opts == "xscale" = XScale.linkKernel opts objs [libraryPath l | l <- libs ] ("/sbin" ./. name)
| optArch opts == "armv7" = ARMv7.linkKernel opts objs [libraryPath l | l <- libs ] name
| optArch opts == "armv7-m" = ARMv7_M.linkKernel opts objs [libraryPath l | l <- libs ] name
+ | optArch opts == "armv8" = ARMv8.linkKernel opts objs [libraryPath l | l <- libs ] name
| otherwise = Rule [ Str ("Error: Can't link kernel for '" ++ (optArch opts) ++ "'") ]
--
--- /dev/null
+/*
+ * Copyright (c) 2015, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+alias errval uint64;
+alias cycles uint64;
--- /dev/null
+/*
+ * Copyright (c) 2015, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+alias genpaddr uint64;
+alias genvaddr uint64;
+alias rsrcid uint32;
--- /dev/null
+/**
+ * \file
+ * \brief Unidirectional bulk data transfer via shared memory
+ */
+
+/*
+ * Copyright (c) 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_BULK_TRANSFER_H
+#define ARCH_BULK_TRANSFER_H
+
+static inline void bulk_arch_prepare_send(void *mem, size_t size)
+{
+ // No-op
+}
+
+static inline void bulk_arch_prepare_recv(void *mem, size_t size)
+{
+ // No-op
+}
+
+#endif
--- /dev/null
+/**
+ * \file
+ * \brief
+ */
+
+/*
+ * Copyright (c) 2015, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_AARCH64_BARRELFISH_CORESTATE_H
+#define ARCH_AARCH64_BARRELFISH_CORESTATE_H
+
+#include <barrelfish/core_state.h>
+
+struct vspace_state {
+ struct vspace vspace;
+ struct pmap_aarch64 pmap;
+};
+
+struct pinned_state {
+ struct thread_mutex mutex;
+ struct memobj_pinned memobj;
+ struct vregion vregion;
+ lvaddr_t offset;
+ struct slab_allocator vregion_list_slab;
+ struct slab_allocator frame_list_slab;
+};
+
+struct core_state_arch {
+ struct core_state_generic c;
+ struct vspace_state vspace_state;
+ struct pinned_state pinned_state;
+};
+
+#endif
--- /dev/null
+/**
+ * \file
+ * \brief Architecture specific CPU bits.
+ */
+
+/*
+ * Copyright (c) 2015, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef AARCH64_BARRELFISH_CPU_H
+#define AARCH64_BARRELFISH_CPU_H
+
+#if __ARM_ARCH_8A__
+#define CURRENT_CPU_TYPE CPU_AARCH64
+#else
+#error "must define CURRENT_CPU_TYPE"
+#endif
+
+#endif
--- /dev/null
+/**
+ * \file
+ * \brief Dispatcher architecture-specific code
+ */
+
+/*
+ * Copyright (c) 2015, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_AARCH64_BARRELFISH_CURDISPATCHER_H
+#define ARCH_AARCH64_BARRELFISH_CURDISPATCHER_H
+
+#include <barrelfish_kpi/dispatcher_handle.h>
+
+//
+// Helpers for pasting #defined values into inline assembler.
+//
+#define STR(x) #x
+#define XTR(x) STR(x)
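+//
+// For example, with -DTHREAD_REGISTER=X9 on the compiler command line (as
+// set in the ARMv8 Hake definitions), XTR(THREAD_REGISTER) expands to the
+// string "X9", which curdispatcher() below pastes into its mov instruction.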
+
+/**
+ * \brief Returns pointer to current dispatcher, using thread register
+ */
+static inline dispatcher_handle_t curdispatcher(void)
+{
+ // TODO: XXX: check this!
+ dispatcher_handle_t ret = 0;
+ __asm (
+ "mov %[ret]," XTR(THREAD_REGISTER) : [ret] "=r" (ret)
+ );
+ return ret;
+}
+
+#endif // ARCH_AARCH64_BARRELFISH_CURDISPATCHER_H
--- /dev/null
+/**
+ * \file
+ * \brief Architecture specific dispatcher structure private to the user
+ */
+
+/*
+ * Copyright (c) 2015, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_AARCH64_BARRELFISH_DISPATCHER_H
+#define ARCH_AARCH64_BARRELFISH_DISPATCHER_H
+
+#include <target/aarch64/barrelfish/dispatcher_target.h>
+
+static inline struct dispatcher_generic*
+get_dispatcher_generic(dispatcher_handle_t handle)
+{
+ struct dispatcher_aarch64 *disp = (struct dispatcher_aarch64*)handle;
+ return &disp->generic;
+}
+
+static inline size_t get_dispatcher_size(void)
+{
+ return sizeof(struct dispatcher_aarch64);
+}
+
+#endif // ARCH_AARCH64_BARRELFISH_DISPATCHER_H
--- /dev/null
+/**
+ * \file
+ * \brief Low-level capability invocations
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, 2012, 2013, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef INCLUDEBARRELFISH_INVOCATIONS_ARCH_H
+#define INCLUDEBARRELFISH_INVOCATIONS_ARCH_H
+
+#include <barrelfish/syscall_arch.h> // for sys_invoke and cap_invoke
+#include <barrelfish_kpi/dispatcher_shared.h>
+#include <barrelfish_kpi/distcaps.h> // for distcap_state_t
+#include <barrelfish_kpi/syscalls.h>
+#include <barrelfish/caddr.h>
+#include <barrelfish_kpi/paging_arch.h>
+#include <barrelfish/debug.h> // for USER_PANIC()
+
+/**
+ * capability invocation syscall wrapper, copied from x86_32 version
+ */
+static inline struct sysret cap_invoke(struct capref to, uintptr_t argc, uintptr_t cmd,
+ uintptr_t arg2, uintptr_t arg3,
+ uintptr_t arg4, uintptr_t arg5,
+ uintptr_t arg6, uintptr_t arg7,
+ uintptr_t arg8, uintptr_t arg9,
+ uintptr_t arg10, uintptr_t arg11)
+{
+ // XXX: TODO
+ USER_PANIC("NYI");
+ uint8_t invoke_bits = get_cap_valid_bits(to);
+ capaddr_t invoke_cptr = get_cap_addr(to) >> (CPTR_BITS - invoke_bits);
+
+ assert(cmd < 0xFF);
+ assert(invoke_bits < 0xFF);
+ // flags << 24 | invoke_bits << 16 | cmd << 8 | syscall_invoke
+ // ^ used for LMP
+ uint32_t invocation = ((invoke_bits << 16) | (cmd << 8) | SYSCALL_INVOKE);
+
+ switch (argc) {
+ case 0:
+ return syscall2(invocation, invoke_cptr);
+ case 1:
+ return syscall3(invocation, invoke_cptr, arg2);
+ case 2:
+ return syscall4(invocation, invoke_cptr, arg2, arg3);
+ case 3:
+ return syscall5(invocation, invoke_cptr, arg2, arg3, arg4);
+ case 4:
+ return syscall6(invocation, invoke_cptr, arg2, arg3, arg4, arg5);
+ case 5:
+ return syscall7(invocation, invoke_cptr, arg2, arg3, arg4, arg5, arg6);
+ case 6:
+ return syscall8(invocation, invoke_cptr, arg2, arg3, arg4, arg5, arg6,
+ arg7);
+ case 7:
+ return syscall9(invocation, invoke_cptr, arg2, arg3, arg4, arg5, arg6,
+ arg7, arg8);
+ case 8:
+ return syscall10(invocation, invoke_cptr, arg2, arg3, arg4, arg5, arg6,
+ arg7, arg8, arg9);
+ case 9:
+ return syscall11(invocation, invoke_cptr, arg2, arg3, arg4, arg5, arg6,
+ arg7, arg8, arg9, arg10);
+ case 10:
+ return syscall12(invocation, invoke_cptr, arg2, arg3, arg4, arg5, arg6,
+ arg7, arg8, arg9, arg10, arg11);
+ default:
+ return SYSRET(SYS_ERR_ILLEGAL_INVOCATION);
+ }
+ assert(!"reached");
+}
+
+#define cap_invoke11(to, _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k) \
+ cap_invoke(to, 10, _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k)
+#define cap_invoke10(to, _a, _b, _c, _d, _e, _f, _g, _h, _i, _j) \
+ cap_invoke(to, 9, _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, 0)
+#define cap_invoke9(to, _a, _b, _c, _d, _e, _f, _g, _h, _i) \
+ cap_invoke(to, 8, _a, _b, _c, _d, _e, _f, _g, _h, _i, 0, 0)
+#define cap_invoke8(to, _a, _b, _c, _d, _e, _f, _g, _h) \
+ cap_invoke(to, 7, _a, _b, _c, _d, _e, _f, _g, _h, 0, 0, 0)
+#define cap_invoke7(to, _a, _b, _c, _d, _e, _f, _g) \
+ cap_invoke(to, 6, _a, _b, _c, _d, _e, _f, _g, 0, 0, 0, 0)
+#define cap_invoke6(to, _a, _b, _c, _d, _e, _f) \
+ cap_invoke(to, 5, _a, _b, _c, _d, _e, _f, 0, 0, 0, 0, 0)
+#define cap_invoke5(to, _a, _b, _c, _d, _e) \
+ cap_invoke(to, 4, _a, _b, _c, _d, _e, 0, 0, 0, 0, 0, 0)
+#define cap_invoke4(to, _a, _b, _c, _d) \
+ cap_invoke(to, 3, _a, _b, _c, _d, 0, 0, 0, 0, 0, 0, 0)
+#define cap_invoke3(to, _a, _b, _c) \
+ cap_invoke(to, 2, _a, _b, _c, 0, 0, 0, 0, 0, 0, 0, 0)
+#define cap_invoke2(to, _a, _b) \
+ cap_invoke(to, 1, _a, _b, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+#define cap_invoke1(to, _a) \
+ cap_invoke(to, 0, _a, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
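+
+// Note (for illustration): each cap_invokeN macro passes the command word
+// plus N-1 further message words, so the argc parameter of cap_invoke()
+// counts only the words after the command; e.g. cap_invoke2(to, cmd, arg)
+// calls cap_invoke(to, 1, cmd, arg, 0, ...).
+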
+/**
+ * \brief Retype a capability.
+ *
+ * Retypes CPtr 'cap' into 2^'objbits' caps of type 'newtype' and places them
+ * into slots starting at slot 'slot' in the CNode, addressed by 'to', with
+ * 'bits' address bits of 'to' valid.
+ *
+ * See also cap_retype(), which wraps this.
+ *
+ * \param root Capability of the CNode to invoke
+ * \param cap Address of cap to retype.
+ * \param newtype Kernel object type to retype to.
+ * \param objbits Size of created objects, for variable-sized types
+ * \param to Address of CNode cap to place retyped caps into.
+ * \param slot Slot in CNode cap to start placement.
+ * \param bits Number of valid address bits in 'to'.
+ *
+ * \return Error code
+ */
+static inline errval_t invoke_cnode_retype(struct capref root, capaddr_t cap,
+ enum objtype newtype, int objbits,
+ capaddr_t to, capaddr_t slot, int bits)
+{
+ assert(cap != CPTR_NULL);
+
+ uint8_t invoke_bits = get_cap_valid_bits(root);
+ capaddr_t invoke_cptr = get_cap_addr(root) >> (CPTR_BITS - invoke_bits);
+
+ assert(newtype <= 0xffff);
+ assert(objbits <= 0xff);
+ assert(bits <= 0xff);
+ return syscall6((invoke_bits << 16) | (CNodeCmd_Retype << 8) | SYSCALL_INVOKE, invoke_cptr, cap,
+ (newtype << 16) | (objbits << 8) | bits,
+ to, slot).error;
+}
+
+/**
+ * \brief Create a capability.
+ *
+ * Create a new capability of type 'type' and size 'objbits'. The new cap will
+ * be placed in the slot 'dest_slot' of the CNode located at 'dest_cnode_cptr'
+ * in the address space rooted at 'root'.
+ *
+ * See also cap_create(), which wraps this.
+ *
+ * \param root Capability of the CNode to invoke.
+ * \param type Kernel object type to create.
+ * \param objbits Size of created object
+ * (ignored for fixed-size objects)
+ * \param dest_cnode_cptr Address of CNode cap, where newly created cap will be
+ * placed into.
+ * \param dest_slot Slot in CNode cap to place new cap.
+ * \param dest_vbits Number of valid address bits in 'dest_cnode_cptr'.
+ *
+ * \return Error code
+ */
+static inline errval_t invoke_cnode_create(struct capref root,
+ enum objtype type, uint8_t objbits,
+ capaddr_t dest_cnode_cptr,
+ capaddr_t dest_slot,
+ uint8_t dest_vbits)
+{
+ /* Pack arguments */
+ assert(dest_cnode_cptr != CPTR_NULL);
+
+ uint8_t invoke_bits = get_cap_valid_bits(root);
+ capaddr_t invoke_cptr = get_cap_addr(root) >> (CPTR_BITS - invoke_bits);
+
+ assert(type <= 0xffff);
+ assert(objbits <= 0xff);
+ assert(dest_vbits <= 0xff);
+
+ return syscall5((invoke_bits << 16) | (CNodeCmd_Create << 8) | SYSCALL_INVOKE,
+ invoke_cptr, (type << 16) | (objbits << 8) | dest_vbits,
+ dest_cnode_cptr, dest_slot).error;
+}
+
+/**
+ * \brief "Mint" a capability.
+ *
+ * Copies CPtr 'from' into slot 'slot' in the CNode, addressed by 'to', within
+ * the address space, rooted at 'root' and with 'tobits' and 'frombits' address
+ * bits of 'to' and 'from' valid, respectively.
+ *
+ * See also cap_mint(), which wraps this.
+ *
+ * \param root Capability of the CNode to invoke
+ * \param to CNode to place copy into.
+ * \param slot Slot in CNode cap to place copy into.
+ * \param from Address of cap to copy.
+ * \param tobits Number of valid address bits in 'to'.
+ * \param frombits Number of valid address bits in 'from'.
+ * \param param1 1st cap-dependent parameter.
+ * \param param2 2nd cap-dependent parameter.
+ *
+ * \return Error code
+ */
+
+//XXX: workaround for inline bug of arm-gcc 4.6.1 and lower
+#if defined(__ARM_ARCH_7A__) && defined(__GNUC__) \
+ && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 && __GNUC_PATCHLEVEL__ <= 1
+static __attribute__((noinline, unused)) errval_t
+#else
+static inline errval_t
+#endif
+invoke_cnode_mint(struct capref root, capaddr_t to,
+ capaddr_t slot, capaddr_t from, int tobits,
+ int frombits, uintptr_t param1,
+ uintptr_t param2)
+{
+ uint8_t invoke_bits = get_cap_valid_bits(root);
+ capaddr_t invoke_cptr = get_cap_addr(root) >> (CPTR_BITS - invoke_bits);
+
+ assert(slot <= 0xffff);
+ assert(tobits <= 0xff);
+ assert(frombits <= 0xff);
+
+ return syscall7((invoke_bits << 16) | (CNodeCmd_Mint << 8) | SYSCALL_INVOKE,
+ invoke_cptr, to, from,
+ (slot << 16) | (tobits << 8) | frombits,
+ param1, param2).error;
+}
+
+/**
+ * \brief Copy a capability.
+ *
+ * Copies CPtr 'from' into slot 'slot' in the CNode, addressed by 'to', within
+ * the address space, rooted at 'root' and with 'tobits' and 'frombits' address
+ * bits of 'to' and 'from' valid, respectively.
+ *
+ * See also cap_copy(), which wraps this.
+ *
+ * \param root Capability of the CNode to invoke
+ * \param to CNode to place copy into.
+ * \param slot Slot in CNode cap to place copy into.
+ * \param from Address of cap to copy.
+ * \param tobits Number of valid address bits in 'to'.
+ * \param frombits Number of valid address bits in 'from'.
+ *
+ * \return Error code
+ */
+//XXX: workaround for inline bug of arm-gcc 4.6.1 and lower
+
+#if defined(__ARM_ARCH_7A__) && defined(__GNUC__) \
+ && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 && __GNUC_PATCHLEVEL__ <= 1
+static __attribute__((noinline, unused)) errval_t
+#else
+static inline errval_t
+#endif
+invoke_cnode_copy(struct capref root, capaddr_t to,
+ capaddr_t slot, capaddr_t from, int tobits,
+ int frombits)
+{
+ uint8_t invoke_bits = get_cap_valid_bits(root);
+ capaddr_t invoke_cptr = get_cap_addr(root) >> (CPTR_BITS - invoke_bits);
+
+ assert(slot <= 0xffff);
+ assert(tobits <= 0xff);
+ assert(frombits <= 0xff);
+
+ return syscall5((invoke_bits << 16) | (CNodeCmd_Copy << 8) | SYSCALL_INVOKE,
+ invoke_cptr, to, from,
+ (slot << 16) | (tobits << 8) | frombits).error;
+}
+
+/**
+ * \brief Delete a capability.
+ *
+ * Delete the capability pointed to by 'cap', with 'bits' address bits
+ * of it valid, from the address space rooted at 'root'.
+ *
+ * \param root Capability of the CNode to invoke
+ * \param cap Address of cap to delete.
+ * \param bits Number of valid bits within 'cap'.
+ *
+ * \return Error code
+ */
+//XXX: workaround for inline bug of arm-gcc 4.6.1 and lower
+#if defined(__ARM_ARCH_7A__) && defined(__GNUC__) \
+ && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 && __GNUC_PATCHLEVEL__ <= 1
+static __attribute__((noinline, unused)) errval_t
+#else
+static inline errval_t
+#endif
+invoke_cnode_delete(struct capref root, capaddr_t cap,
+ int bits)
+{
+ uint8_t invoke_bits = get_cap_valid_bits(root);
+ capaddr_t invoke_cptr = get_cap_addr(root) >> (CPTR_BITS - invoke_bits);
+
+ assert(bits <= 0xff);
+
+ return syscall4((invoke_bits << 16) | (CNodeCmd_Delete << 8) | SYSCALL_INVOKE,
+ invoke_cptr, cap, bits).error;
+}
+//XXX: workaround for inline bug of arm-gcc 4.6.1 and lower
+#if defined(__ARM_ARCH_7A__) && defined(__GNUC__) \
+ && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 && __GNUC_PATCHLEVEL__ <= 1
+static __attribute__((noinline, unused)) errval_t
+#else
+static inline errval_t
+#endif
+invoke_cnode_revoke(struct capref root, capaddr_t cap,
+ int bits)
+{
+ uint8_t invoke_bits = get_cap_valid_bits(root);
+ capaddr_t invoke_cptr = get_cap_addr(root) >> (CPTR_BITS - invoke_bits);
+
+ assert(bits <= 0xff);
+
+ return syscall4((invoke_bits << 16) | (CNodeCmd_Revoke << 8) | SYSCALL_INVOKE,
+ invoke_cptr, cap, bits).error;
+}
+
+//XXX: workaround for inline bug of arm-gcc 4.6.1 and lower
+#if defined(__ARM_ARCH_7A__) && defined(__GNUC__) \
+ && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 && __GNUC_PATCHLEVEL__ <= 1
+static __attribute__((noinline, unused)) errval_t
+#else
+static inline errval_t
+#endif
+invoke_cnode_get_state(struct capref root, capaddr_t cap,
+ int bits, distcap_state_t *ret)
+{
+ uint8_t invoke_bits = get_cap_valid_bits(root);
+ capaddr_t invoke_cptr = get_cap_addr(root) >> (CPTR_BITS - invoke_bits);
+
+ assert (bits <= 0xff);
+
+ struct sysret sysret =
+ syscall4((invoke_bits << 16) | (CNodeCmd_GetState << 8) | SYSCALL_INVOKE,
+ invoke_cptr, cap, bits);
+
+ assert(ret != NULL);
+ if (err_is_ok(sysret.error)) {
+ *ret = sysret.value;
+ }
+ else {
+ *ret = 0;
+ }
+ return sysret.error;
+}
+
+//XXX: workaround for inline bug of arm-gcc 4.6.1 and lower
+#if defined(__ARM_ARCH_7A__) && defined(__GNUC__) \
+ && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 && __GNUC_PATCHLEVEL__ <= 1
+static __attribute__((noinline, unused)) errval_t
+#else
+static inline errval_t
+#endif
+invoke_vnode_map(struct capref ptable, capaddr_t slot, capaddr_t from,
+ int frombits, uintptr_t flags, uintptr_t offset,
+ uintptr_t pte_count)
+{
+ uint8_t invoke_bits = get_cap_valid_bits(ptable);
+ capaddr_t invoke_cptr = get_cap_addr(ptable) >> (CPTR_BITS - invoke_bits);
+
+ assert(slot <= 0xffff);
+ assert(frombits <= 0xff);
+
+ // XXX: needs check of flags, offset, and pte_count sizes
+ return syscall7((invoke_bits << 16) | (VNodeCmd_Map << 8) | SYSCALL_INVOKE,
+ invoke_cptr, from, (slot << 16) | frombits,
+ flags, offset, pte_count).error;
+}
+
+//XXX: workaround for inline bug of arm-gcc 4.6.1 and lower
+#if defined(__ARM_ARCH_7A__) && defined(__GNUC__) \
+ && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 && __GNUC_PATCHLEVEL__ <= 1
+static __attribute__((noinline, unused)) errval_t
+#else
+static inline errval_t
+#endif
+invoke_vnode_unmap(struct capref cap, capaddr_t mapping_cptr, int mapping_bits,
+ size_t entry, size_t pte_count)
+{
+ uint8_t invoke_bits = get_cap_valid_bits(cap);
+ capaddr_t invoke_cptr = get_cap_addr(cap) >> (CPTR_BITS - invoke_bits);
+
+ pte_count -= 1;
+
+ assert(entry < 1024);
+ assert(pte_count < 1024);
+ assert(mapping_bits <= 0xff);
+
+ return syscall4((invoke_bits << 16) | (VNodeCmd_Unmap << 8) | SYSCALL_INVOKE,
+ invoke_cptr, mapping_cptr,
+ ((mapping_bits & 0xff)<<20) | ((pte_count & 0x3ff)<<10) |
+ (entry & 0x3ff)).error;
+}
+
+/**
+ * \brief Return the physical address and size of a frame capability
+ *
+ * \param frame CSpace address of frame capability
+ * \param ret frame_identity struct filled in with relevant data
+ *
+ * \return Error code
+ */
+
+//XXX: workaround for inline bug of arm-gcc 4.6.1 and lower
+#if defined(__ARM_ARCH_7A__) && defined(__GNUC__) \
+ && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 && __GNUC_PATCHLEVEL__ <= 1
+static __attribute__((noinline, unused)) errval_t
+#else
+static inline errval_t
+#endif
+invoke_frame_identify (struct capref frame, struct frame_identity *ret)
+{
+ uint8_t invoke_bits = get_cap_valid_bits(frame);
+ capaddr_t invoke_cptr = get_cap_addr(frame) >> (CPTR_BITS - invoke_bits);
+
+ uintptr_t arg1 = ((uintptr_t)invoke_bits) << 16;
+ arg1 |= ((uintptr_t)FrameCmd_Identify<<8);
+ arg1 |= (uintptr_t)SYSCALL_INVOKE;
+ struct sysret sysret =
+ syscall2(arg1, //(invoke_bits << 16) | (FrameCmd_Identify << 8) | SYSCALL_INVOKE,
+ invoke_cptr);
+
+ assert(ret != NULL);
+ if (err_is_ok(sysret.error)) {
+ ret->base = sysret.value & (~BASE_PAGE_MASK);
+ ret->bits = sysret.value & BASE_PAGE_MASK;
+ return sysret.error;
+ }
+
+ ret->base = 0;
+ ret->bits = 0;
+ return sysret.error;
+}
+
+/**
+ * \brief Return the physical address and size of a frame capability
+ *
+ * \param frame CSpace address of frame capability
+ * \param ret frame_identity struct filled in with relevant data
+ *
+ * \return Error code
+ */
+
+//XXX: workaround for inline bug of arm-gcc 4.6.1 and lower
+#if defined(__ARM_ARCH_7A__) && defined(__GNUC__) \
+ && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 && __GNUC_PATCHLEVEL__ <= 1
+static __attribute__((noinline, unused)) errval_t
+#else
+static inline errval_t
+#endif
+invoke_frame_modify_flags (struct capref frame, uintptr_t offset,
+ uintptr_t pages, uintptr_t flags)
+{
+ uint8_t invoke_bits = get_cap_valid_bits(frame);
+ capaddr_t invoke_cptr = get_cap_addr(frame) >> (CPTR_BITS - invoke_bits);
+
+ uintptr_t arg1 = ((uintptr_t)invoke_bits) << 16;
+ arg1 |= ((uintptr_t)FrameCmd_ModifyFlags<<8);
+ arg1 |= (uintptr_t)SYSCALL_INVOKE;
+
+ return syscall5(arg1, invoke_cptr, offset, pages, flags).error;
+}
+
+static inline errval_t invoke_iocap_in(struct capref iocap, enum io_cmd cmd,
+ uint16_t port, uint32_t *data)
+{
+ // Not strictly applicable on ARM
+// USER_PANIC("NYI");
+ return LIB_ERR_NOT_IMPLEMENTED;
+}
+
+static inline errval_t invoke_iocap_out(struct capref iocap, enum io_cmd cmd,
+ uint16_t port, uint32_t data)
+{
+ // Not strictly applicable on ARM
+// USER_PANIC("NYI");
+ return LIB_ERR_NOT_IMPLEMENTED;
+}
+
+
+/**
+ * \brief Setup a dispatcher, possibly making it runnable
+ *
+ * \param dispatcher Address of dispatcher capability
+ * \param domdispatcher Address of existing dispatcher for domain ID
+ * \param cspace_root Root of CSpace for new dispatcher
+ * \param cspace_root_bits Number of valid bits in cspace_root
+ * \param vspace_root Root of VSpace for new dispatcher
+ * \param dispatcher_frame Frame capability for dispatcher structure
+ * \param run Make runnable if true
+ *
+ * Any arguments of CPTR_NULL are ignored.
+ *
+ * \return Error code
+ */
+static inline errval_t
+invoke_dispatcher(struct capref dispatcher, struct capref domdispatcher,
+ struct capref cspace, struct capref vspace,
+ struct capref dispframe, bool run)
+{
+ uint8_t root_vbits = get_cap_valid_bits(cspace);
+ capaddr_t root_caddr = get_cap_addr(cspace) >> (CPTR_BITS - root_vbits);
+ capaddr_t vtree_caddr = get_cap_addr(vspace);
+ capaddr_t disp_caddr = get_cap_addr(dispframe);
+ capaddr_t dd_caddr = get_cap_addr(domdispatcher);
+ uint8_t invoke_bits = get_cap_valid_bits(dispatcher);
+ capaddr_t invoke_cptr = get_cap_addr(dispatcher) >> (CPTR_BITS - invoke_bits);
+
+ assert(root_vbits <= 0xff);
+
+ return syscall7((invoke_bits << 16) | (DispatcherCmd_Setup << 8) | SYSCALL_INVOKE,
+ invoke_cptr, dd_caddr, root_caddr,
+ (run << 8) | (root_vbits & 0xff), vtree_caddr,
+ disp_caddr).error;
+}
+
+/**
+ * \brief Setup a VM guest DCB
+ *
+ * \param dcb Dispatcher capability
+ */
+static inline errval_t invoke_dispatcher_setup_guest(struct capref dispatcher,
+ capaddr_t ep_cap,
+ capaddr_t vnode,
+ capaddr_t vmkit_guest,
+ capaddr_t guest_control_cap)
+{
+ return LIB_ERR_NOT_IMPLEMENTED;
+}
+
+static inline errval_t invoke_irqtable_alloc_vector(struct capref irqcap, int *retirq)
+{
+ uint8_t invoke_bits = get_cap_valid_bits(irqcap);
+ capaddr_t invoke_cptr = get_cap_addr(irqcap) >> (CPTR_BITS - invoke_bits);
+
+ struct sysret ret = syscall2(
+ (invoke_bits << 16) | (IRQTableCmd_Alloc << 8) | SYSCALL_INVOKE,
+ invoke_cptr);
+ if (err_is_ok(ret.error)) {
+ *retirq = ret.value;
+ } else {
+ *retirq = 0;
+ }
+ return ret.error;
+}
+
+static inline errval_t invoke_irqtable_set(struct capref irqcap, int irq,
+ struct capref ep)
+{
+ uint8_t invoke_bits = get_cap_valid_bits(irqcap);
+ capaddr_t invoke_cptr = get_cap_addr(irqcap) >> (CPTR_BITS - invoke_bits);
+
+ return syscall4((invoke_bits << 16) | (IRQTableCmd_Set << 8) | SYSCALL_INVOKE,
+ invoke_cptr, irq, get_cap_addr(ep)).error;
+}
+
+static inline errval_t invoke_irqtable_delete(struct capref irqcap, int irq)
+{
+ uint8_t invoke_bits = get_cap_valid_bits(irqcap);
+ capaddr_t invoke_cptr = get_cap_addr(irqcap) >> (CPTR_BITS - invoke_bits);
+
+ return syscall3((invoke_bits << 16) | (IRQTableCmd_Delete << 8) | SYSCALL_INVOKE,
+ invoke_cptr, irq).error;
+}
+
+static inline errval_t invoke_kernel_get_core_id(struct capref kern_cap,
+ coreid_t *core_id)
+{
+ assert(core_id != NULL);
+
+ uint8_t invoke_bits = get_cap_valid_bits(kern_cap);
+ capaddr_t invoke_cptr = get_cap_addr(kern_cap) >> (CPTR_BITS - invoke_bits);
+
+ struct sysret sysret =
+ syscall2((invoke_bits << 16) | (KernelCmd_Get_core_id << 8) | SYSCALL_INVOKE,
+ invoke_cptr);
+
+ if (sysret.error == SYS_ERR_OK) {
+ *core_id = sysret.value;
+ }
+
+ return sysret.error;
+}
+
+static inline errval_t invoke_dispatcher_dump_ptables(struct capref dispcap)
+{
+ uint8_t invoke_bits = get_cap_valid_bits(dispcap);
+ capaddr_t invoke_cptr = get_cap_addr(dispcap) >> (CPTR_BITS - invoke_bits);
+
+ return syscall2((invoke_bits << 16) | (DispatcherCmd_DumpPTables << 8) |
+ SYSCALL_INVOKE, invoke_cptr).error;
+}
+
+static inline errval_t
+invoke_dispatcher_properties(
+ struct capref dispatcher,
+ enum task_type type, unsigned long deadline,
+ unsigned long wcet, unsigned long period,
+ unsigned long release, unsigned short weight
+ )
+{
+ uint8_t invoke_bits = get_cap_valid_bits(dispatcher);
+ capaddr_t invoke_cptr = get_cap_addr(dispatcher) >> (CPTR_BITS - invoke_bits);
+
+ if (weight > 0xffff)
+ {
+ weight = 0xffff;
+ }
+
+ return syscall7((invoke_bits << 16) | (DispatcherCmd_Properties << 8) | SYSCALL_INVOKE,
+ invoke_cptr,
+ (type << 16) | weight,
+ deadline, wcet, period, release).error;
+}
+
+static inline errval_t
+invoke_idcap_identify(
+ struct capref idcap,
+ idcap_id_t *id
+ )
+{
+ assert(id != NULL);
+
+ uint8_t invoke_bits = get_cap_valid_bits(idcap);
+ capaddr_t invoke_cptr = get_cap_addr(idcap) >> (CPTR_BITS - invoke_bits);
+
+ // user-space pointer 'id' is directly written to by kernel.
+ struct sysret sysret =
+ syscall3((invoke_bits << 16) | (IDCmd_Identify << 8) | SYSCALL_INVOKE,
+ invoke_cptr, (uintptr_t) id);
+
+ return sysret.error;
+}
+
+static inline errval_t invoke_get_global_paddr(struct capref kernel_cap, genpaddr_t* global)
+{
+ uint8_t invoke_bits = get_cap_valid_bits(kernel_cap);
+ capaddr_t invoke_cptr = get_cap_addr(kernel_cap) >> (CPTR_BITS - invoke_bits);
+ uintptr_t invocation = (invoke_bits << 16)
+ | (KernelCmd_GetGlobalPhys << 8)
+ | SYSCALL_INVOKE;
+ struct sysret sysret = syscall2(invocation, invoke_cptr);
+ if (err_is_ok(sysret.error)) {
+ *global = sysret.value;
+ }
+
+ return sysret.error;
+}
+
+#endif
--- /dev/null
+/**
+ * \file
+ * \brief
+ */
+
+/*
+ * Copyright (c) 2015, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_AARCH64_BARRELFISH_LMP_CHAN_H
+#define ARCH_AARCH64_BARRELFISH_LMP_CHAN_H
+
+#include <barrelfish/syscall_arch.h>
+#include <barrelfish/caddr.h>
+#include <barrelfish/debug.h> // for USER_PANIC()
+#include <barrelfish_kpi/lmp.h>
+#include <barrelfish_kpi/syscalls.h>
+
+/**
+ * \brief Send a message on the given LMP channel, if possible
+ *
+ * Non-blocking, may fail if there is no space in the receiver's endpoint.
+ *
+ * \param ep Remote endpoint cap
+ * \param flags LMP send flags
+ * \param send_cap (Optional) capability to send with the message
+ * \param length_words Length of the message in words; payload beyond this
+ * size will not be delivered
+ * \param arg1..N Message payload
+ */
+static inline errval_t
+lmp_ep_send(
+ struct capref ep,
+ lmp_send_flags_t flags,
+ struct capref send_cap,
+ uint8_t length_words,
+ uintptr_t arg1,
+ uintptr_t arg2,
+ uintptr_t arg3,
+ uintptr_t arg4,
+ uintptr_t arg5,
+ uintptr_t arg6,
+ uintptr_t arg7,
+ uintptr_t arg8,
+ uintptr_t arg9
+ )
+{
+ USER_PANIC("NYI!");
+ uint8_t invoke_bits = get_cap_valid_bits(ep);
+ capaddr_t invoke_cptr = get_cap_addr(ep) >> (CPTR_BITS - invoke_bits);
+
+ uint8_t send_bits = get_cap_valid_bits(send_cap);
+ capaddr_t send_cptr = get_cap_addr(send_cap) >> (CPTR_BITS - send_bits);
+
+ assert(length_words <= LMP_MSG_LENGTH);
+
+ return syscall12((length_words << 28) | ((flags & 0xf) << 24) |
+ (invoke_bits << 16) | (send_bits << 8) | SYSCALL_INVOKE,
+ invoke_cptr, send_cptr,
+ arg1, arg2, arg3,
+ arg4, arg5, arg6,
+ arg7, arg8, arg9).error;
+}
+
+#define lmp_ep_send9(ep, flags, send_cap, a, b, c, d, e, f, g, h, i) \
+ lmp_ep_send((ep),(flags),(send_cap),9,(a),(b),(c),(d),(e),(f),(g),(h),(i))
+#define lmp_ep_send8(ep, flags, send_cap, a, b, c, d, e, f, g, h) \
+ lmp_ep_send((ep),(flags),(send_cap),8,(a),(b),(c),(d),(e),(f),(g),(h),0)
+#define lmp_ep_send7(ep, flags, send_cap, a, b, c, d, e, f, g) \
+ lmp_ep_send((ep),(flags),(send_cap),7,(a),(b),(c),(d),(e),(f),(g),0,0)
+#define lmp_ep_send6(ep,flags,send_cap,a,b,c,d,e,f) \
+ lmp_ep_send((ep),(flags),(send_cap),6,(a),(b),(c),(d),(e),(f),0,0,0)
+#define lmp_ep_send5(ep,flags,send_cap,a,b,c,d,e) \
+ lmp_ep_send((ep),(flags),(send_cap),5,(a),(b),(c),(d),(e),0,0,0,0)
+#define lmp_ep_send4(ep,flags,send_cap,a,b,c,d) \
+ lmp_ep_send((ep),(flags),(send_cap),4,(a),(b),(c),(d),0,0,0,0,0)
+#define lmp_ep_send3(ep,flags,send_cap,a,b,c) \
+ lmp_ep_send((ep),(flags),(send_cap),3,(a),(b),(c),0,0,0,0,0,0)
+#define lmp_ep_send2(ep,flags,send_cap,a,b) \
+ lmp_ep_send((ep),(flags),(send_cap),2,(a),(b),0,0,0,0,0,0,0)
+#define lmp_ep_send1(ep,flags,send_cap,a) \
+ lmp_ep_send((ep),(flags),(send_cap),1,(a),0,0,0,0,0,0,0,0)
+#define lmp_ep_send0(ep,flags,send_cap) \
+ lmp_ep_send((ep),(flags),(send_cap),0,0,0,0,0,0,0,0,0,0)
+
+#define lmp_chan_send(lc,flags,send_cap,len,a,b,c,d,e,f,g,h,i) \
+ lmp_ep_send((lc)->remote_cap,(flags),(send_cap),(len), \
+ (a),(b),(c),(d),(e),(f),(g),(h),(i))
+
+#define lmp_chan_send9(lc,flags,send_cap,a,b,c,d,e,f,g,h,i) \
+ lmp_ep_send9((lc)->remote_cap,(flags),(send_cap), \
+ (a),(b),(c),(d),(e),(f),(g),(h),(i))
+#define lmp_chan_send8(lc,flags,send_cap,a,b,c,d,e,f,g,h) \
+ lmp_ep_send8((lc)->remote_cap,(flags),(send_cap), \
+ (a),(b),(c),(d),(e),(f),(g),(h))
+#define lmp_chan_send7(lc,flags,send_cap,a,b,c,d,e,f,g) \
+ lmp_ep_send7((lc)->remote_cap,(flags),(send_cap), \
+ (a),(b),(c),(d),(e),(f),(g))
+#define lmp_chan_send6(lc,flags,send_cap,a,b,c,d,e,f) \
+ lmp_ep_send6((lc)->remote_cap,(flags),(send_cap), \
+ (a),(b),(c),(d),(e),(f))
+#define lmp_chan_send5(lc,flags,send_cap,a,b,c,d,e) \
+ lmp_ep_send5((lc)->remote_cap,(flags),(send_cap), \
+ (a),(b),(c),(d),(e))
+#define lmp_chan_send4(lc,flags,send_cap,a,b,c,d) \
+ lmp_ep_send4((lc)->remote_cap,(flags),(send_cap), (a),(b),(c),(d))
+#define lmp_chan_send3(lc,flags,send_cap,a,b,c) \
+ lmp_ep_send3((lc)->remote_cap,(flags),(send_cap), (a),(b),(c))
+#define lmp_chan_send2(lc,flags,send_cap,a,b) \
+ lmp_ep_send2((lc)->remote_cap,(flags),(send_cap), (a),(b))
+#define lmp_chan_send1(lc,flags,send_cap,a) \
+ lmp_ep_send1((lc)->remote_cap,(flags),(send_cap),(a))
+#define lmp_chan_send0(lc,flags,send_cap) \
+ lmp_ep_send0((lc)->remote_cap,(flags),(send_cap))
+
+#endif
--- /dev/null
+/**
+ * \file
+ * \brief pmap management wrappers
+ */
+
+/*
+ * Copyright (c) 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_AARCH64_BARRELFISH_PMAP_H
+#define ARCH_AARCH64_BARRELFISH_PMAP_H
+
+#include <target/aarch64/barrelfish/pmap_target.h>
+
+#define ARCH_DEFAULT_PMAP_SIZE sizeof(struct pmap_aarch64)
+
+errval_t pmap_init(struct pmap *p, struct vspace *v, struct capref vnode,
+ struct slot_allocator *opt_slot_alloc);
+errval_t pmap_current_init(bool);
+
+#endif // ARCH_AARCH64_BARRELFISH_PMAP_H
--- /dev/null
+/**
+ * \file
+ * \brief User-side system call implementation
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_AARCH64_BARRELFISH_SYSCALL_H
+#define ARCH_AARCH64_BARRELFISH_SYSCALL_H
+
+//
+// This is the actual system call function. Because the return
+// value is a structure, r0 is set up to point to the return
+// structure. The first system call argument is supplied at the end
+// of the argument list and moved to r0 before use in the syscall.
+// This simplifies the amount of swizzling involved therein, as
+// r1 = arg1, r2 = arg2, r3 = arg3, and the remaining args,
+// including arg0, are on the stack.
+//
+extern struct sysret
+syscall(uintptr_t b, uintptr_t c, uintptr_t d, uintptr_t e,
+ uintptr_t f, uintptr_t g, uintptr_t h, uintptr_t i,
+ uintptr_t j, uintptr_t k, uintptr_t l, uintptr_t a);
+
+#define syscallx(a,b,c,d,e,f,g,h,i,j,k,l) \
+ syscall(b,c,d,e,f,g,h,i,j,k,l,a)
+
+//
+// System call argument 0 is encoded thus:
+//
+// arg[3:0] = syscall ordinal (e.g. SYSCALL_YIELD)
+// arg[7:4] = number of system call arguments (for sanity checking)
+// arg[31:8] = SYSCALL_INVOKE arguments | do_not_care
+//
+
+//C_ASSERT(SYSCALL_COUNT <= 0xf);
+#define sysord(a,n) (a) | ((n) << 4)
+
+// The following macros add the argument count to arg0
+
+#define syscall12(a,b,c,d,e,f,g,h,i,j,k,l) \
+    syscallx(sysord(a,12),(b),(c),(d),(e),(f),(g),(h),(i),(j),(k),(l))
+
+#define syscall11(a,b,c,d,e,f,g,h,i,j,k) \
+    syscallx(sysord(a,11),(b),(c),(d),(e),(f),(g),(h),(i),(j),(k),0)
+
+#define syscall10(a,b,c,d,e,f,g,h,i,j) \
+ syscallx(sysord(a,10),(b),(c),(d),(e),(f),(g),(h),(i),(j),0,0)
+
+#define syscall9(a,b,c,d,e,f,g,h,i) \
+ syscallx(sysord(a,9),(b),(c),(d),(e),(f),(g),(h),(i),0,0,0)
+
+#define syscall8(a,b,c,d,e,f,g,h) \
+ syscallx(sysord(a,8),(b),(c),(d),(e),(f),(g),(h),0,0,0,0)
+
+#define syscall7(a,b,c,d,e,f,g) \
+ syscallx(sysord(a,7),(b),(c),(d),(e),(f),(g),0,0,0,0,0)
+
+#define syscall6(a,b,c,d,e,f) \
+ syscallx(sysord(a,6),(b),(c),(d),(e),(f),0,0,0,0,0,0)
+
+#define syscall5(a,b,c,d,e) \
+ syscallx(sysord(a,5),(b),(c),(d),(e),0,0,0,0,0,0,0)
+
+#define syscall4(a,b,c,d) \
+ syscallx(sysord(a,4),(b),(c),(d),0,0,0,0,0,0,0,0)
+
+#define syscall3(a,b,c) \
+ syscallx(sysord(a,3),(b),(c),0,0,0,0,0,0,0,0,0)
+
+#define syscall2(a,b) \
+ syscallx(sysord(a,2),(b),0,0,0,0,0,0,0,0,0,0)
+
+#define syscall1(a) \
+ syscallx(sysord(a,1),0,0,0,0,0,0,0,0,0,0,0)
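+
+// Illustrative expansion: syscall2(a,b) becomes
+//   syscall((b),0,0,0,0,0,0,0,0,0,0, (a)|(2<<4));
+// the argument count lands in bits [7:4] of argument 0, and the packed
+// word is passed last so the assembly stub can move it into place as
+// described above.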
+
+
+#endif
--- /dev/null
+/**
+ * \file
+ * \brief
+ */
+
+/*
+ * Copyright (c) 2015, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_AARCH64_BARRELFISH_KPI_ASM_INLINES_H
+#define ARCH_AARCH64_BARRELFISH_KPI_ASM_INLINES_H
+
+#endif // ARCH_AARCH64_BARRELFISH_KPI_ASM_INLINES_H
--- /dev/null
+/**
+ * \file
+ * \brief Arch specific CPU declarations
+ */
+
+/*
+ * Copyright (c) 2015, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_AARCH64_BARRELFISH_KPI_CPU_H
+#define ARCH_AARCH64_BARRELFISH_KPI_CPU_H
+
+/// This CPU supports lazy FPU context switching?
+#undef FPU_LAZY_CONTEXT_SWITCH
+
+#endif
--- /dev/null
+/**
+ * \file
+ * \brief Architecture specific dispatcher struct shared between kernel and user
+ */
+
+/*
+ * Copyright (c) 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_AARCH64_BARRELFISH_KPI_DISPATCHER_SHARED_ARCH_H
+#define ARCH_AARCH64_BARRELFISH_KPI_DISPATCHER_SHARED_ARCH_H
+
+#include <target/aarch64/barrelfish_kpi/dispatcher_shared_target.h>
+
+/**
+ * \brief Returns whether dispatcher is currently disabled, given IP.
+ *
+ * \param disp Pointer to dispatcher
+ * \param ip User-level instruction pointer.
+ *
+ * \return true if dispatcher disabled, false otherwise.
+ */
+static inline bool dispatcher_is_disabled_ip(dispatcher_handle_t handle,
+ uintptr_t rip)
+{
+ struct dispatcher_shared_generic *disp =
+ get_dispatcher_shared_generic(handle);
+ /* one crit_pc pair */
+ struct dispatcher_shared_aarch64 *dispaarch64 =
+ get_dispatcher_shared_aarch64(handle);
+ return disp->disabled ||
+ (dispaarch64->crit_pc_low <= rip && rip < dispaarch64->crit_pc_high);
+}
+
+static inline arch_registers_state_t*
+dispatcher_get_enabled_save_area(dispatcher_handle_t handle)
+{
+ return &((struct dispatcher_shared_aarch64 *)handle)->enabled_save_area;
+}
+
+static inline arch_registers_state_t*
+dispatcher_get_disabled_save_area(dispatcher_handle_t handle)
+{
+ return &((struct dispatcher_shared_aarch64 *)handle)->disabled_save_area;
+}
+
+static inline arch_registers_state_t*
+dispatcher_get_trap_save_area(dispatcher_handle_t handle)
+{
+ return &((struct dispatcher_shared_aarch64 *)handle)->trap_save_area;
+}
+
+#endif // ARCH_AARCH64_BARRELFISH_KPI_DISPATCHER_SHARED_ARCH_H
--- /dev/null
+/**
+ * \file
+ * \brief
+ */
+
+/*
+ * Copyright (c) 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_AARCH64_BARRELFISH_KPI_FLAGS_H
+#define ARCH_AARCH64_BARRELFISH_KPI_FLAGS_H
+
+// XXX: TODO: fix these for aarch64
+#define ARM_MODE_USR 0x10
+#define ARM_MODE_FIQ 0x11
+#define ARM_MODE_IRQ 0x12
+#define ARM_MODE_SVC 0x13
+#define ARM_MODE_ABT 0x17
+#define ARM_MODE_UND 0x1b
+#define ARM_MODE_SYS 0x1f
+#define ARM_MODE_MASK 0x1f
+#define ARM_MODE_PRIV 0x0f
+
+#define CPSR_IF_MASK 0xc0
+#define CPSR_I_MASK 0x80
+#define CPSR_F_MASK 0x40
+
+#endif // ARCH_AARCH64_BARRELFISH_KPI_FLAGS_H
--- /dev/null
+/**
+ * \file
+ * \brief Generic include for a bunch of arch specific files
+ */
+
+/*
+ * Copyright (c) 2015, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_AARCH64_BARRELFISH_KPI_GENERIC_H
+#define ARCH_AARCH64_BARRELFISH_KPI_GENERIC_H
+
+#include <barrelfish_kpi/flags_arch.h>
+#include <barrelfish_kpi/spinlocks_arch.h>
+#include <barrelfish_kpi/unknown_arch.h>
+
+#endif // ARCH_AARCH64_BARRELFISH_KPI_GENERIC_H
--- /dev/null
+/**
+ * \file
+ * \brief Arch specific LMP declarations
+ */
+
+/*
+ * Copyright (c) 2015, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_AARCH64_BARRELFISH_KPI_LMP_H
+#define ARCH_AARCH64_BARRELFISH_KPI_LMP_H
+
+/**
+ * \brief Maximum total length of LMP and LRPC messages (payload)
+ *
+ * Determined by number of registers available to transfer messages.
+ * XXX: TODO: figure out numbers here
+ */
+#define LMP_MSG_LENGTH 9
+#define LRPC_MSG_LENGTH 0
+
+#endif // ARCH_AARCH64_BARRELFISH_KPI_LMP_H
--- /dev/null
+/**
+ * \file
+ * \brief Arch specific paging definitions
+ */
+
+/*
+ * Copyright (c) 2010, 2015, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_AARCH64_BARRELFISH_KPI_PAGING_H
+#define ARCH_AARCH64_BARRELFISH_KPI_PAGING_H
+
+#if defined(__ARM_ARCH_8A__)
+#include <target/arm/barrelfish_kpi/paging_arm_v8.h>
+#else
+#error "Missing ARM Paging header file"
+#endif
+
+#endif // ARCH_AARCH64_BARRELFISH_KPI_PAGING_H
--- /dev/null
+/**
+ * \file
+ * \brief architecture-specific registers code
+ */
+
+/*
+ * Copyright (c) 2015, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_AARCH64_BARRELFISH_KPI_REGISTERS_H
+#define ARCH_AARCH64_BARRELFISH_KPI_REGISTERS_H
+
+// TODO: update for aarch64
+
+#ifndef __ASSEMBLER__
+#include <stddef.h> // for offsetof
+//#include <barrelfish/curdispatcher_arch.h> // XXX For curdispatcher()
+#include <barrelfish_kpi/types.h> // for lvaddr_t
+#endif
+
+//
+// Offsets of saved registers in save area.
+//
+#define CPSR_REG 0
+#define X0_REG 1
+#define X1_REG 2
+#define X2_REG 3
+#define X3_REG 4
+#define X4_REG 5
+#define X5_REG 6
+#define X6_REG 7
+#define X7_REG 8
+#define X8_REG 9
+#define X9_REG 10
+#define X10_REG 11
+#define X11_REG 12
+#define X12_REG 13
+#define X13_REG 14
+#define X14_REG 15
+#define X15_REG 16
+#define X16_REG 17
+#define X17_REG 18
+#define X18_REG 19
+#define X19_REG 20
+#define X20_REG 21
+#define X21_REG 22
+#define X22_REG 23
+#define X23_REG 24
+#define X24_REG 25
+#define X25_REG 26
+#define X26_REG 27
+#define X27_REG 28
+#define X28_REG 29
+#define X29_REG 30
+#define SP_REG 31
+#define LR_REG 32
+#define PC_REG 33
+
+#define NUM_REGS 34 /* cpsr, x0-x30, sp, pc */
+#define NUM_FPU_REGS 0
+#define ARCH_NUMREGS NUM_REGS
+
+#define RIP_REG PC_REG /* pc == rip.x86_64 */
+#define RSP_REG SP_REG /* sp == rsp.x86_64 */
+
+/// Register used in system calls to encode function and arg count
+#define SYSCALL_REG 0
+
+//
+// Helpers for pasting system reserved register names
+//
+#define REG_OFFSET_CONCAT(x) x ## _REG
+#define REG_OFFSET(name) REG_OFFSET_CONCAT(name)
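+//
+// For example, with THREAD_REGISTER defined as X9 by the build flags,
+// REG_OFFSET(THREAD_REGISTER) expands to X9_REG (i.e. 10); the static
+// assertion further down uses this to check that the save-area slot
+// matches the rtls field of union registers_aarch64.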
+
+#define REG_NAME(ord)
+
+#ifndef __ASSEMBLER__
+
+union registers_aarch64 {
+ struct registers_aarch64_named {
+ uint64_t cpsr;
+ uint64_t r0, r1, r2, r3;
+ uint64_t r4, r5, r6, r7, r8;
+ uint64_t rtls; // r9 is thread local storage
+ uint64_t r10; // r10 is for global offset table base.
+ uint64_t r11, r12, r13, r14;
+ uint64_t r15, r16, r17, r18;
+ uint64_t r19, r20, r21, r22;
+ uint64_t r23, r24, r25, r26;
+ uint64_t r27, r28, r29;
+ uint64_t stack; // sp
+ uint64_t link; // x30
+ uint64_t pc; // pc
+ } named;
+ struct registers_aarch64_syscall_args {
+ uint64_t cpsr;
+ uint64_t arg0, arg1, arg2, arg3;
+ uint64_t arg4, arg5, arg6, arg7, arg8;
+ uint64_t arg9;
+ uint64_t arg10;
+ uint64_t fp;
+ uint64_t arg11, arg12, arg13, arg14;
+ uint64_t arg15, arg16, arg17, arg18;
+ uint64_t arg19, arg20, arg21, arg22;
+ uint64_t arg23, arg24, arg25, arg26;
+ uint64_t arg27, arg28;
+ uint64_t stack;
+ uint64_t link;
+ uint64_t pc;
+ } syscall_args;
+ uint64_t regs[sizeof(struct registers_aarch64_named) / sizeof(uint64_t)];
+};
+
+STATIC_ASSERT_SIZEOF(union registers_aarch64, 34 * 8);
+
+STATIC_ASSERT((REG_OFFSET(THREAD_REGISTER) * sizeof(uint64_t)) == offsetof(struct registers_aarch64_named, rtls), "Thread register conflict");
+
+
+///< Opaque handle for the register state
+typedef union registers_aarch64 arch_registers_state_t;
+
+///< Opaque handle for the FPU register state
+typedef void *arch_registers_fpu_state_t;
+
+static inline void
+registers_set_entry(arch_registers_state_t *regs, lvaddr_t entry)
+{
+ regs->named.pc = (uint64_t)entry;
+}
+
+static inline void
+registers_set_param(arch_registers_state_t *regs, uint64_t param)
+{
+ regs->named.r0 = param;
+}
+
+static inline void
+registers_get_param(arch_registers_state_t *regs, uint64_t *param)
+{
+ *param = regs->named.r0;
+}
+
+static inline uint64_t
+registers_get_ip(arch_registers_state_t *regs)
+{
+ return regs->named.pc;
+}
+
+static inline uint64_t
+registers_get_sp(arch_registers_state_t *regs)
+{
+ return regs->named.stack;
+}
+
+#endif // __ASSEMBLER__
+
+#endif // ARCH_AARCH64_BARRELFISH_KPI_REGISTERS_H
--- /dev/null
+/**
+ * \file
+ * \brief
+ */
+
+/*
+ * Copyright (c) 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_ARM_BARRELFISH_KPI_SPINLOCKS_H
+#define ARCH_ARM_BARRELFISH_KPI_SPINLOCKS_H
+
+#include <barrelfish_kpi/asm_inlines_arch.h>
+
+typedef volatile uint32_t spinlock_t;
+
+static inline void acquire_spinlock(spinlock_t *spinlock)
+{
+ // TODO
+}
+
+static inline void release_spinlock(spinlock_t *spinlock)
+{
+ // TODO
+}
+
+#endif // ARCH_ARM_BARRELFISH_KPI_SPINLOCKS_H
--- /dev/null
+/**
+ * \file
+ * \brief Not sure where to put these definitions
+ */
+
+/*
+ * Copyright (c) 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_AARCH64_BARRELFISH_KPI_UNKNOWN_H
+#define ARCH_AARCH64_BARRELFISH_KPI_UNKNOWN_H
+
+#ifndef IN_KERNEL
+
+// XXX: this code shouldn't be in the KPI, and it should be living behind a clean portability layer!
+// required for lib/lwip/src/barrelfish/idc_barrelfish.c
+
+#include <assert.h>
+
+static inline void mfence(void)
+{
+ assert(!"mfence() NYI for ARM");
+}
+
+static inline void cache_flush_range(void *base, size_t len)
+{
+ assert(!"cache_flush_range() NYI for ARM");
+}
+
+
+static inline uint64_t rdtsc(void)
+{
+ assert(!"rdtsc() NYI for ARM");
+ return 0;
+}
+
+
+#endif
+
+#endif // ARCH_AARCH64_BARRELFISH_KPI_UNKNOWN_H
--- /dev/null
+/**
+ * \file
+ * \brief Arch specific bench include.
+ */
+
+/*
+ * Copyright (c) 2015, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_AARCH64_BARRELFISH_BENCH_H
+#define ARCH_AARCH64_BARRELFISH_BENCH_H
+
+#include <barrelfish/sys_debug.h>
+#include <bench/bench.h>
+#include <stdio.h>
+
+extern uint64_t tsc_hz;
+void bench_arch_init(void);
+
+
+/**
+ * \brief Take a timestamp
+ */
+static inline cycles_t bench_tsc(void)
+{
+ STATIC_ASSERT_SIZEOF(cycles_t, sizeof(uintptr_t));
+ cycles_t tsc;
+ sys_debug_hardware_timer_read((uintptr_t *)&tsc);
+ return tsc;
+}
+
+uint64_t bench_tsc_to_ms(cycles_t tsc);
+uint64_t bench_tsc_to_us(cycles_t tsc);
+uint64_t bench_tsc_per_us(void);
+uint64_t bench_tsc_per_ms(void);
+#endif // ARCH_AARCH64_BARRELFISH_BENCH_H
--- /dev/null
+/* $NetBSD: float.h,v 1.6 2005/12/11 12:16:47 christos Exp $ */
+/* $NetBSD: float_ieee754.h,v 1.8 2005/12/11 12:25:20 christos Exp $ */
+
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)float.h 8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _ARM_FLOAT_H_
+#define _ARM_FLOAT_H_
+
+#include <sys/cdefs.h>
+
+#ifndef FLT_ROUNDS
+__BEGIN_DECLS
+extern int __flt_rounds(void);
+__END_DECLS
+#define FLT_ROUNDS -1 /* __flt_rounds() */
+#endif
+
+#define FLT_EVAL_METHOD (-1) /* XXX */
+
+#define LDBL_MANT_DIG 64
+#define LDBL_EPSILON 1.0842021724855044340E-19L
+#define LDBL_DIG 18
+#define LDBL_MIN_EXP (-16381)
+#define LDBL_MIN 1.6810515715560467531E-4932L
+#define LDBL_MIN_10_EXP (-4931)
+#define LDBL_MAX_EXP 16384
+#define LDBL_MAX 1.1897314953572317650E+4932L
+#define LDBL_MAX_10_EXP 4932
+
+#define FLT_RADIX 2 /* b */
+
+#define FLT_MANT_DIG 24 /* p */
+#define FLT_EPSILON 1.19209290E-7F /* b**(1-p) */
+#define FLT_DIG 6 /* floor((p-1)*log10(b))+(b == 10) */
+#define FLT_MIN_EXP (-125) /* emin */
+#define FLT_MIN 1.17549435E-38F /* b**(emin-1) */
+#define FLT_MIN_10_EXP (-37) /* ceil(log10(b**(emin-1))) */
+#define FLT_MAX_EXP 128 /* emax */
+#define FLT_MAX 3.40282347E+38F /* (1-b**(-p))*b**emax */
+#define FLT_MAX_10_EXP 38 /* floor(log10((1-b**(-p))*b**emax)) */
+
+#define DBL_MANT_DIG 53
+#define DBL_EPSILON 2.2204460492503131E-16
+#define DBL_DIG 15
+#define DBL_MIN_EXP (-1021)
+#define DBL_MIN 2.2250738585072014E-308
+#define DBL_MIN_10_EXP (-307)
+#define DBL_MAX_EXP 1024
+#define DBL_MAX 1.7976931348623157E+308
+#define DBL_MAX_10_EXP 308
+
+#define DECIMAL_DIG 17 /* ceil((1+p*log10(b))-(b==10) */
+
+#endif /* !_ARM_FLOAT_H_ */
--- /dev/null
+/*-
+ * Copyright (c) 1988, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)limits.h 8.3 (Berkeley) 1/4/94
+ * $FreeBSD: src/sys/amd64/include/_limits.h,v 1.11 2005/08/20 16:44:40 stefanf Exp $
+ */
+
+#ifndef _MACHINE__LIMITS_H_
+#define _MACHINE__LIMITS_H_
+
+/*
+ * According to ANSI (section 2.2.4.2), the values below must be usable by
+ * #if preprocessing directives. Additionally, the expression must have the
+ * same type as would an expression that is an object of the corresponding
+ * type converted according to the integral promotions. The subtraction for
+ * INT_MIN, etc., is so the value is not unsigned; e.g., 0x80000000 is an
+ * unsigned int for 32-bit two's complement ANSI compilers (section 3.1.3.2).
+ * These numbers are for the default configuration of gcc. They work for
+ * some other compilers as well, but this should not be depended on.
+ */
+
+#define __CHAR_BIT 8 /* number of bits in a char */
+
+#define __SCHAR_MAX 0x7f /* max value for a signed char */
+#define __SCHAR_MIN (-0x7f - 1) /* min value for a signed char */
+
+#define __UCHAR_MAX 0xffU /* max value for an unsigned char */
+
+#define __USHRT_MAX 0xffffU /* max value for an unsigned short */
+#define __SHRT_MAX 0x7fff /* max value for a short */
+#define __SHRT_MIN (-0x7fff - 1) /* min value for a short */
+
+#define __UINT_MAX 0xffffffffU /* max value for an unsigned int */
+#define __INT_MAX 0x7fffffff /* max value for an int */
+#define __INT_MIN (-0x7fffffff - 1) /* min value for an int */
+
+#define __ULONG_MAX 0xffffffffffffffffUL /* max for an unsigned long */
+#define __LONG_MAX 0x7fffffffffffffffL /* max for a long */
+#define __LONG_MIN (-0x7fffffffffffffffL - 1) /* min for a long */
+
+ /* max value for an unsigned long long */
+#define __ULLONG_MAX 0xffffffffffffffffULL
+#define __LLONG_MAX 0x7fffffffffffffffLL /* max value for a long long */
+#define __LLONG_MIN (-0x7fffffffffffffffLL - 1) /* min for a long long */
+
+#define __SSIZE_MAX __LONG_MAX /* max value for a ssize_t */
+
+#define __SIZE_T_MAX __ULONG_MAX /* max value for a size_t */
+
+#define __OFF_MAX __LONG_MAX /* max value for an off_t */
+#define __OFF_MIN __LONG_MIN /* min value for an off_t */
+
+/* Quads and longs are the same on the amd64. Ensure they stay in sync. */
+#define __UQUAD_MAX __ULONG_MAX /* max value for a uquad_t */
+#define __QUAD_MAX __LONG_MAX /* max value for a quad_t */
+#define __QUAD_MIN __LONG_MIN /* min value for a quad_t */
+
+#define __LONG_BIT 64
+#define __WORD_BIT 32
+
+/*
+ * Minimum signal stack size. The current signal frame
+ * for i386 is 408 bytes large.
+ */
+#define __MINSIGSTKSZ (512 * 4)
+
+#endif /* !_MACHINE__LIMITS_H_ */
#define CURRENT_CPU_TYPE CPU_ARM7
#elif __ARM_ARCH_5__
#define CURRENT_CPU_TYPE CPU_ARM5
+#elif __ARM_ARCH_8A__
+#define CURRENT_CPU_TYPE CPU_ARM8
#else
#error "must define CURRENT_CPU_TYPE"
#endif
*/
/*
- * Copyright (c) 2010, ETH Zurich.
+ * Copyright (c) 2010, 2015, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
#ifndef ARCH_ARM_BARRELFISH_KPI_PAGING_H
#include <target/arm/barrelfish_kpi/paging_arm_v7.h>
#elif defined(__ARM_ARCH_7M__)
#include <target/arm/barrelfish_kpi/paging_arm_v7m.h>
+#elif defined(__ARM_ARCH_8A__)
+#include <target/arm/barrelfish_kpi/paging_arm_v8.h>
#else
#error "Missing ARM Paging header file"
#endif
# endif
#elif defined(__arm__)
# define CACHELINE_BYTES 32
+#elif defined(__aarch64__)
+// XXX: is this true?
+# define CACHELINE_BYTES 64
#else
# error set CACHELINE_BYTES appropriately
#endif
--- /dev/null
+/**
+ * \file
+ * \brief Architecture specific dispatcher structure private to the user
+ */
+
+/*
+ * Copyright (c) 2015, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef TARGET_AARCH64_BARRELFISH_DISPATCHER_H
+#define TARGET_AARCH64_BARRELFISH_DISPATCHER_H
+
+#include <barrelfish_kpi/dispatcher_shared.h>
+#include <barrelfish_kpi/dispatcher_shared_arch.h>
+#include <barrelfish/dispatcher.h>
+
+/// Dispatcher structure (including data accessed only by user code)
+struct dispatcher_aarch64 {
+ struct dispatcher_shared_aarch64 d; ///< Shared (user/kernel) data. Must be first.
+ struct dispatcher_generic generic; ///< User private data
+ /* Incoming LMP endpoints (buffers and receive cap pointers) follow */
+};
+
+#endif // TARGET_AARCH64_BARRELFISH_DISPATCHER_H
--- /dev/null
+/**
+ * \file
+ * \brief Pmap definition common for the aarch64 archs
+ */
+
+/*
+ * Copyright (c) 2015, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef TARGET_AARCH64_BARRELFISH_PMAP_H
+#define TARGET_AARCH64_BARRELFISH_PMAP_H
+
+#include <barrelfish/pmap.h>
+
+/// Node in the meta-data, corresponds to an actual VNode object
+struct vnode {
+ uint16_t entry; ///< Page table entry of this VNode
+ bool is_vnode; ///< Is this a page table or a page mapping
+ struct vnode *next; ///< Next entry in list of siblings
+ union {
+ struct {
+ struct capref cap; ///< Capability of this VNode
+ struct vnode *children; ///< Children of this VNode
+ } vnode; // for non-leaf node
+ struct {
+ struct capref cap; ///< Capability of this VNode
+ genvaddr_t offset; ///< Offset within mapped frame cap
+ vregion_flags_t flags; ///< Flags for mapping
+ size_t pte_count; ///< number of mapped PTEs in this mapping
+ } frame; // for leaf node (maps page(s))
+ } u;
+};
+
+struct pmap_aarch64 {
+ struct pmap p;
+ struct vregion vregion; ///< Vregion used to reserve virtual address for metadata
+ genvaddr_t vregion_offset; ///< Offset into amount of reserved virtual address used
+ struct vnode root; ///< Root of the vnode tree
+ struct slab_allocator slab; ///< Slab allocator for the vnode lists
+ uint8_t slab_buffer[512]; ///< Initial buffer to back the allocator
+};
+
+#endif // TARGET_AARCH64_BARRELFISH_PMAP_H
--- /dev/null
+/**
+ * \file
+ * \brief Architecture specific dispatcher struct shared between kernel and user
+ */
+
+/*
+ * Copyright (c) 2015, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef TARGET_AARCH64_BARRELFISH_KPI_DISPATCHER_SHARED_H
+#define TARGET_AARCH64_BARRELFISH_KPI_DISPATCHER_SHARED_H
+
+#include <barrelfish_kpi/dispatcher_shared.h>
+
+///< Architecture specific kernel/user shared dispatcher struct
+struct dispatcher_shared_aarch64 {
+ struct dispatcher_shared_generic d; ///< Generic portion
+
+ lvaddr_t crit_pc_low; ///< Critical section lower PC bound
+ lvaddr_t crit_pc_high; ///< Critical section upper PC bound
+ lvaddr_t got_base; ///< Global Offset Table base
+
+ union registers_aarch64 enabled_save_area; ///< Enabled register save area
+ union registers_aarch64 disabled_save_area; ///< Disabled register save area
+ union registers_aarch64 trap_save_area; ///< Trap register save area
+};
+
+static inline struct dispatcher_shared_aarch64*
+get_dispatcher_shared_aarch64(dispatcher_handle_t handle)
+{
+ return (struct dispatcher_shared_aarch64*)handle;
+}
+
+#endif // TARGET_AARCH64_BARRELFISH_KPI_DISPATCHER_SHARED_H
--- /dev/null
+/**
+ * \file
+ * \brief Arch specific definitions, can be included by others.
+ */
+
+/*
+ * Copyright (c) 2015, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef TARGET_ARMV8_BARRELFISH_KPI_PAGING_H
+#define TARGET_ARMV8_BARRELFISH_KPI_PAGING_H
+
+#ifndef __ASSEMBLER__
+typedef uint64_t paging_x86_64_flags_t;
+#endif
+
+/** The system's base page size is 4kB */
+#define ARMv8_BASE_PAGE_BITS 12
+#define ARMv8_BASE_PAGE_SIZE (1<<ARMv8_BASE_PAGE_BITS)
+#define ARMv8_BASE_PAGE_MASK (ARMv8_BASE_PAGE_SIZE - 1)
+#define ARMv8_BASE_PAGE_OFFSET(a) ((a) & ARMv8_BASE_PAGE_MASK)
+
+/** The system's large page size is 2MB */
+#define ARMv8_LARGE_PAGE_BITS 21
+#define ARMv8_LARGE_PAGE_SIZE (1<<ARMv8_LARGE_PAGE_BITS)
+#define ARMv8_LARGE_PAGE_MASK (ARMv8_LARGE_PAGE_SIZE - 1)
+#define ARMv8_LARGE_PAGE_OFFSET(a) ((a) & ARMv8_LARGE_PAGE_MASK)
+
+/** The system's huge page size is 1GB */
+#define ARMv8_HUGE_PAGE_BITS 30
+#define ARMv8_HUGE_PAGE_SIZE (1<<ARMv8_HUGE_PAGE_BITS)
+#define ARMv8_HUGE_PAGE_MASK (ARMv8_HUGE_PAGE_SIZE - 1)
+#define ARMv8_HUGE_PAGE_OFFSET(a) ((a) & ARMv8_HUGE_PAGE_MASK)
+
+/**
+ * Bits within the various page directories and tables.
+ */
+
+// TODO: check what ptable sizes are available
+#define ARMv8_PTABLE_BITS 9 /**< Page directory/table size in bits */
+/** Page directory/table size */
+#define ARMv8_PTABLE_SIZE (1UL<<ARMv8_PTABLE_BITS)
+#define ARMv8_PTABLE_MASK 0x1ff /**< Page dir/table address mask */
+#define ARMv8_PTABLE_CLEAR 0 /**< Bitmap of a clear table entry */
+
+// XXX: maybe sizeof(union ...)
+#define ARMv8_PTABLE_ENTRY_SIZE sizeof(uint64_t)
+
+// XXX: These may depend on system config registers
+/* Macros to compute the corresponding portions of the vaddr */
+#define ARMv8_PML4_BASE(base) (((uint64_t)(base) >> 39) & ARMv8_PTABLE_MASK)
+#define ARMv8_PDPT_BASE(base) (((uint64_t)(base) >> 30) & ARMv8_PTABLE_MASK)
+#define ARMv8_PDIR_BASE(base) (((uint64_t)(base) >> 21) & ARMv8_PTABLE_MASK)
+#define ARMv8_PTABLE_BASE(base) (((uint64_t)(base) >> 12) & ARMv8_PTABLE_MASK)
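+
+// Worked example (illustrative, not part of the original header): for the
+// 48-bit virtual address va = 0x00007f1234567000 the macros above yield
+//   ARMv8_PML4_BASE(va)   == 0x0fe   (VA bits 47..39)
+//   ARMv8_PDPT_BASE(va)   == 0x048   (VA bits 38..30)
+//   ARMv8_PDIR_BASE(va)   == 0x1a2   (VA bits 29..21)
+//   ARMv8_PTABLE_BASE(va) == 0x167   (VA bits 20..12)
+// i.e. each translation level consumes ARMv8_PTABLE_BITS == 9 bits of the address.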
+
+// non-prefixed versions
+// XXX: should cleanup arm include mess
+
+/** The system's base page size is 4kB */
+#define BASE_PAGE_BITS ARMv8_BASE_PAGE_BITS
+#define BASE_PAGE_SIZE ARMv8_BASE_PAGE_SIZE
+#define BASE_PAGE_MASK ARMv8_BASE_PAGE_MASK
+#define BASE_PAGE_OFFSET(a) ARMv8_BASE_PAGE_OFFSET(a)
+
+/** The system's large page size is 2MB */
+#define LARGE_PAGE_BITS ARMv8_LARGE_PAGE_BITS
+#define LARGE_PAGE_SIZE ARMv8_LARGE_PAGE_SIZE
+#define LARGE_PAGE_MASK ARMv8_LARGE_PAGE_MASK
+#define LARGE_PAGE_OFFSET(a) ARMv8_LARGE_PAGE_OFFSET(a)
+
+/** The system's huge page size is 1GB */
+#define HUGE_PAGE_BITS ARMv8_HUGE_PAGE_BITS
+#define HUGE_PAGE_SIZE ARMv8_HUGE_PAGE_SIZE
+#define HUGE_PAGE_MASK ARMv8_HUGE_PAGE_MASK
+#define HUGE_PAGE_OFFSET(a) ARMv8_HUGE_PAGE_OFFSET(a)
+
+/**
+ * Bits within the various page directories and tables.
+ */
+
+// TODO: check what ptable sizes are available
+#define PTABLE_BITS ARMv8_PTABLE_BITS /**< Page directory/table size in bits */
+/** Page directory/table size */
+#define PTABLE_SIZE ARMv8_PTABLE_SIZE
+#define PTABLE_MASK ARMv8_PTABLE_MASK /**< Page dir/table address mask */
+#define PTABLE_CLEAR ARMv8_PTABLE_CLEAR /**< Bitmap of a clear table entry */
+
+#define PTABLE_ENTRY_SIZE ARMv8_PTABLE_ENTRY_SIZE
+
+#endif // TARGET_ARMV8_BARRELFISH_KPI_PAGING_H
"memory")
#endif
+#elif defined (__aarch64__)
+// XXX: todo sane implementations of these
+ #define KILL_CALLEE_SAVES() \
+ __asm__ volatile ("" : : : "sp")
#else
#error "Need definition of KILL_CALLEE_SAVES"
#endif
#elif defined(__arm__)
#define FORCE_ARGS_STACK assert(0 && "THC not yet implemented on ARM")
#define FORCE_ARGS_STACK_CALL assert(0 && "THC not yet implemented on ARM")
+#elif defined(__aarch64__)
+#define FORCE_ARGS_STACK assert(0 && "THC not yet implemented on ARM")
+#define FORCE_ARGS_STACK_CALL assert(0 && "THC not yet implemented on ARM")
#else
#error "Need definition of FORCE_ARGS_STACK"
#endif
#define RESTORE_OLD_STACK_POINTER(OLD_STACK_PTR) \
__asm__ volatile ("movl %0, %%esp \n\t" \
: : "m"(OLD_STACK_PTR))
-#elif defined(__arm__)
+#elif defined(__arm__) || defined(__aarch64__)
#define GET_STACK_POINTER(_) assert(0 && "THC not yet implemented on ARM")
#define RESTORE_OLD_STACK_POINTER(_) assert(0 && "THC not yet implemented on ARM")
#else
" addl $4, %esp \n\t" /* clean up stack for callee */ \
" jmp " JMP_ADDR " \n\t" /* jump to continuation */ \
);
-#elif defined(__arm__)
+#elif defined(__arm__) || defined(__aarch64__)
// *** NOTEs for the adventurous: porting lazy THC to ARM
//
}
-#elif defined(__i386__)
-
-
-static inline bool trace_cas(volatile uintptr_t *address, uintptr_t old,
- uintptr_t nw)
-{
- return false;
-}
-
-#define TRACE_TIMESTAMP() rdtsc()
-
-
-#elif defined(__arm__)
-
+#elif defined(__i386__) || defined(__arm__) || defined(__aarch64__)
static inline bool trace_cas(volatile uintptr_t *address, uintptr_t old,
uintptr_t nw)
#define TRACE_TIMESTAMP() 0
-
#else
#warning You need to supply CAS and a timestamp function for this architecture.
"omap/omap44xx_mmu",
"omap/omap44xx_spinlock"],
addLibraries = [ "elf", "cpio" ]
+ },
+ --
+    -- Applied Micro APM88xxxx series SoC
+ --
+
+ cpuDriver {
+ target = "apm88xxxx",
+ architectures = [ "armv8" ],
+ assemblyFiles = [ "arch/apm88xxxx/boot.S" ],
+ cFiles = [ "arch/armv8/exec.c",
+ "arch/armv8/irq.c",
+ "arch/armv8/kludges.c",
+ "arch/armv8/kputchar.c",
+ "arch/armv8/misc.c",
+ "arch/apm88xxxx/init.c",
+ "arch/apm88xxxx/paging.c",
+ "arch/apm88xxxx/uart.c" ],
+ mackerelDevices = [ "arm" ],
+ addLibraries = [ "elf", "cpio" ]
}
]
--- /dev/null
+/**
+ * \file
+ * \brief Bootstrap the kernel for ARMv8 processors. This code is
+ * entered from the bootloader (typically arm_molly, RedBoot,
+ * etc.).
+ */
+/*
+ * Copyright (c) 2009 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef __ASSEMBLER__
+#define __ASSEMBLER__ 1
+#endif
+
+#include <barrelfish_kpi/flags_arch.h> // ARM_MODE_MASK
+#include <offsets.h> // BOOT_STACK_PHYS
+
+ .text
+
+ .globl start, halt, got_base
+ .extern kernel_stack, glbl_core_data
+
+ // Used to track phys memory allocator limit globally.
+ alloc_top .req x11
+
+start:
+ // On entry:
+ //
+ // MMU disabled
+ // Caches in unknown state, but no lockdown
+ // No TLB lockdown.
+    // CPU is in a privileged mode.
+ //
+ // TODO: ensure mode
+
+ //init stack
+ ldr x1, =kernel_stack
+ mov sp, x1
+ add sp, sp, #KERNEL_STACK_SIZE
+
+ ldr PIC_REGISTER, got_base
+
+ //prepare argument
+ mov x0, x2
+ b arch_init
+ b halt
+
+
+/**
+ * extern "C" void halt(void) __attribute__((noreturn))
+ */
+halt:
+ b .
+
+/**********************************************************************/
+.ltorg
+
+got_base:
+    .quad 0 // Initialized by linker (8-byte slot, loaded into the 64-bit PIC register)
+
+ .end
--- /dev/null
+#include <kernel.h>
+
+/**
+ * \brief Kernel stack.
+ *
+ * This is the one and only kernel stack for a kernel instance.
+ */
+uintptr_t kernel_stack[KERNEL_STACK_SIZE / sizeof(uintptr_t)] __attribute__ ((aligned(16)));
+
+void arch_init(void *arg);
+void arch_init(void *arg)
+{
+}
--- /dev/null
+#include <kernel.h>
+#include <capabilities.h>
+#include <paging_kernel_arch.h>
+
+#pragma GCC diagnostic ignored "-Wsuggest-attribute=noreturn"
+
+
+void paging_context_switch(lpaddr_t ttbr)
+{
+ panic("NYI");
+}
+
+errval_t caps_copy_to_vnode(struct cte *dest_vnode_cte, cslot_t dest_slot,
+ struct cte *src_cte, uintptr_t flags,
+ uintptr_t offset, uintptr_t pte_count)
+{
+ panic("NYI");
+}
+
+size_t do_unmap(lvaddr_t pt, cslot_t slot, size_t num_pages)
+{
+ panic("NYI");
+}
--- /dev/null
+#include <kernel.h>
+#include <serial.h>
+
+#pragma GCC diagnostic ignored "-Wsuggest-attribute=noreturn"
+
+// base addrs
+#define UART0_BASE 0x1C020000
+#define UART1_BASE 0x1C021000
+#define UART2_BASE 0x1C022000
+#define UART3_BASE 0x1C023000
+
+unsigned serial_console_port, serial_debug_port;
+
+errval_t serial_init(unsigned port, bool initialize_hw)
+{
+
+ panic("NYI");
+}
+
+errval_t serial_early_init(unsigned port)
+{
+
+ panic("NYI");
+}
+
+/**
+ * \brief Prints a single character to a serial port.
+ */
+void serial_putchar(unsigned port, char c)
+{
+
+ panic("NYI");
+}
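+
+// Illustrative sketch, not part of this patch. It assumes the UARTs at the
+// base addresses above are PL011-compatible and accessible at their physical
+// addresses (identity/device mapping); check the APM88xxxx manual before
+// relying on this. The *_sketch name is a placeholder.
+static inline void serial_putchar_sketch(unsigned port, char c)
+{
+    (void)port;                         // single UART assumed for the sketch
+    volatile uint32_t *uart = (volatile uint32_t *)(uintptr_t)UART0_BASE;
+    while (uart[0x18 / 4] & (1u << 5)) {
+        // spin while the transmit FIFO is full (UARTFR.TXFF)
+    }
+    uart[0x00 / 4] = (uint32_t)c;       // write to the data register (UARTDR)
+}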
+
+/**
+ * \brief Reads a single character from the default serial port.
+ * This function spins waiting for a character to arrive.
+ */
+char serial_getchar(unsigned port)
+{
+ panic("NYI");
+}
--- /dev/null
+/**
+ * \file
+ * \brief AArch64 execution and miscellany
+ */
+
+/*
+ * Copyright (c) 2015, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <kernel.h>
+#include <dispatch.h>
+#include <init.h>
+#include <aarch64.h>
+#include <arm_hal.h>
+#include <exec.h>
+#include <misc.h>
+#include <cp15.h> // for invalidating tlb and cache
+
+//static arch_registers_state_t upcall_state;
+
+extern uint32_t ctr;
+static inline __attribute__((noreturn))
+void do_resume(uint32_t *regs)
+{
+ panic("NYI");
+}
+
+/// Ensure context is for user-mode with interrupts enabled.
+static inline void
+ensure_user_mode_policy(arch_registers_state_t *state)
+{
+ panic("NYI");
+}
+
+/**
+ * \brief Go to user-space at entry point 'entry'.
+ *
+ * This function goes to user-space and starts executing the program at
+ * its entry point at virtual address 'entry'.
+ *
+ * \param entry Entry point address of program to execute.
+ */
+void __attribute__ ((noreturn))
+execute(lvaddr_t entry)
+{
+ panic("NYI");
+}
+
+/**
+ * \brief Resume the given user-space snapshot.
+ *
+ * This function resumes user-space execution by restoring the CPU
+ * registers with the ones given in the array, pointed to by 'state'.
+ */
+void __attribute__ ((noreturn)) resume(arch_registers_state_t *state)
+{
+ panic("NYI");
+}
+
+void wait_for_interrupt(void)
+{
+ panic("NYI");
+}
--- /dev/null
+#include <kernel.h>
+#include <irq.h>
+
+#pragma GCC diagnostic ignored "-Wsuggest-attribute=noreturn"
+
+errval_t irq_table_notify_domains(struct kcb *kcb)
+{
+ panic("NYI");
+}
--- /dev/null
+/*
+ * Copyright (c) 2009 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <stdbool.h>
+#include <arm_hal.h>
+
+extern void dbg_break(void);
+void dbg_break(void)
+{
+ __asm("brk #0xffff");
+}
+
+extern void arch_benchmarks(void);
+void arch_benchmarks(void) { dbg_break(); }
+
+extern void arch_benchmarks_size(void);
+void arch_benchmarks_size(void) { dbg_break(); }
+
+extern void conio_putchar(void);
+void conio_putchar(void) { /* Don't break here yet! */ }
+
+extern void gdb_arch_continue(void);
+void gdb_arch_continue(void) { dbg_break(); }
+
+extern void gdb_arch_get_register(void);
+void gdb_arch_get_register(void) { dbg_break(); }
+
+extern void gdb_arch_read_byte(void);
+void gdb_arch_read_byte(void) { dbg_break(); }
+
+extern void gdb_arch_registers(void);
+void gdb_arch_registers(void) { dbg_break(); }
+
+extern void gdb_arch_set_register(void);
+void gdb_arch_set_register(void) { dbg_break(); }
+
+extern void gdb_arch_single_step(void);
+void gdb_arch_single_step(void) { dbg_break(); }
+
+extern void gdb_arch_write_byte(void);
+void gdb_arch_write_byte(void) { dbg_break(); }
+
+extern void reboot(void);
+void reboot(void) { dbg_break(); }
+
+struct dcb;
+extern void __attribute__ ((noreturn)) vmkit_vmenter (struct dcb *dcb);
+void vmkit_vmenter(struct dcb *dcb) { dbg_break(); for(;;); }
+
+extern void __aeabi_unwind_cpp_pr0(void);
+void __aeabi_unwind_cpp_pr0(void) { dbg_break(); }
+
+extern void raise(void);
+void raise(void) { dbg_break(); }
+
+extern void breakpoint(void);
+void breakpoint(void) { dbg_break(); }
+
+extern bool arch_core_is_bsp(void);
+// TODO
+bool arch_core_is_bsp(void) { return true; }
--- /dev/null
+/**
+ * \file
+ * \brief The world's simplest serial driver.
+ *
+ */
+
+/*
+ * Copyright (c) 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <serial.h>
+#include <kputchar.h>
+#include <global.h>
+
+#define KPBUFSZ 256
+static char kputbuf[KPBUFSZ];
+static int kcount = 0;
+
+static void kflush(void)
+{
+    for(int i=0; i<kcount; i++)
+        serial_console_putchar(kputbuf[i]);
+    kcount = 0;
+}
+
+void kprintf_begin(void)
+{
+    // TODO: locking?
+}
+
+int kputchar(int c)
+{
+    kputbuf[kcount++] = c;
+    if (kcount == KPBUFSZ || c == '\n')
+        kflush();
+    return c;
+}
+
+void kprintf_end(void)
+{
+    // TODO: locking?
+    kflush();
+}
+
+// End
--- /dev/null
+/*
+ * Copyright (c) 2007, 2008, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <offsets.h>
+
+OUTPUT_FORMAT("elf64-littleaarch64")
+OUTPUT_ARCH("aarch64")
+
+ENTRY(start)
+
+/*
+PHDRS {
+ headers PT_PHDR PHDRS;
+ text PT_LOAD FILEHDR PHDRS;
+ data PT_LOAD;
+ dynamic PT_DYNAMIC;
+}
+*/
+
+SECTIONS {
+ . = START_KERNEL_PHYS;
+
+ /*kernel_elf_header = .;*/
+ kernel_first_byte = .;
+
+ /*. += SIZEOF_HEADERS; */
+
+ .text : { *(.text); }
+ kernel_text_final_byte = .;
+
+ . = ALIGN(4k);
+ .rodata . :
+ {
+ *(.rodata);
+ }
+
+ .got . :
+ {
+ got_base = .;
+ *(.got);
+ }
+
+ /*.rel.got . : { *(.rel.got); } */
+
+ .bss . :
+ {
+ *(.bss);
+ }
+
+
+ kernel_final_byte = .;
+
+ /***** These sections get discarded *****/
+ /DISCARD/ :
+ {
+ /* Discard exception handler frames and headers -- we don't use em */
+ *(.eh_frame);
+ *(.eh_frame_hdr);
+ *(.note.gnu.build-id);
+ *(.interp);
+/* *(.dynsym); */
+/* *(.dynstr); */
+/* *(.hash); */
+/* *(.gnu.hash); */
+ *(.dynamic);
+ }
+}
--- /dev/null
+/** \file
+ * \brief Miscellaneous kernel support code.
+ *
+ * This file contains miscellaneous architecture-independent kernel support
+ * code that doesn't belong anywhere else.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <kernel.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <exec.h>
+#include <misc.h>
+
+#define DEFAULT_LOGLEVEL LOG_DEBUG
+#define DEFAULT_SUBSYSTEM_MASK (~0L)
+
+/**
+ * Global kernel loglevel.
+ */
+int kernel_loglevel = DEFAULT_LOGLEVEL;
+
+/**
+ * Default kernel subsystem message mask. Determines which subsystems'
+ * messages are output.
+ */
+int kernel_log_subsystem_mask = DEFAULT_SUBSYSTEM_MASK;
+
+/**
+ * 'true' if kernel should handle and context switch on timer ticks.
+ * Pass the ticks parameter on the kernel command line if you
+ * want to change this.
+ */
+bool kernel_ticks_enabled = true;
+
+/**
+ * The current time since kernel start in timeslices.
+ */
+size_t kernel_now = 0;
+
+/**
+ * \brief Print a message and halt the kernel.
+ *
+ * Something irrecoverably bad happened. Print a panic message, then halt.
+ */
+void panic(const char *msg, ...)
+{
+ va_list ap;
+ static char buf[256];
+
+ va_start(ap, msg);
+ vsnprintf(buf, sizeof(buf), msg, ap);
+ va_end(ap);
+
+ printf("kernel PANIC! %.*s\n", (int)sizeof(buf), buf);
+
+ halt();
+}
+
+/**
+ * \brief Log a kernel message.
+ *
+ * Logs printf()-style message 'msg', having loglevel 'level' to the default
+ * kernel console(s). Additional arguments are like printf(). Whether the
+ * message is put out depends on the current kernel log level.
+ *
+ * \param level Loglevel of message.
+ * \param msg The message (printf() format string)
+ */
+void printk(int level, const char *msg, ...)
+{
+ if(kernel_loglevel > level) {
+ va_list ap;
+ static char buf[256];
+
+ va_start(ap, msg);
+ vsnprintf(buf, sizeof(buf), msg, ap);
+ va_end(ap);
+
+ printf("kernel %.*s", (int)sizeof(buf), buf);
+ }
+}
+
+/**
+ * Helper function used in the implementation of assert()
+ */
+#ifdef CONFIG_NEWLIB
+void __assert_func(const char *file, int line, const char *func, const char *exp)
+#else
+void __assert(const char *exp, const char *file, const char *func, int line)
+#endif
+{
+ panic("kernel assertion \"%s\" failed at %s:%d", exp, file, line);
+}
--- /dev/null
+/*
+ * Copyright (c) 2007, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef __ARM_H
+#define __ARM_H
+
+#include <barrelfish_kpi/types.h>
+#include <barrelfish_kpi/cpu.h>
+
+#endif //__ARM_H
--- /dev/null
+/**
+ * \file
+ * \brief A struct for all shared data between the kernels
+ */
+
+/*
+ * Copyright (c) 2008, 2010 ETH Zurich
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+
+#ifndef KERNEL_ARCH_AARCH64_GLOBAL_H
+#define KERNEL_ARCH_AARCH64_GLOBAL_H
+
+#include <barrelfish_kpi/spinlocks_arch.h>
+#include <barrelfish_kpi/types.h>
+
+/**
+ * \brief Struct passed to app_cores during boot.
+ * Contains information that the bsp_kernel wants to pass to the app_kernels.
+ */
+struct global {
+ /// Shared locks between the kernels
+ struct {
+ spinlock_t print; ///< Lock for printing
+ } locks;
+
+ uint32_t tickspersec;
+
+ genpaddr_t notify[MAX_COREID];
+};
+
+extern struct global *global;
+
+// XXX: check this
+#define GLOBAL_VBASE 0x21000
+
+#endif
--- /dev/null
+/**
+ * \file
+ * \brief Header for AArch64-specific GDB stub code.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <aarch64.h>
+
+extern uintptr_t *gdb_arch_registers;
+
+/** Address of saved registers as void * */
+#define GDB_ARCH_REGADDR ((void*)gdb_arch_registers)
+
+/** Number of bytes saved in GDB frame */
+#define GDB_ARCH_REGBYTES (sizeof(uintptr_t) * ARCH_NUMREGS)
--- /dev/null
+/**
+ * \file
+ * \brief Hardware Abstraction Layer interface for ARM boards.
+ *
+ * This file defines the hardware abstraction layer for ARM targets. Each
+ * board is expected to have an implementation that corresponds to this
+ * interface.
+ *
+ * This interface is expected to change as new boards are added.
+ */
+
+/*
+ * Copyright (c) 2007, 2009, 2012 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef __ARM_HAL_H__
+#define __ARM_HAL_H__
+
+// TODO: check all of these
+
+#include <barrelfish_kpi/types.h>
+
+/**
+ * @return Unique 32-bit identifier associated with current board.
+ */
+uint32_t hal_get_board_id(void);
+
+/**
+ * @return Current processor ordinal. Value has range 0 to n_cpus - 1.
+ */
+uint8_t hal_get_cpu_id(void);
+
+/**
+ * @return true if current processor is bootstrap processor.
+ */
+bool hal_cpu_is_bsp(void);
+
+/*
+ * generic interrupt controller functionality
+ */
+void gic_init(void);
+void gic_distributor_init(void);
+void gic_cpu_interface_init(void);
+void gic_cpu_interface_enable(void);
+void gic_cpu_interface_disable(void);
+void gic_enable_interrupt(uint32_t int_id, uint8_t cpu_targets, uint16_t prio,
+ bool edge_triggered, bool one_to_n);
+void gic_disable_all_irqs(void);
+uint32_t gic_get_active_irq(void);
+void gic_ack_irq(uint32_t irq);
+void gic_raise_softirq(uint8_t cpumask, uint8_t irq);
+
+/*
+ * Timer
+ */
+void pit_init(uint32_t tick_hz, uint8_t pit_id);
+void pit_start(uint8_t pit_id);
+bool pit_handle_irq(uint32_t irq);
+void pit_mask_irq(bool masked, uint8_t pit_id);
+
+/*
+ * Time-stamp counter
+ */
+void tsc_init(void);
+uint32_t tsc_read(void);
+uint32_t tsc_get_hz(void);
+
+/*
+ * system control unit
+ * only for multi-core
+ */
+void scu_initialize(void);
+void scu_enable(void);
+int scu_get_core_count(void);
+
+void write_sysflags_reg(uint32_t regval);
+
+/* [2009-11-17 orion] TODO: device enumeration */
+
+#endif // __ARM_HAL_H__
--- /dev/null
+/*
+ * Copyright (c) 2009 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef __CP15_H__
+#define __CP15_H__
+
+/**
+ * \brief Read domain access control register
+ */
+static inline uint32_t cp15_read_dacr(void)
+{
+ panic("NYI");
+}
+
+/**
+ * \brief Read instruction fault status register.
+ */
+static inline uint32_t cp15_read_ifsr(void)
+{
+ panic("NYI");
+}
+
+/**
+ * \brief Read data fault status register.
+ */
+static inline uint32_t cp15_read_dfsr(void)
+{
+ panic("NYI");
+}
+
+/**
+ * \brief Read fault address register.
+ */
+static inline uint32_t cp15_read_far(void)
+{
+ panic("NYI");
+}
+
+static inline lpaddr_t cp15_read_ttbr0(void)
+{
+ panic("NYI");
+}
+
+static inline lpaddr_t cp15_read_ttbr1(void)
+{
+ panic("NYI");
+}
+
+static inline void cp15_write_ttbr0(lpaddr_t ttbr)
+{
+ panic("NYI");
+}
+
+static inline void cp15_write_ttbr1(lpaddr_t ttbr)
+{
+ panic("NYI");
+}
+
+static inline uint32_t cp15_read_ttbcr(void)
+{
+ panic("NYI");
+}
+
+static inline void cp15_write_ttbcr(uint32_t ttbcr)
+{
+ panic("NYI");
+}
+
+extern void cp15_invalidate_d_cache(void);
+extern void cp15_invalidate_i_and_d_caches(void);
+extern void cp15_invalidate_i_and_d_caches_fast(void);
+extern void cp15_invalidate_tlb_fn(void);
+extern void cp15_enable_mmu(void);
+extern void cp15_enable_alignment(void);
+
+static inline uint32_t cp15_read_cache_status(void){
+ panic("NYI");
+}
+
+
+static inline void cp15_disable_cache(void){
+
+ cp15_invalidate_i_and_d_caches_fast();
+
+ panic("NYI");
+
+ printf("WARNING! Caching has been disabled, configuration is: %"PRIx32"\n", cp15_read_cache_status());
+
+}
+
+static inline void cp15_invalidate_tlb(void)
+{
+ panic("NYI");
+}
+
+static inline uint8_t cp15_get_cpu_id(void) {
+ panic("NYI");
+}
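+
+// Illustrative sketch, not part of this patch: on AArch64 the CP15 state
+// above is reached through named system registers with mrs/msr instead of
+// coprocessor moves. The *_sketch names are placeholders.
+static inline uint8_t cp15_get_cpu_id_sketch(void)
+{
+    uint64_t mpidr;
+    __asm volatile("mrs %0, mpidr_el1" : "=r" (mpidr));
+    return (uint8_t)(mpidr & 0xff);              // Aff0: core number within the cluster
+}
+
+static inline lpaddr_t cp15_read_ttbr0_sketch(void)
+{
+    uint64_t ttbr0;
+    __asm volatile("mrs %0, ttbr0_el1" : "=r" (ttbr0));
+    return (lpaddr_t)ttbr0;
+}
+
+static inline void cp15_invalidate_tlb_sketch(void)
+{
+    __asm volatile("tlbi vmalle1\n\t"            // invalidate this core's EL1 TLB entries
+                   "dsb sy\n\t"
+                   "isb" ::: "memory");
+}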
+
+/*
+ * Get the configuration base address
+ * This is described in the Cortex A9 TRM, 4.2.32
+ */
+static inline uint32_t cp15_read_cbar(void)
+{
+ panic("NYI");
+}
+
+#endif // __CP15_H__
--- /dev/null
+/**
+ * \file
+ * \brief ARM architecture initialization
+ */
+
+/*
+ * Copyright (c) 2007, 2008, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef INIT_H
+#define INIT_H
+
+#ifndef __ASSEMBLER__
+
+void arch_init(void *pointer)
+//void arch_init(uint32_t board_id, struct atag *atag_paddr,
+// lvaddr_t ttbase,
+// lvaddr_t phys_alloc_top)
+ __attribute__((noreturn));
+
+//struct phys_mmap;
+void arm_kernel_startup(void)
+ __attribute__((noreturn));
+
+#endif // __ASSEMBLER__
+
+#endif // INIT_H
--- /dev/null
+/*
+ * Copyright (c) 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef KERNEL_ARCH_ARM_IRQ_H
+#define KERNEL_ARCH_ARM_IRQ_H
+
+/*
+ * TODO: Fix for Cortex-A57 / GIC400
+ * Interrupt controller (Cortex-A9 MPU INTC) with up to 128 interrupt requests
+ */
+#define NUM_INTR (128+32)
+
+/// Size of hardware IRQ dispatch table == #NIDT - #NEXCEPTIONS exceptions
+#define NDISPATCH (NUM_INTR)
+
+struct capability;
+struct idc_recv_msg;
+//struct sysret irq_table_set(struct capability *to, struct idc_recv_msg *msg);
+//struct sysret irq_table_delete(struct capability *to, struct idc_recv_msg *msg);
+errval_t irq_table_set(unsigned int nidt, capaddr_t endpoint);
+errval_t irq_table_delete(unsigned int nidt);
+struct kcb;
+errval_t irq_table_notify_domains(struct kcb *kcb);
+void send_user_interrupt(int irq);
+
+#endif // KERNEL_ARCH_ARM_IRQ_H
--- /dev/null
+/**
+ * \file
+ * \brief A struct for all shared data between the kernels
+ */
+
+/*
+ * Copyright (c) 2008, 2010 ETH Zurich
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef KERNEL_ARCH_ARM_KPUTCHAR_H
+#define KERNEL_ARCH_ARM_KPUTCHAR_H
+
+#include <serial.h>
+
+void kprintf_begin(void);
+int kputchar(int c);
+void kprintf_end(void);
+
+#endif // KERNEL_ARCH_ARM_KPUTCHAR_H
--- /dev/null
+/* [2009-07-30 ohodson] TODO: implement! */
+
+/**
+ * \file
+ * \brief Miscellaneous architecture-specific functions
+ */
+
+/*
+ * Copyright (c) 2008, 2009, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_MISC_H
+#define ARCH_MISC_H
+
+//
+// Helpers for pasting #defined values into inline assembler.
+//
+#define STR(x) #x
+#define XTR(x) STR(x)
+
+/**
+ * \brief Set thread-local-storage register.
+ */
+static inline void arch_set_thread_register(uintptr_t value)
+{
+ __asm (
+ "mov "XTR(THREAD_REGISTER)", %[value]" :: [value] "r" (value)
+ );
+}
+
+static inline uintptr_t arch_get_thread_register(void)
+{
+ uintptr_t result;
+ __asm (
+ "mov %[result]," XTR(THREAD_REGISTER) : [result] "=r" (result)
+ );
+ return result;
+}
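+
+// For illustration: with THREAD_REGISTER defined as, e.g., X9 (as in the
+// armv8 Hakefile), the inline assembly above expands to "mov X9, %[value]"
+// and "mov %[result], X9", i.e. that register is reserved to carry the
+// thread-local-storage pointer.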
+
+#endif /* ARCH_MISC_H */
--- /dev/null
+/**
+ * \file
+ * \brief AArch64 address space sizes and offsets
+ *
+ * The layout of the ARM virtual address space can be summarized as
+ * follows:
+ *
+ *
+ * User-space maps user-space programs. Physical memory maps all
+ * available physical memory (up to PADDR_SPACE_LIMIT). Kernel-space
+ * maps only the kernel text and data.
+ *
+ * This partition is static and can only be changed at compile-time.
+ *
+ */
+
+/* [2009-07-30 ohodson] TODO: This is a first-cut, layout likely
+ * does not make sense.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef OFFSETS_H
+#define OFFSETS_H
+
+#define GEN_ADDR(bits) (((genpaddr_t)1) << bits)
+
+/**
+ * Absolute size of virtual address space. This is 48-bit on AArch64.
+ * TODO: might be implementation-specific
+ */
+#define VADDR_SPACE_SIZE GEN_ADDR(48)
+
+/**
+ * Absolute size of physical address space.
+ * Depends on value in ID_AA64MMFR0_EL1 (ARMv8-A TRM, D4-1733)
+ * current options are 4G, 64G, 1T, 4T, 16T, 256T
+ * set to 256T for now
+ */
+#define PADDR_SPACE_SIZE GEN_ADDR(48)
+
+/**
+ * Start address of kernel image in physical memory. This is passed to
+ * the linker also. This address is chosen to be the same as Linux on ARM
+ * for GEM5 and/or bootloader compatibility.
+ *
+ * Entry point is 0x11000.
+ *
+ * TODO: do we need this?
+ */
+//#define START_KERNEL_PHYS (0x10000 + 0x1000)
+#define START_KERNEL_PHYS 0x100000
+
+/**
+ * Physical address of the kernel stack at boot time.
+ */
+#define BOOT_STACK_PHYS 0x10000
+
+/**
+ * Kernel offset - virtual base of kernel.
+ */
+#define KERNEL_OFFSET 0xc0000000
+
+/**
+ * Maximum physical address space mappable by the kernel. Adjust this
+ * for a bigger physical address space.
+ */
+#define PADDR_SPACE_LIMIT 0xFFFFFFFF
+
+/**
+ * Kernel address space limit is 1 MB currently.
+ */
+#define KERNEL_SPACE_LIMIT (1L << 20)
+
+/**
+ * Static address space limit for the init user-space domain. The
+ * static space is used to map in code and static data of the init
+ * module, as well as all loaded multiboot modules. init can freely
+ * allocate dynamic memory as soon as it is running. This is 32 MBytes
+ * right now.
+ *
+ * You should make this constant a multiple of #BASE_PAGE_SIZE *
+ * #PTABLE_SIZE or you'll restrict init's static address space
+ * unnecessarily. init's lowest segment should also be based at these
+ * multiples or it restricts itself.
+ *
+ *
+ * NB 32MB is size of the fast context switch extension
+ * per-process address space.
+ */
+#define INIT_SPACE_LIMIT (32 * 1024 * 1024)
+
+/**
+ * Base address of init address space in virtual memory. init should
+ * start at 4 MByte. The kernel maps in important structures at 2
+ * MByte. This address should be page-table size aligned (i.e. with 4
+ * KByte pages, a page table maps 2 MBytes. Thus, align it to
+ * multiples of 2 MBytes).
+ */
+#define INIT_VBASE (2 * 1024 * 1024)
+
+/**
+ * Initial amount of physical memory to map during bootup. The low
+ * 1MByte of memory is always expected to be there and has to be
+ * specified here at minimum. If you need more during bootup, increase
+ * this value. This value is also the amount of memory you _expect_ to
+ * be in the system during bootup, or the kernel will crash!
+ */
+#define KERNEL_INIT_MEMORY (1 * 1024 * 1024)
+
+/**
+ * Absolute offset of mapped physical memory within virtual address
+ * space.
+ *
+ * 2GB.
+ */
+#define MEMORY_OFFSET GEN_ADDR(31)
+// 2G (2 ** 31)
+
+/**
+ * Absolute start of RAM in physical memory.
+ */
+#define PHYS_MEMORY_START 0x0
+
+/*
+ * Device offset to map devices in high memory.
+ */
+#define DEVICE_OFFSET 0xff000000
+
+/**
+ * Kernel stack size -- 16KB
+ */
+#define KERNEL_STACK_SIZE 0x4000
+
+/**
+ * The size of the whole kernel image.
+ */
+#define KERNEL_IMAGE_SIZE (size_t)(&kernel_final_byte - &kernel_first_byte)
+
+/*
+ * Bytes per kernel copy for each core (1 Section)
+ */
+#define KERNEL_SECTION_SIZE 0x100000
+// 1MB, (2 ** 20)
+
+#define KERNEL_STACK_ADDR (lpaddr_t)kernel_stack
+
+#ifndef __ASSEMBLER__
+
+static inline lvaddr_t local_phys_to_mem(lpaddr_t addr)
+{
+    // Physical memory is mapped at MEMORY_OFFSET in the virtual address
+    // space, so the translation is a constant offset
+    // (a nop only when MEMORY_OFFSET == PHYS_MEMORY_START)
+ if(PADDR_SPACE_LIMIT - PHYS_MEMORY_START > 0) {
+ assert(addr < PHYS_MEMORY_START + PADDR_SPACE_LIMIT);
+ }
+ return (lvaddr_t)(addr + ((lpaddr_t)MEMORY_OFFSET - (lpaddr_t)PHYS_MEMORY_START));
+}
+
+static inline lpaddr_t mem_to_local_phys(lvaddr_t addr)
+{
+ assert(addr >= MEMORY_OFFSET);
+ return (lpaddr_t)(addr - ((lvaddr_t)MEMORY_OFFSET - (lvaddr_t)PHYS_MEMORY_START));
+}
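+
+// Worked example (illustrative): with MEMORY_OFFSET == GEN_ADDR(31) (2 GB)
+// and PHYS_MEMORY_START == 0x0, local_phys_to_mem(0x1000) == 0x80001000 and
+// mem_to_local_phys(0x80001000) == 0x1000.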
+
+static inline lpaddr_t gen_phys_to_local_phys(genpaddr_t addr)
+{
+ //assert(addr < PADDR_SPACE_SIZE);
+ return (lpaddr_t)addr;
+}
+
+static inline genpaddr_t local_phys_to_gen_phys(lpaddr_t addr)
+{
+ return (genpaddr_t)addr;
+}
+
+/**
+ * Symbol: Start of kernel image. This symbol points to the start
+ * address of the kernel image.
+ */
+extern uint8_t kernel_first_byte;
+
+/**
+ * Symbol: End of kernel image. This symbol points to the end address
+ * of the kernel image.
+ */
+extern uint8_t kernel_text_final_byte;
+
+/**
+ * Symbol: End of kernel image. This symbol points to the end address
+ * of the kernel image.
+ */
+extern uint8_t kernel_final_byte;
+
+extern uint8_t kernel_elf_header;
+
+/**
+ * \brief The kernel stack.
+ *
+ * Declared in boot.S.
+ */
+extern uintptr_t kernel_stack[KERNEL_STACK_SIZE/sizeof(uintptr_t)];
+
+#endif // __ASSEMBLER__
+
+/**
+ * Kernel interrupt jump table
+ */
+#define INT_HANDLER_TABLE 0xFFFF0100
+
+
+
+
+#endif // OFFSETS_H
--- /dev/null
+/**
+ * \file
+ * \brief ARM kernel page-table structures.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef KERNEL_ARCH_ARM_PAGING_H
+#define KERNEL_ARCH_ARM_PAGING_H
+
+// XXX: Not sure if these includes are required
+#include <capabilities.h>
+#include <barrelfish_kpi/cpu.h>
+#include <barrelfish_kpi/paging_arch.h>
+#include <cp15.h>
+
+/**
+ * Setup bootstrap page table with direct and relocated mappings for kernel.
+ *
+ * This function does not enable paging.
+ *
+ * @param initial_base
+ * @param initial_size
+ */
+void paging_map_kernel(uintptr_t initial_base, size_t initial_size);
+
+lvaddr_t paging_map_device(lpaddr_t base, size_t size);
+
+
+/**
+ * Maps a device to a l2 page.
+ * Assumption: corresponding L1 entry already set
+ *
+ */
+
+void paging_map_device_page(uintptr_t l1_table,
+ lvaddr_t device_vbase,
+ lpaddr_t device_pbase,
+ size_t device_bytes);
+
+/**
+ * Add kernel mappings to newly constructed page table.
+ *
+ * @param new_table_addr address of newly constructed page table.
+ * @param new_table_bytes size of newly constructed page table.
+ */
+void paging_make_good(lvaddr_t new_table_addr, size_t new_table_bytes);
+
+void paging_map_user_pages_l1(lvaddr_t table_addr, lvaddr_t vaddr, lpaddr_t paddr);
+
+void paging_set_l2_entry(uintptr_t* l2entry, lpaddr_t paddr, uintptr_t flags);
+
+void paging_context_switch(lpaddr_t table_addr);
+
+void paging_arm_reset(lpaddr_t paddr, size_t bytes);
+
+
+// REVIEW: [2010-05-04 orion]
+// these were deprecated in churn, enabling now to get system running again.
+
+void paging_map_kernel_section(uintptr_t ttbase,lvaddr_t vbase, lpaddr_t pbase);
+void paging_map_memory(uintptr_t ttbase, lpaddr_t paddr, size_t bytes);
+
+static inline bool is_root_pt(enum objtype type) {
+ return type == ObjType_VNode_ARM_l1;
+}
+
+static inline size_t get_pte_size(void) {
+ // both l1_entry and l2_entry are 4 bytes
+ return 4;
+}
+
+static inline void do_one_tlb_flush(genvaddr_t vaddr)
+{
+ // TODO: figure out selective flushing for ARM
+ cp15_invalidate_tlb();
+}
+
+static inline void do_selective_tlb_flush(genvaddr_t vaddr, genvaddr_t vend)
+{
+ // TODO: figure out selective flushing for ARM
+ cp15_invalidate_tlb();
+}
+
+static inline void do_full_tlb_flush(void)
+{
+ cp15_invalidate_tlb();
+}
+
+
+#endif // KERNEL_ARCH_ARM_PAGING_H
case ObjType_VNode_ARM_l2:
page_size = BASE_PAGE_SIZE;
break;
+#elif defined(__ARM_ARCH_8A__)
+ // TODO: define ARMv8 paging
#else
#error setup page sizes for arch
#endif
default:
+ panic("cannot find page size for cap type: %d\n", leaf_pt->cap);
break;
}
assert(page_size);
--- /dev/null
+/**
+ * \file
+ * \brief architecture-specific registers code
+ */
+
+/*
+ * Copyright (c) 2015, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_AARCH64_BARRELFISH_REGISTERS_H
+#define ARCH_AARCH64_BARRELFISH_REGISTERS_H
+
+#include <barrelfish/curdispatcher_arch.h> // XXX For curdispatcher()
+#include "threads_priv.h"
+
+static inline uint64_t
+curgotbase(void)
+{
+    uint64_t ret;
+    // The GOT base lives in x10 (PIC_REGISTER) on this port, not r10.
+    __asm (
+        "mov %[ret], x10" : [ret] "=r" (ret)
+    );
+    return ret;
+}
+
+static inline void
+registers_set_initial(arch_registers_state_t *regs, struct thread *thread,
+ lvaddr_t entry, lvaddr_t stack, uint32_t arg1,
+ uint32_t arg2, uint32_t arg3, uint32_t arg4)
+{
+ regs->named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
+ regs->named.r0 = arg1;
+ regs->named.r1 = arg2;
+ regs->named.r2 = arg3;
+ regs->named.r3 = arg4;
+ regs->named.stack = stack;
+ regs->named.rtls = (uintptr_t)curdispatcher(); // XXX API bug means this must be run same-core
+ regs->named.r10 = (uintptr_t)curgotbase();
+ regs->named.pc = entry;
+}
+
+#endif // ARCH_AARCH64_BARRELFISH_REGISTERS_H
--- /dev/null
+/**
+ * \file
+ * \brief Threads architecture-specific code
+ */
+
+/*
+ * Copyright (c) 2009, ETH Zurich
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef LIBBARRELFISH_ARCH_THREADS_H
+#define LIBBARRELFISH_ARCH_THREADS_H
+
+/* this is a label defined in the assembler code that implements cap_invoke() */
+extern void barrelfish_cap_invoke_post_syscall_instr(void);
+extern void barrelfish_lrpc_post_syscall_instr(void);
+
+/**
+ * Returns true iff the thread with the given save area has successfully
+ * performed a syscall. Used for the thread_invoke_cap_and_exit() hack.
+ */
+static inline bool thread_check_syscall_succeeded(uintptr_t *save_area)
+{
+ assert(!"thread_check_syscall_succeeded: called");
+ abort();
+#if 0
+ return ((save_area[RIP_REG] == (vaddr_t)barrelfish_cap_invoke_post_syscall_instr
+ || save_area[RIP_REG] == (vaddr_t)barrelfish_lrpc_post_syscall_instr)
+ && save_area[RAX_REG] == 0);
+#endif
+}
+
+#endif // LIBBARRELFISH_ARCH_THREADS_H
--- /dev/null
+/**
+ * \file
+ * \brief libc startup code.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <asmoffsets.h>
+ .syntax unified
+ .text
+ .globl _start, _start_init
+
+_start:
+ // Entry for processes other than init
+ // Prepare arg1 init_dom_arg (false)
+ mov r0, r9
+ mov r1, #0
+
+_start_generic:
+ ldr sp, =crt0_temp_stack
+ // Call barrelfish_init_disabled(struct dispatcher* d, bool init_dom_arg)
+ b barrelfish_init_disabled
+
+_start_init:
+ // Entry for the init process
+ // Prepare arg1 init_dom_arg (true)
+ mov r0, r9
+ mov r1, #1
+ b _start_generic
+
+    // TODO: Remove. These are temporary kludges. It looks as though
+    // we need to build our own libgcc to avoid these.
+ .globl __aeabi_unwind_cpp_pr0
+ .globl __aeabi_unwind_cpp_pr1
+ .globl raise
+
+raise:
+__aeabi_unwind_cpp_pr0:
+__aeabi_unwind_cpp_pr1:
+ bl abort
+
+.section ".bss"
+crt0_temp_stack_base:
+ .space 8192
+crt0_temp_stack:
ASSERT(sizeof(struct dispatcher_x86_32) <= (1 << DISPATCHER_FRAME_BITS));
#elif defined __arm__
ASSERT(sizeof(struct dispatcher_arm) <= (1 << DISPATCHER_FRAME_BITS));
+#elif defined __aarch64__
+ ASSERT(sizeof(struct dispatcher_aarch64) <= (1 << DISPATCHER_FRAME_BITS));
#else
#error "Define architecture"
#endif
lrpc_words = 0
}
+aarch64 = Arch {
+ archname = "aarch64",
+ wordsize = 64,
+ ptrsize = 64,
+ sizesize = 64,
+ enum_type = Int32,
+ lmp_words = 10, -- XXX: TODO: check this
+ lrpc_words = 0
+}
+
-- settings for the xeon phi. TODO: Verify.
k1om = Arch {
archname = "k1om",
lrpc_words = 4
}
-all_archs = [x86_64, x86_32, arm, k1om]
+all_archs = [x86_64, x86_32, arm, aarch64, k1om]
-- for option parsing: find the matching arch info
parse_arch :: String -> Maybe Arch
#elif defined(__ARM_ARCH_7M__)
# define MONITOR_NAME BF_BINARY_PREFIX "armv7-m/sbin/monitor"
# define MEM_SERV_NAME BF_BINARY_PREFIX "armv7-m/sbin/mem_serv"
-#elif defined(__arm__)
+#elif defined(__ARM_ARCH_5__)
# define MONITOR_NAME BF_BINARY_PREFIX "armv5/sbin/monitor"
# define MEM_SERV_NAME BF_BINARY_PREFIX "armv5/sbin/mem_serv"
+#elif defined(__ARM_ARCH_8A__)
+# define MONITOR_NAME BF_BINARY_PREFIX "armv8/sbin/monitor"
+# define MEM_SERV_NAME BF_BINARY_PREFIX "armv8/sbin/mem_serv"
#else
# error Unknown architecture
#endif
#elif defined(__arm__)
/* XXX This is better if < 32! - but there were no compile time warnings! */
# define MAXSIZEBITS 31
+#elif defined(__aarch64__)
+/* XXX what's the right value here? */
+# define MAXSIZEBITS 48
#else
# error Unknown architecture
#endif
#elif defined(__arm__)
/* XXX This is better if < 32! - but there were no compile time warnings! */
# define MAXSIZEBITS 31
+#elif defined(__aarch64__)
+# define MAXSIZEBITS 48 // XXX: what's the right value here?
#else
# error Unknown architecture
#endif
--- /dev/null
+/**
+ * \file
+ * \brief Capability invocations specific to the monitors
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef MONITOR_INVOCATIONS_ARCH_H
+#define MONITOR_INVOCATIONS_ARCH_H
+
+#include <barrelfish/syscall_arch.h>
+#include <barrelfish/caddr.h>
+#include <barrelfish/invocations_arch.h>
+#include <barrelfish_kpi/cpu.h>
+#include <barrelfish_kpi/syscalls.h>
+#include "monitor_debug.h"
+
+static inline errval_t
+invoke_monitor_identify_cap(capaddr_t cap, int bits, struct capability *out)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ uint8_t invoke_bits = get_cap_valid_bits(cap_kernel);
+ capaddr_t invoke_cptr = get_cap_addr(cap_kernel) >> (CPTR_BITS - invoke_bits);
+
+ return syscall5((invoke_bits << 16) | (KernelCmd_Identify_cap << 8)
+ | SYSCALL_INVOKE, invoke_cptr, cap, bits,
+ (uintptr_t)out).error;
+}
+
+static inline errval_t
+invoke_monitor_identify_domains_cap(capaddr_t root_cap, int root_bits,
+ capaddr_t cap, int bits,
+ struct capability *out)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ uint8_t invoke_bits = get_cap_valid_bits(cap_kernel);
+ capaddr_t invoke_cptr = get_cap_addr(cap_kernel) >> (CPTR_BITS - invoke_bits);
+
+ return syscall7((invoke_bits << 16) | (KernelCmd_Identify_domains_cap << 8)
+ | SYSCALL_INVOKE, invoke_cptr, root_cap, root_bits,
+ cap, bits, (uintptr_t)out).error;
+}
+
+static inline errval_t
+invoke_monitor_nullify_cap(capaddr_t cap, int bits)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ uint8_t invoke_bits = get_cap_valid_bits(cap_kernel);
+ capaddr_t invoke_cptr = get_cap_addr(cap_kernel) >> (CPTR_BITS - invoke_bits);
+
+ return syscall4((invoke_bits << 16) | (KernelCmd_Nullify_cap << 8)
+ | SYSCALL_INVOKE, invoke_cptr, cap, bits).error;
+}
+
+static inline errval_t
+invoke_monitor_create_cap(uint64_t *raw, capaddr_t caddr, int bits, capaddr_t slot, coreid_t owner)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ return cap_invoke6(cap_kernel, KernelCmd_Create_cap, caddr, bits, slot,
+ owner, (uintptr_t)raw).error;
+}
+
+static inline errval_t
+invoke_monitor_register(struct capref ep)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ uint8_t invoke_bits = get_cap_valid_bits(cap_kernel);
+ capaddr_t invoke_cptr = get_cap_addr(cap_kernel) >> (CPTR_BITS - invoke_bits);
+
+ return syscall3((invoke_bits << 16) | (KernelCmd_Register << 8)
+ | SYSCALL_INVOKE, invoke_cptr, get_cap_addr(ep)).error;
+}
+
+static inline errval_t
+invoke_monitor_remote_cap_retype(capaddr_t rootcap_addr, uint8_t rootcap_vbits,
+ capaddr_t src, enum objtype newtype,
+ int objbits, capaddr_t to, capaddr_t slot,
+ int bits)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ return cap_invoke7(cap_kernel, KernelCmd_Retype,
+ src, (newtype << 16) | (objbits << 8) | bits, to, slot,
+ rootcap_addr, rootcap_vbits).error;
+}
+
+static inline errval_t
+invoke_monitor_get_cap_owner(capaddr_t root, int rbits, capaddr_t cap, int cbits, coreid_t *ret_owner)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ struct sysret sysret = cap_invoke5(cap_kernel, KernelCmd_Get_cap_owner,
+ root, rbits, cap, cbits);
+ if (err_is_ok(sysret.error)) {
+ *ret_owner = sysret.value;
+ }
+ return sysret.error;
+}
+
+static inline errval_t
+invoke_monitor_set_cap_owner(capaddr_t root, int rbits, capaddr_t cap, int cbits, coreid_t owner)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ return cap_invoke6(cap_kernel, KernelCmd_Set_cap_owner, root, rbits, cap, cbits, owner).error;
+}
+
+
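+/* 'relations' and 'mask' are packed into a single 16-bit argument below
+ * (mask in the high byte, relations in the low byte). */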
+static inline errval_t
+invoke_monitor_remote_relations(capaddr_t root_cap, int root_bits,
+ capaddr_t cap, int bits,
+ uint8_t relations, uint8_t mask,
+ uint8_t *ret_remote_relations)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ struct sysret r = cap_invoke6(cap_kernel, KernelCmd_Remote_relations,
+ root_cap, root_bits, cap, bits,
+ ((uint16_t)relations) | (((uint16_t)mask)<<8));
+ if (err_is_ok(r.error) && ret_remote_relations) {
+ *ret_remote_relations = r.value;
+ }
+ return r.error;
+}
+
+static inline errval_t
+invoke_monitor_cap_has_relations(capaddr_t caddr, uint8_t bits, uint8_t mask, uint8_t *res)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ assert(res);
+ struct sysret ret = cap_invoke4(cap_kernel, KernelCmd_Cap_has_relations,
+ caddr, bits, mask);
+ if (err_is_ok(ret.error)) {
+ *res = ret.value;
+ }
+ return ret.error;
+}
+
+
+static inline errval_t
+invoke_monitor_lock_cap(capaddr_t root, int rbits, capaddr_t cap, int cbits)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ return cap_invoke5(cap_kernel, KernelCmd_Lock_cap, root, rbits, cap, cbits).error;
+}
+
+static inline errval_t
+invoke_monitor_unlock_cap(capaddr_t root, int rbits, capaddr_t cap, int cbits)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ return cap_invoke5(cap_kernel, KernelCmd_Unlock_cap, root, rbits, cap, cbits).error;
+}
+
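+/* rbits, cbits and retcnbits are packed into the last invocation argument
+ * and must each fit in 8 bits, as the asserts below check. */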
+static inline errval_t
+invoke_monitor_delete_last(capaddr_t root, int rbits, capaddr_t cap, int cbits,
+ capaddr_t retcn, int retcnbits, cslot_t retslot)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ assert(rbits <= 0xff);
+ assert(cbits <= 0xff);
+ assert(retcnbits <= 0xff);
+
+ return cap_invoke6(cap_kernel, KernelCmd_Delete_last, root, cap,
+ retcn, retslot, ((cbits<<16)|(rbits<<8)|retcnbits)).error;
+}
+
+static inline errval_t
+invoke_monitor_delete_foreigns(capaddr_t cap, int bits)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ return cap_invoke3(cap_kernel, KernelCmd_Delete_foreigns, cap, bits).error;
+}
+
+static inline errval_t
+invoke_monitor_revoke_mark_target(capaddr_t root, int rbits,
+ capaddr_t cap, int cbits)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ return cap_invoke5(cap_kernel, KernelCmd_Revoke_mark_target,
+ root, rbits, cap, cbits).error;
+}
+
+static inline errval_t
+invoke_monitor_revoke_mark_relations(uint64_t *raw_base)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ // XXX: this is assumed in client code of this function!
+ assert(sizeof(struct capability) / sizeof(uint64_t) <= 4);
+ return cap_invoke2(cap_kernel, KernelCmd_Revoke_mark_relations,
+ (uintptr_t)raw_base).error;
+}
+
+static inline errval_t
+invoke_monitor_delete_step(capaddr_t retcn, int retcnbits, cslot_t retslot)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ return cap_invoke4(cap_kernel, KernelCmd_Delete_step,
+ retcn, retcnbits, retslot).error;
+}
+
+static inline errval_t
+invoke_monitor_clear_step(capaddr_t retcn, int retcnbits, cslot_t retslot)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ return cap_invoke4(cap_kernel, KernelCmd_Clear_step,
+ retcn, retcnbits, retslot).error;
+}
+
+static inline errval_t
+invoke_monitor_has_descendants(uint64_t *raw, bool *res)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ // XXX: this is assumed in client code of this function!
+ assert(sizeof(struct capability) / sizeof(uint64_t) <= 4);
+
+ struct sysret sysret;
+ sysret = cap_invoke2(cap_kernel, KernelCmd_Has_descendants,
+ (uintptr_t)raw);
+ if (err_is_ok(sysret.error)) {
+ *res = sysret.value;
+ }
+ return sysret.error;
+}
+
+
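+/*
+ * The next four wrappers are unimplemented stubs on this architecture:
+ * they panic with "NYI" and return LIB_ERR_NOT_IMPLEMENTED.
+ */
+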
+/**
+ * \brief Set up tracing in the kernel.
+ */
+static inline errval_t
+invoke_trace_setup(struct capref cap)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ USER_PANIC("NYI");
+ return LIB_ERR_NOT_IMPLEMENTED;
+}
+
+static inline errval_t
+invoke_domain_id(struct capref cap, uint64_t domain_id)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ USER_PANIC("NYI");
+ return LIB_ERR_NOT_IMPLEMENTED;
+}
+
+static inline errval_t
+invoke_monitor_rck_register(struct capref kern_cap, struct capref ep,
+ int chanid)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ USER_PANIC("NYI");
+ return LIB_ERR_NOT_IMPLEMENTED;
+}
+
+static inline errval_t
+invoke_monitor_rck_delete(struct capref kern_cap, int chanid)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ USER_PANIC("NYI");
+ return LIB_ERR_NOT_IMPLEMENTED;
+}
+
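+/* The 64-bit sync time is passed as two 32-bit halves (high word first). */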
+static inline errval_t invoke_monitor_sync_timer(uint64_t synctime)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ uint8_t invoke_bits = get_cap_valid_bits(cap_kernel);
+ capaddr_t invoke_cptr = get_cap_addr(cap_kernel) >> (CPTR_BITS - invoke_bits);
+
+ return syscall4((invoke_bits << 16) | (KernelCmd_Sync_timer << 8)
+ | SYSCALL_INVOKE, invoke_cptr, synctime >> 32,
+ synctime & 0xffffffff).error;
+}
+
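+/*
+ * Example (hypothetical caller):
+ *
+ *   uintptr_t arch_id;
+ *   errval_t err = invoke_monitor_get_arch_id(&arch_id);
+ *   // on success, arch_id holds the value reported by the kernel
+ */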
+static inline errval_t
+invoke_monitor_get_arch_id(uintptr_t *arch_id)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ assert(arch_id != NULL);
+
+ struct sysret sysret = cap_invoke1(cap_kernel, KernelCmd_Get_arch_id);
+ if (err_is_ok(sysret.error)) {
+ *arch_id = sysret.value;
+ }
+ return sysret.error;
+}
+
+static inline errval_t
+invoke_monitor_ipi_register(struct capref ep, int chanid)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ uint8_t invoke_bits = get_cap_valid_bits(cap_kernel);
+ capaddr_t invoke_cptr = get_cap_addr(cap_kernel) >> (CPTR_BITS - invoke_bits);
+
+ return syscall4((invoke_bits << 16) | (KernelCmd_IPI_Register << 8)
+ | SYSCALL_INVOKE, invoke_cptr,
+ get_cap_addr(ep),
+ chanid).error;
+}
+
+static inline errval_t
+invoke_monitor_ipi_delete(int chanid)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ uint8_t invoke_bits = get_cap_valid_bits(cap_kernel);
+ capaddr_t invoke_cptr = get_cap_addr(cap_kernel) >> (CPTR_BITS - invoke_bits);
+
+ return syscall3((invoke_bits << 16) | (KernelCmd_IPI_Delete << 8)
+ | SYSCALL_INVOKE, invoke_cptr,
+ chanid).error;
+}
+
+static inline errval_t
+invoke_monitor_copy_existing(uint64_t *raw, capaddr_t cn_addr, int cn_bits, cslot_t slot)
+{
+ DEBUG_INVOCATION("%s: called from %p\n", __FUNCTION__, __builtin_return_address(0));
+ // XXX: this is assumed in client code of this function!
+ assert(sizeof(struct capability) <= 4*sizeof(uint64_t));
+
+ return cap_invoke5(cap_kernel, KernelCmd_Copy_existing,
+ cn_addr, cn_bits, slot, (uintptr_t)raw).error;
+}
+
+static inline errval_t
+invoke_monitor_add_kcb(uintptr_t kcb_base)
+{
+ assert(kcb_base);
+ return cap_invoke2(cap_kernel, KernelCmd_Add_kcb, kcb_base).error;
+}
+
+static inline errval_t
+invoke_monitor_remove_kcb(uintptr_t kcb_base)
+{
+ assert(kcb_base);
+ return cap_invoke2(cap_kernel, KernelCmd_Remove_kcb, kcb_base).error;
+}
+
+static inline errval_t
+invoke_monitor_suspend_kcb_scheduler(bool suspend)
+{
+ return cap_invoke2(cap_kernel, KernelCmd_Suspend_kcb_sched, suspend).error;
+}
+
+#endif