/*
* Copyright (c) 2009, 2010, 2012, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
/**
/** We can define some constants using the "define" construct **/
-/* XXX: these must match the corresponding OBJBITS definitions in barrelfish_kpi/capabilities.h */
+/* XXX: these must match the corresponding OBJBITS definitions in
+ * barrelfish_kpi/capabilities.h */
/* Size of CNode entry: */
define cte_size 7;
size_bits { vnode_size };
};
+/* ARM AArch64-specific capabilities: */
+
+cap VNode_AARCH64_l1 from RAM {
+ /* L1 Page Table */
+ address genpaddr base; /* Base address of VNode */
+ size_bits { vnode_size };
+};
+
+cap VNode_AARCH64_l2 from RAM {
+ /* L2 Page Table */
+ address genpaddr base; /* Base address of VNode */
+ size_bits { vnode_size };
+};
+
+cap VNode_AARCH64_l3 from RAM {
+ /* L3 Page Table */
+ address genpaddr base; /* Base address of VNode */
+ size_bits { vnode_size };
+};
+
+
/** IRQTable and IO are slightly different **/
cap IRQTable is_always_copy {
--------------------------------------------------------------------------
--- Copyright (c) 2007-2010, ETH Zurich.
+-- Copyright (c) 2015, ETH Zurich.
-- All rights reserved.
--
-- This file is distributed under the terms in the attached LICENSE file.
-- If you do not find this file, copies can be found by writing to:
--- ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+-- ETH Zurich D-INFK, CAB F.78, Universitaetstr. 6, CH-8092 Zurich.
+-- Attn: Systems Group.
--
--- Architectural definitions for Barrelfish on ARMv5 ISA.
---
--- The build target is the integratorcp board on QEMU with the default
--- ARM926EJ-S cpu.
+-- Architectural definitions for Barrelfish on ARMv8.
--
--------------------------------------------------------------------------
arch = "armv8"
archFamily = "aarch64"
-toolprefix = "aarch64-none-elf-"
-
-compiler = toolprefix ++ "gcc"
-objcopy = toolprefix ++ "objcopy"
-objdump = toolprefix ++ "objdump"
-ar = toolprefix ++ "ar"
-ranlib = toolprefix ++ "ranlib"
-cxxcompiler = toolprefix ++ "g++"
+compiler = Config.aarch64_cc
+objcopy = Config.aarch64_objcopy
+objdump = Config.aarch64_objdump
+ar = Config.aarch64_ar
+ranlib = Config.aarch64_ranlib
+cxxcompiler = Config.aarch64_cxx
ourCommonFlags = [ Str "-fno-unwind-tables",
Str "-Wno-packed-bitfield-compat",
Str "-mcpu=cortex-a57",
Str "-march=armv8-a",
Str "-mabi=lp64",
+ Str "-mstrict-align",
Str "-DPIC_REGISTER=X10",
Str "-fPIE",
- Str "-ffixed-r9",
- Str "-DTHREAD_REGISTER=X9",
+ Str "-ffixed-x18",
+ Str "-DTHREAD_REGISTER=X18",
Str "-D__ARM_CORTEX__",
Str "-D__ARM_ARCH_8A__",
+ Str "-DPREFER_SIZE_OVER_SPEED",
Str "-Wno-unused-but-set-variable",
Str "-Wno-format"
]
"-mcpu=cortex-a57",
"-march=armv8-a",
"-mabi=lp64",
+ "-mstrict-align",
"-fPIE",
"-U__linux__",
"-Wall",
"-ffreestanding",
"-fomit-frame-pointer",
"-Wmissing-noreturn",
- "-ffixed-r9",
- "-DTHREAD_REGISTER=X9",
+ "-DPIC_REGISTER=X10",
+ "-ffixed-x18",
+ "-DTHREAD_REGISTER=X18",
"-D__ARM_CORTEX__",
"-D__ARM_ARCH_8A__",
+ "-DPREFER_SIZE_OVER_SPEED",
"-Wno-unused-but-set-variable",
- "-Wno-format"
+ "-Wno-format",
+ "-Wno-suggest-attribute=noreturn"
]]
kernelLdFlags = [ Str "-Wl,-N",
--------------------------------------------------------------------------
--- Copyright (c) 2007-2010, ETH Zurich.
+-- Copyright (c) 2007-2015 ETH Zurich.
-- All rights reserved.
--
-- This file is distributed under the terms in the attached LICENSE file.
import qualified Tools
-- Set by hake.sh
-arm_toolspec :: Maybe ToolDetails
-thumb_toolspec :: Maybe ToolDetails
-armeb_toolspec :: Maybe ToolDetails
-x86_toolspec :: Maybe ToolDetails
-k1om_toolspec :: Maybe ToolDetails
+toolroot :: Maybe FilePath
+arm_toolspec :: Maybe (Maybe FilePath -> ToolDetails)
+aarch64_toolspec :: Maybe (Maybe FilePath -> ToolDetails)
+thumb_toolspec :: Maybe (Maybe FilePath -> ToolDetails)
+armeb_toolspec :: Maybe (Maybe FilePath -> ToolDetails)
+x86_toolspec :: Maybe (Maybe FilePath -> ToolDetails)
+k1om_toolspec :: Maybe (Maybe FilePath -> ToolDetails)
-- Default toolchains
-arm_tools = fromMaybe Tools.arm_system arm_toolspec
-thumb_tools = fromMaybe Tools.arm_netos_arm_2015q2 thumb_toolspec
-armeb_tools = fromMaybe Tools.arm_netos_linaro_be_2015_02 armeb_toolspec
-x86_tools = fromMaybe Tools.x86_system x86_toolspec
-k1om_tools = fromMaybe Tools.k1om_netos_mpss_3_4 k1om_toolspec
+arm_tools = fromMaybe Tools.arm_system
+ arm_toolspec
+ toolroot
+aarch64_tools = fromMaybe Tools.arm_netos_linaro_aarch64_2014_11
+ aarch64_toolspec
+ toolroot
+thumb_tools = fromMaybe Tools.arm_netos_arm_2015q2
+ thumb_toolspec
+ toolroot
+armeb_tools = fromMaybe Tools.arm_netos_linaro_be_2015_02
+ armeb_toolspec
+ toolroot
+x86_tools = fromMaybe Tools.x86_system
+ x86_toolspec
+ toolroot
+k1om_tools = fromMaybe Tools.k1om_netos_mpss_3_4
+ k1om_toolspec
+ toolroot
-- ARM toolchain
arm_gnu_tool = findTool (toolPath arm_tools) (toolPrefix arm_tools)
arm_ranlib = arm_gnu_tool "ranlib"
arm_cxx = arm_gnu_tool "g++"
+-- ARM AArch64
+aarch64_gnu_tool = findTool (toolPath aarch64_tools) (toolPrefix aarch64_tools)
+aarch64_cc = aarch64_gnu_tool "gcc"
+aarch64_objcopy = aarch64_gnu_tool "objcopy"
+aarch64_objdump = aarch64_gnu_tool "objdump"
+aarch64_ar = aarch64_gnu_tool "ar"
+aarch64_ranlib = aarch64_gnu_tool "ranlib"
+aarch64_cxx = aarch64_gnu_tool "g++"
+
-- ARM thumb (e.g. -M profile) toolchain
thumb_gnu_tool = findTool (toolPath thumb_tools) (toolPrefix thumb_tools)
thumb_cc = thumb_gnu_tool "gcc"
-- Lazy THC implementation (requires use_fp = True)
lazy_thc :: Bool
-lazy_thc | elem "armv7" architectures = False
- | elem "armv5" architectures = False
- | elem "xscale" architectures = False
- | otherwise = True
+lazy_thc
+ | elem "armv7" architectures = False
+ | elem "armv5" architectures = False
+ | elem "armv8" architectures = False
+ | elem "xscale" architectures = False
+ | otherwise = True
-- Enable capability tracing debug facility
caps_trace :: Bool
-------------------------------------------------------------------------
--- Copyright (c) 2007-2011, 2012 ETH Zurich.
+-- Copyright (c) 2007-2011, 2012, 2015 ETH Zurich.
-- All rights reserved.
--
-- This file is distributed under the terms in the attached LICENSE file.
ARMv7.makeDepend opts phase src obj depfile
| optArch opts == "armv7-m" =
ARMv7_M.makeDepend opts phase src obj depfile
- | optArch opts == "armv8" =
+ | optArch opts == "armv8" =
ARMv8.makeDepend opts phase src obj depfile
| otherwise = [ ErrorMsg ("no dependency generator for " ++ (optArch opts)) ]
module Tools where
import System.FilePath
+import Data.Maybe(fromMaybe)
findTool path prefix tool = path </> (prefix ++ tool)
toolPrefix :: String
}
+-- This is the default root under which toolchains are installed at ETH.
+-- It can be overridden when running Hake.
+mkRoot root = fromMaybe "/home/netos/tools" root
+
--
-- ARM Cortex-A little-endian toolchains (armv7,armv5)
--
-- System (Ubuntu) ARM toolchain
-arm_system
+arm_system _
= ToolDetails {
toolPath = "",
toolPrefix = "arm-linux-gnueabi-"
}
-- Linaro 2015.06 (GCC 4.8)
-arm_netos_linaro_2015_06
+arm_netos_linaro_2015_06 root
= ToolDetails {
- toolPath = "/home/netos/tools/linaro" </>
+ toolPath = mkRoot root </> "linaro" </>
"gcc-linaro-4.8-2015.06-x86_64_arm-eabi" </>
"bin",
toolPrefix = "arm-eabi-"
}
-- Linaro 2015.05 (GCC 4.9)
-arm_netos_linaro_2015_05
+arm_netos_linaro_2015_05 root
= ToolDetails {
- toolPath = "/home/netos/tools/linaro" </>
+ toolPath = mkRoot root </> "linaro" </>
"gcc-linaro-4.9-2015.05-x86_64_arm-eabi" </>
"bin",
toolPrefix = "arm-eabi-"
}
-- Linaro 2015.02 (GCC 4.9)
-arm_netos_linaro_2015_02
+arm_netos_linaro_2015_02 root
= ToolDetails {
- toolPath = "/home/netos/tools/linaro" </>
+ toolPath = mkRoot root </> "linaro" </>
"gcc-linaro-4.9-2015.02-3-x86_64_arm-eabi" </>
"bin",
toolPrefix = "arm-eabi-"
}
-- Linaro 2014.11 (GCC 4.9)
-arm_netos_linaro_2014_11
+arm_netos_linaro_2014_11 root
= ToolDetails {
- toolPath = "/home/netos/tools/linaro" </>
+ toolPath = mkRoot root </> "linaro" </>
"gcc-linaro-4.9-2014.11-x86_64_arm-eabi" </>
"bin",
toolPrefix = "arm-eabi-"
}
--
+-- ARM AArch64 toolchains
+--
+
+-- Linaro 2014.11 (GCC 4.9)
+arm_netos_linaro_aarch64_2014_11 root
+ = ToolDetails {
+ toolPath = mkRoot root </> "linaro" </>
+ "gcc-linaro-4.9-2014.11-x86_64_aarch64-elf" </>
+ "bin",
+ toolPrefix = "aarch64-none-elf-"
+ }
+
+-- Linaro 2015.02 (GCC 4.9)
+arm_netos_linaro_aarch64_2015_02 root
+ = ToolDetails {
+ toolPath = mkRoot root </> "linaro" </>
+ "gcc-linaro-4.9-2015.02-3-x86_64_aarch64-elf" </>
+ "bin",
+ toolPrefix = "aarch64-elf-"
+ }
+
+--
-- ARM Cortex-M little-endian toolchains (armv7m)
--
-- ARM-GCC 2014q4 (GCC 4.9)
-arm_netos_arm_2014q4
+arm_netos_arm_2014q4 root
= ToolDetails {
- toolPath = "/home/netos/tools/gcc-arm-embedded" </>
+ toolPath = mkRoot root </> "gcc-arm-embedded" </>
"gcc-arm-none-eabi-4_9-2014q4" </>
"bin",
toolPrefix = "arm-none-eabi-"
}
-- ARM-GCC 2015q1 (GCC 4.9)
-arm_netos_arm_2015q1
+arm_netos_arm_2015q1 root
= ToolDetails {
- toolPath = "/home/netos/tools/gcc-arm-embedded" </>
+ toolPath = mkRoot root </> "gcc-arm-embedded" </>
"gcc-arm-none-eabi-4_9-2015q1" </>
"bin",
toolPrefix = "arm-none-eabi-"
}
-- ARM-GCC 2015q2 (GCC 4.9)
-arm_netos_arm_2015q2
+arm_netos_arm_2015q2 root
= ToolDetails {
- toolPath = "/home/netos/tools/gcc-arm-embedded" </>
+ toolPath = mkRoot root </> "gcc-arm-embedded" </>
"gcc-arm-none-eabi-4_9-2015q2" </>
"bin",
toolPrefix = "arm-none-eabi-"
--
-- Linaro 2015.02 (GCC 4.9)
-arm_netos_linaro_be_2015_02
+arm_netos_linaro_be_2015_02 root
= ToolDetails {
- toolPath = "/home/netos/tools/linaro" </>
+ toolPath = mkRoot root </> "linaro" </>
"gcc-linaro-4.9-2015.02-3-x86_64_armeb-eabi" </>
"bin",
toolPrefix = "armeb-eabi-"
--
-- System (Ubuntu) ARM toolchain
-x86_system
+x86_system _
= ToolDetails {
toolPath = "",
toolPrefix = "x86_64-linux-gnu-"
--
-- Intel MPSS 3.4 (GCC 4.7)
-k1om_netos_mpss_3_4
+k1om_netos_mpss_3_4 root
= ToolDetails {
- toolPath = "/home/netos/tools" </>
+ toolPath = mkRoot root </>
"mpss-3.4/x86_64-mpsssdk-linux" </>
"usr/bin/k1om-mpss-linux",
toolPrefix = "k1om-mpss-linux-"
JOBS="$DEFAULT_JOBS"
# Don't override the default toolchain unless asked to.
+TOOLROOT=Nothing
ARM_TOOLSPEC=Nothing
+AARCH64_TOOLSPEC=Nothing
THUMB_TOOLSPEC=Nothing
ARMEB_TOOLSPEC=Nothing
X86_TOOLSPEC=Nothing
echo " for debugging hake)"
echo " -t|--toolchain <arch> <toolchain>: use <toolchain> to build for"
echo " <arch>."
+ echo " -r|--toolroot <path>: where should I look for toolchains (instead"
+  echo "                     of /home/netos/tools)."
echo " -j|--jobs: Number of parallel jobs to run (default $DEFAULT_JOBS)."
echo ""
echo " The way you use this script is to create a new directory for your"
"arm")
ARM_TOOLSPEC="Just Tools.$TOOLSPEC"
;;
+ "aarch64")
+ AARCH64_TOOLSPEC="Just Tools.$TOOLSPEC"
+ ;;
"thumb")
THUMB_TOOLSPEC="Just Tools.$TOOLSPEC"
;;
;;
esac
;;
+ "-r"|"--toolroot")
+ TOOLROOT="Just \"$2\""
+ shift
+ ;;
"-j"|"--jobs")
JOBS="$2"
shift
cat >> hake/Config.hs <<EOF
-- Automatically added by hake.sh. Do NOT copy these definitions to the defaults
-source_dir = "$SRCDIR"
-architectures = [ $ARCHS ]
-install_dir = "$INSTALLDIR"
-arm_toolspec = $ARM_TOOLSPEC
-thumb_toolspec = $THUMB_TOOLSPEC
-armeb_toolspec = $ARMEB_TOOLSPEC
-x86_toolspec = $X86_TOOLSPEC
-k1om_toolspec = $K1OM_TOOLSPEC
+source_dir = "$SRCDIR"
+architectures = [ $ARCHS ]
+install_dir = "$INSTALLDIR"
+toolroot = $TOOLROOT
+arm_toolspec = $ARM_TOOLSPEC
+aarch64_toolspec = $AARCH64_TOOLSPEC
+thumb_toolspec = $THUMB_TOOLSPEC
+armeb_toolspec = $ARMEB_TOOLSPEC
+x86_toolspec = $X86_TOOLSPEC
+k1om_toolspec = $K1OM_TOOLSPEC
EOF
else
echo "You already have Config.hs, leaving it as-is."
--- /dev/null
+# Copyright (c) 2015, ETH Zurich.
+# Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
+# All rights reserved.
+#
+# This file is distributed under the terms in the attached LICENSE file.
+# If you do not find this file, copies can be found by writing to:
+#       ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+
+timeout 0
+
+#
+# This script is used to describe the commands to start at
+# boot-time and the arguments they should receive.
+#
+# Kernel arguments are not read from this script. On QEMU they can be
+# set using 'qemu-system-arm -append ...'.
+
+title Barrelfish
+#root (nd)
+kernel /armv8/sbin/cpu_gem5 loglevel=4
+module /armv8/sbin/cpu_gem5
+module /armv8/sbin/init
+
+# Domains spawned by init
+module /armv8/sbin/mem_serv
+module /armv8/sbin/monitor
+
+# Special boot time domains spawned by monitor
+module /armv8/sbin/ramfsd boot
+module /armv8/sbin/skb boot
+module /armv8/sbin/spawnd boot
+module /armv8/sbin/startd boot
+
+# General user domains
+module /armv8/sbin/serial auto
+module /armv8/sbin/fish nospawn
+module /armv8/sbin/angler serial0.terminal xterm
+
+#module /armv8/sbin/memtest core=0
+
+# GEM5 simulates 256MB of RAM starting at 0x0
+# start size id
+mmap map 0x00000000 0x10000000 1
+
##########################################################################
-# Copyright (c) 2009-2014 ETH Zurich.
+# Copyright (c) 2009-2015 ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
--- /dev/null
+/*-
+ * Copyright (c) 2002, 2003 David Schultz <das@FreeBSD.ORG>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#if defined(__VFP_FP__)
+#define _IEEE_WORD_ORDER _BYTE_ORDER
+#else
+#define _IEEE_WORD_ORDER _BIG_ENDIAN
+#endif
+
+union IEEEl2bits {
+ long double e;
+ struct {
+#if _BYTE_ORDER == _LITTLE_ENDIAN
+#if _IEEE_WORD_ORDER == _LITTLE_ENDIAN
+ unsigned int manl :32;
+#endif
+ unsigned int manh :20;
+ unsigned int exp :11;
+ unsigned int sign :1;
+#if _IEEE_WORD_ORDER == _BIG_ENDIAN
+ unsigned int manl :32;
+#endif
+#else /* _BYTE_ORDER == _LITTLE_ENDIAN */
+ unsigned int sign :1;
+ unsigned int exp :11;
+ unsigned int manh :20;
+ unsigned int manl :32;
+#endif
+ } bits;
+};
+
+#define LDBL_NBIT 0
+#define mask_nbit_l(u) ((void)0)
+
+#define LDBL_MANH_SIZE 32
+#define LDBL_MANL_SIZE 32
+
+#define LDBL_TO_ARRAY32(u, a) do { \
+ (a)[0] = (uint32_t)(u).bits.manl; \
+ (a)[1] = (uint32_t)(u).bits.manh; \
+} while(0)
+
--- /dev/null
+/*
+ * Australian Public Licence B (OZPLB)
+ *
+ * Version 1-0
+ *
+ * Copyright (c) 2004 National ICT Australia
+ *
+ * All rights reserved.
+ *
+ * Developed by: Embedded, Real-time and Operating Systems Program (ERTOS)
+ * National ICT Australia
+ * http://www.ertos.nicta.com.au
+ *
+ * Permission is granted by National ICT Australia, free of charge, to
+ * any person obtaining a copy of this software and any associated
+ * documentation files (the "Software") to deal with the Software without
+ * restriction, including (without limitation) the rights to use, copy,
+ * modify, adapt, merge, publish, distribute, communicate to the public,
+ * sublicense, and/or sell, lend or rent out copies of the Software, and
+ * to permit persons to whom the Software is furnished to do so, subject
+ * to the following conditions:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimers.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimers in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of National ICT Australia, nor the names of its
+ * contributors, may be used to endorse or promote products derived
+ * from this Software without specific prior written permission.
+ *
+ * EXCEPT AS EXPRESSLY STATED IN THIS LICENCE AND TO THE FULL EXTENT
+ * PERMITTED BY APPLICABLE LAW, THE SOFTWARE IS PROVIDED "AS-IS", AND
+ * NATIONAL ICT AUSTRALIA AND ITS CONTRIBUTORS MAKE NO REPRESENTATIONS,
+ * WARRANTIES OR CONDITIONS OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO ANY REPRESENTATIONS, WARRANTIES OR CONDITIONS
+ * REGARDING THE CONTENTS OR ACCURACY OF THE SOFTWARE, OR OF TITLE,
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT,
+ * THE ABSENCE OF LATENT OR OTHER DEFECTS, OR THE PRESENCE OR ABSENCE OF
+ * ERRORS, WHETHER OR NOT DISCOVERABLE.
+ *
+ * TO THE FULL EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT SHALL
+ * NATIONAL ICT AUSTRALIA OR ITS CONTRIBUTORS BE LIABLE ON ANY LEGAL
+ * THEORY (INCLUDING, WITHOUT LIMITATION, IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHERWISE) FOR ANY CLAIM, LOSS, DAMAGES OR OTHER
+ * LIABILITY, INCLUDING (WITHOUT LIMITATION) LOSS OF PRODUCTION OR
+ * OPERATION TIME, LOSS, DAMAGE OR CORRUPTION OF DATA OR RECORDS; OR LOSS
+ * OF ANTICIPATED SAVINGS, OPPORTUNITY, REVENUE, PROFIT OR GOODWILL, OR
+ * OTHER ECONOMIC LOSS; OR ANY SPECIAL, INCIDENTAL, INDIRECT,
+ * CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES, ARISING OUT OF OR IN
+ * CONNECTION WITH THIS LICENCE, THE SOFTWARE OR THE USE OF OR OTHER
+ * DEALINGS WITH THE SOFTWARE, EVEN IF NATIONAL ICT AUSTRALIA OR ITS
+ * CONTRIBUTORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH CLAIM, LOSS,
+ * DAMAGES OR OTHER LIABILITY.
+ *
+ * If applicable legislation implies representations, warranties, or
+ * conditions, or imposes obligations or liability on National ICT
+ * Australia or one of its contributors in respect of the Software that
+ * cannot be wholly or partly excluded, restricted or modified, the
+ * liability of National ICT Australia or the contributor is limited, to
+ * the full extent permitted by the applicable legislation, at its
+ * option, to:
+ * a. in the case of goods, any one or more of the following:
+ * i. the replacement of the goods or the supply of equivalent goods;
+ * ii. the repair of the goods;
+ * iii. the payment of the cost of replacing the goods or of acquiring
+ * equivalent goods;
+ * iv. the payment of the cost of having the goods repaired; or
+ * b. in the case of services:
+ * i. the supplying of the services again; or
+ * ii. the payment of the cost of having the services supplied again.
+ *
+ * The construction, validity and performance of this licence is governed
+ * by the laws in force in New South Wales, Australia.
+ */
+/*
+ Author: Ben Leslie
+*/
+#define __LENGTH_8_MOD "hh"
+#define __LENGTH_16_MOD "h"
+#define __LENGTH_32_MOD
+#define __LENGTH_64_MOD "ll"
+#define __LENGTH_MAX_MOD "ll"
+#define __LENGTH_PTR_MOD
--- /dev/null
+/*
+ * Australian Public Licence B (OZPLB)
+ *
+ * Version 1-0
+ *
+ * Copyright (c) 2004 National ICT Australia
+ *
+ * All rights reserved.
+ *
+ * Developed by: Embedded, Real-time and Operating Systems Program (ERTOS)
+ * National ICT Australia
+ * http://www.ertos.nicta.com.au
+ *
+ * Permission is granted by National ICT Australia, free of charge, to
+ * any person obtaining a copy of this software and any associated
+ * documentation files (the "Software") to deal with the Software without
+ * restriction, including (without limitation) the rights to use, copy,
+ * modify, adapt, merge, publish, distribute, communicate to the public,
+ * sublicense, and/or sell, lend or rent out copies of the Software, and
+ * to permit persons to whom the Software is furnished to do so, subject
+ * to the following conditions:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimers.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimers in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of National ICT Australia, nor the names of its
+ * contributors, may be used to endorse or promote products derived
+ * from this Software without specific prior written permission.
+ *
+ * EXCEPT AS EXPRESSLY STATED IN THIS LICENCE AND TO THE FULL EXTENT
+ * PERMITTED BY APPLICABLE LAW, THE SOFTWARE IS PROVIDED "AS-IS", AND
+ * NATIONAL ICT AUSTRALIA AND ITS CONTRIBUTORS MAKE NO REPRESENTATIONS,
+ * WARRANTIES OR CONDITIONS OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO ANY REPRESENTATIONS, WARRANTIES OR CONDITIONS
+ * REGARDING THE CONTENTS OR ACCURACY OF THE SOFTWARE, OR OF TITLE,
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT,
+ * THE ABSENCE OF LATENT OR OTHER DEFECTS, OR THE PRESENCE OR ABSENCE OF
+ * ERRORS, WHETHER OR NOT DISCOVERABLE.
+ *
+ * TO THE FULL EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT SHALL
+ * NATIONAL ICT AUSTRALIA OR ITS CONTRIBUTORS BE LIABLE ON ANY LEGAL
+ * THEORY (INCLUDING, WITHOUT LIMITATION, IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHERWISE) FOR ANY CLAIM, LOSS, DAMAGES OR OTHER
+ * LIABILITY, INCLUDING (WITHOUT LIMITATION) LOSS OF PRODUCTION OR
+ * OPERATION TIME, LOSS, DAMAGE OR CORRUPTION OF DATA OR RECORDS; OR LOSS
+ * OF ANTICIPATED SAVINGS, OPPORTUNITY, REVENUE, PROFIT OR GOODWILL, OR
+ * OTHER ECONOMIC LOSS; OR ANY SPECIAL, INCIDENTAL, INDIRECT,
+ * CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES, ARISING OUT OF OR IN
+ * CONNECTION WITH THIS LICENCE, THE SOFTWARE OR THE USE OF OR OTHER
+ * DEALINGS WITH THE SOFTWARE, EVEN IF NATIONAL ICT AUSTRALIA OR ITS
+ * CONTRIBUTORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH CLAIM, LOSS,
+ * DAMAGES OR OTHER LIABILITY.
+ *
+ * If applicable legislation implies representations, warranties, or
+ * conditions, or imposes obligations or liability on National ICT
+ * Australia or one of its contributors in respect of the Software that
+ * cannot be wholly or partly excluded, restricted or modified, the
+ * liability of National ICT Australia or the contributor is limited, to
+ * the full extent permitted by the applicable legislation, at its
+ * option, to:
+ * a. in the case of goods, any one or more of the following:
+ * i. the replacement of the goods or the supply of equivalent goods;
+ * ii. the repair of the goods;
+ * iii. the payment of the cost of replacing the goods or of acquiring
+ * equivalent goods;
+ * iv. the payment of the cost of having the goods repaired; or
+ * b. in the case of services:
+ * i. the supplying of the services again; or
+ * ii. the payment of the cost of having the services supplied again.
+ *
+ * The construction, validity and performance of this licence is governed
+ * by the laws in force in New South Wales, Australia.
+ */
+/*
+ Author: Alex Webster
+*/
+
+typedef int jmp_buf[10];
--- /dev/null
+/*
+ * Australian Public Licence B (OZPLB)
+ *
+ * Version 1-0
+ *
+ * Copyright (c) 2004 National ICT Australia
+ *
+ * All rights reserved.
+ *
+ * Developed by: Embedded, Real-time and Operating Systems Program (ERTOS)
+ * National ICT Australia
+ * http://www.ertos.nicta.com.au
+ *
+ * Permission is granted by National ICT Australia, free of charge, to
+ * any person obtaining a copy of this software and any associated
+ * documentation files (the "Software") to deal with the Software without
+ * restriction, including (without limitation) the rights to use, copy,
+ * modify, adapt, merge, publish, distribute, communicate to the public,
+ * sublicense, and/or sell, lend or rent out copies of the Software, and
+ * to permit persons to whom the Software is furnished to do so, subject
+ * to the following conditions:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimers.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimers in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of National ICT Australia, nor the names of its
+ * contributors, may be used to endorse or promote products derived
+ * from this Software without specific prior written permission.
+ *
+ * EXCEPT AS EXPRESSLY STATED IN THIS LICENCE AND TO THE FULL EXTENT
+ * PERMITTED BY APPLICABLE LAW, THE SOFTWARE IS PROVIDED "AS-IS", AND
+ * NATIONAL ICT AUSTRALIA AND ITS CONTRIBUTORS MAKE NO REPRESENTATIONS,
+ * WARRANTIES OR CONDITIONS OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO ANY REPRESENTATIONS, WARRANTIES OR CONDITIONS
+ * REGARDING THE CONTENTS OR ACCURACY OF THE SOFTWARE, OR OF TITLE,
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT,
+ * THE ABSENCE OF LATENT OR OTHER DEFECTS, OR THE PRESENCE OR ABSENCE OF
+ * ERRORS, WHETHER OR NOT DISCOVERABLE.
+ *
+ * TO THE FULL EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT SHALL
+ * NATIONAL ICT AUSTRALIA OR ITS CONTRIBUTORS BE LIABLE ON ANY LEGAL
+ * THEORY (INCLUDING, WITHOUT LIMITATION, IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHERWISE) FOR ANY CLAIM, LOSS, DAMAGES OR OTHER
+ * LIABILITY, INCLUDING (WITHOUT LIMITATION) LOSS OF PRODUCTION OR
+ * OPERATION TIME, LOSS, DAMAGE OR CORRUPTION OF DATA OR RECORDS; OR LOSS
+ * OF ANTICIPATED SAVINGS, OPPORTUNITY, REVENUE, PROFIT OR GOODWILL, OR
+ * OTHER ECONOMIC LOSS; OR ANY SPECIAL, INCIDENTAL, INDIRECT,
+ * CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES, ARISING OUT OF OR IN
+ * CONNECTION WITH THIS LICENCE, THE SOFTWARE OR THE USE OF OR OTHER
+ * DEALINGS WITH THE SOFTWARE, EVEN IF NATIONAL ICT AUSTRALIA OR ITS
+ * CONTRIBUTORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH CLAIM, LOSS,
+ * DAMAGES OR OTHER LIABILITY.
+ *
+ * If applicable legislation implies representations, warranties, or
+ * conditions, or imposes obligations or liability on National ICT
+ * Australia or one of its contributors in respect of the Software that
+ * cannot be wholly or partly excluded, restricted or modified, the
+ * liability of National ICT Australia or the contributor is limited, to
+ * the full extent permitted by the applicable legislation, at its
+ * option, to:
+ * a. in the case of goods, any one or more of the following:
+ * i. the replacement of the goods or the supply of equivalent goods;
+ * ii. the repair of the goods;
+ * iii. the payment of the cost of replacing the goods or of acquiring
+ * equivalent goods;
+ * iv. the payment of the cost of having the goods repaired; or
+ * b. in the case of services:
+ * i. the supplying of the services again; or
+ * ii. the payment of the cost of having the services supplied again.
+ *
+ * The construction, validity and performance of this licence is governed
+ * by the laws in force in New South Wales, Australia.
+ */
+/*
+ Author: Ben Leslie
+*/
+typedef signed char int8_t;
+typedef short int16_t;
+typedef int int32_t;
+typedef long long int64_t;
+
+typedef unsigned char uint8_t;
+typedef unsigned short uint16_t;
+typedef unsigned int uint32_t;
+typedef unsigned long long uint64_t;
+
+#define __PTR_SIZE 64
*/
/*
- * Copyright (c) 2009, 2010, ETH Zurich.
+ * Copyright (c) 2009, 2010, 2015, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
#ifndef ARCH_BULK_TRANSFER_H
#define AARCH64_BARRELFISH_CPU_H
#if __ARM_ARCH_8A__
-#define CURRENT_CPU_TYPE CPU_AARCH648
+#define CURRENT_CPU_TYPE CPU_ARM8
#else
#error "must define CURRENT_CPU_TYPE"
#endif
*/
/*
- * Copyright (c) 2007, 2008, 2009, 2010, 2012, 2013, ETH Zurich.
+ * Copyright (c) 2007-2010, 2012, 2013, 2015, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
#ifndef INCLUDEBARRELFISH_INVOCATIONS_ARCH_H
#include <barrelfish_kpi/syscalls.h>
#include <barrelfish/caddr.h>
#include <barrelfish_kpi/paging_arch.h>
-#include <barrelfish/debug.h> // for USER_PANIC()
/**
* capability invocation syscall wrapper, copied from x86_32 version
uintptr_t arg8, uintptr_t arg9,
uintptr_t arg10, uintptr_t arg11)
{
- // XXX: TODO
- USER_PANIC("NYI");
uint8_t invoke_bits = get_cap_valid_bits(to);
capaddr_t invoke_cptr = get_cap_addr(to) >> (CPTR_BITS - invoke_bits);
SYSCALL_INVOKE, invoke_cptr).error;
}
+static inline errval_t invoke_dispatcher_dump_capabilities(struct capref dispcap)
+{
+ uint8_t invoke_bits = get_cap_valid_bits(dispcap);
+ capaddr_t invoke_cptr = get_cap_addr(dispcap) >> (CPTR_BITS - invoke_bits);
+
+ return syscall2((invoke_bits << 16) | (DispatcherCmd_DumpCapabilities << 8) |
+ SYSCALL_INVOKE, invoke_cptr).error;
+}
+
static inline errval_t
invoke_dispatcher_properties(
struct capref dispatcher,
#include <barrelfish/syscall_arch.h>
#include <barrelfish/caddr.h>
-#include <barrelfish/debug.h> // for USER_PANIC()
#include <barrelfish_kpi/lmp.h>
#include <barrelfish_kpi/syscalls.h>
uintptr_t arg9
)
{
- USER_PANIC("NYI!");
uint8_t invoke_bits = get_cap_valid_bits(ep);
capaddr_t invoke_cptr = get_cap_addr(ep) >> (CPTR_BITS - invoke_bits);
#define lmp_ep_send9(ep, flags, send_cap, a, b, c, d, e, f, g, h, i) \
lmp_ep_send((ep),(flags),(send_cap),9,(a),(b),(c),(d),(e),(f),(g),(h),(i))
-#define lmp_ep_send8(ep, flags, send_cap, a, b, c, d, e, f, g, h, i) \
+#define lmp_ep_send8(ep, flags, send_cap, a, b, c, d, e, f, g, h) \
lmp_ep_send((ep),(flags),(send_cap),8,(a),(b),(c),(d),(e),(f),(g),(h),0)
#define lmp_ep_send7(ep, flags, send_cap, a, b, c, d, e, f, g) \
lmp_ep_send((ep),(flags),(send_cap),7,(a),(b),(c),(d),(e),(f),(g),0,0)
/*
* Copyright (c) 2010, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
#ifndef ARCH_AARCH64_BARRELFISH_PMAP_H
*/
/*
- * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * Copyright (c) 2007-2010, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
#ifndef ARCH_AARCH64_BARRELFISH_SYSCALL_H
#define ARCH_AARCH64_BARRELFISH_SYSCALL_H
-
//
// This is the actual system call function. Because the return
-// value is a structure, r0 is setup point to the return
-// structure. The first system call argument supplied at end of
+// value is a structure with two members: x0 is set up to point to the
+// 1st member and x1 is set up to point to the 2nd member of the structure.
+// The first system call argument is supplied at the end of the
// argument list and moved to r0 before use in syscall. This
-// simplifies the amount of swizzling involved therein as r1 =
-// arg1, r2 = arg2, r3 = arg3, and the remaining args including
-// arg0 are on the stack.
+// simplifies the amount of swizzling involved therein, as x1 =
+// arg1, x2 = arg2, x3 = arg3, ..., x7 = arg7; the remaining arguments
+// are passed on the stack.
//
extern struct sysret
syscall(uintptr_t b, uintptr_t c, uintptr_t d, uintptr_t e,
uintptr_t j, uintptr_t k, uintptr_t l, uintptr_t a);
#define syscallx(a,b,c,d,e,f,g,h,i,j,k,l) \
- syscall(b,c,d,e,f,g,h,i,j,k,l,a)
+ syscall(a,b,c,d,e,f,g,h,i,j,k,l)
//
// System call argument 0 is encoded thus:
#define syscall1(a) \
syscallx(sysord(a,1),0,0,0,0,0,0,0,0,0,0,0)
-
#endif
/**
* \file
- * \brief
+ * \brief Some arch specific asm inlines
*/
/*
#ifndef ARCH_AARCH64_BARRELFISH_KPI_ASM_INLINES_H
#define ARCH_AARCH64_BARRELFISH_KPI_ASM_INLINES_H
+#ifndef __ASSEMBLER__
+
+/* Full-system data memory barrier ("dmb sy"); also a compiler barrier
+ * via the "memory" clobber. */
+static inline void dmb(void)
+{
+ __asm volatile ("dmb sy" : : : "memory");
+}
+
+/* Stub: cycle-counter overflow check is not yet implemented for
+ * AArch64; always reports "no overflow". */
+static inline uint8_t is_cycle_counter_overflow(void)
+{
+    //NYI
+    return 0;
+}
+
+/* Stub: reading the cycle counter is not yet implemented for AArch64;
+ * always returns 0. */
+static inline uint32_t get_cycle_count(void)
+{
+    //NYI
+    return 0;
+}
+
+
+/* Stub: resetting the cycle counter is not yet implemented for
+ * AArch64; this is a no-op. */
+static inline void reset_cycle_counter(void)
+{
+    //NYI
+}
+
+#endif // __ASSEMBLER__
+
#endif // ARCH_AARCH64_BARRELFISH_KPI_ASM_INLINES_H
/*
* Copyright (c) 2010, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
#ifndef ARCH_AARCH64_BARRELFISH_KPI_DISPATCHER_SHARED_ARCH_H
/*
* Copyright (c) 2010, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
#ifndef ARCH_AARCH64_BARRELFISH_KPI_FLAGS_H
#define ARCH_AARCH64_BARRELFISH_KPI_FLAGS_H
-// XXX: TODO: fix these for aarch64
-#define ARM_MODE_USR 0x10
-#define ARM_MODE_FIQ 0x11
-#define ARM_MODE_IRQ 0x12
-#define ARM_MODE_SVC 0x13
-#define ARM_MODE_ABT 0x17
-#define ARM_MODE_UND 0x1b
-#define ARM_MODE_SYS 0x1f
-#define ARM_MODE_MASK 0x1f
-#define ARM_MODE_PRIV 0x0f
+#define AARCH64_MODE_EL0T 0x0
+#define AARCH64_MODE_EL1T 0x4
+#define AARCH64_MODE_EL1H 0x5
+
+#define AARCH64_MODE_USR AARCH64_MODE_EL0T
+/* NOTE(review): the constants below carry ARM32 CPSR mode numbers over
+ * from the ARMv7 port and do not correspond to AArch64 SPSR.M
+ * encodings (EL0T/EL1T/EL1H above) — confirm whether any consumer
+ * still depends on them. */
+#define AARCH64_MODE_FIQ 0x11
+#define AARCH64_MODE_IRQ 0x12
+#define AARCH64_MODE_SVC 0x13
+#define AARCH64_MODE_ABT 0x17
+#define AARCH64_MODE_UND 0x1b
+#define AARCH64_MODE_SYS 0x1f
+#define AARCH64_MODE_MASK 0x1f
+#define AARCH64_MODE_PRIV 0x0f
#define CPSR_IF_MASK 0xc0
#define CPSR_I_MASK 0x80
#ifndef ARCH_AARCH64_BARRELFISH_KPI_PAGING_H
#define ARCH_AARCH64_BARRELFISH_KPI_PAGING_H
-#if defined(__ARM_ARCH_8A__)
#include <target/aarch64/barrelfish_kpi/paging_arm_v8.h>
-#else
-#error "Missing ARM Paging header file"
-#endif
#endif // ARCH_ARM_BARRELFISH_KPI_PAGING_H
/**
* \file
- * \brief architecture-specific registers code
+ * \brief Architecture-specific registers code
*/
/*
* Copyright (c) 2015, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
#ifndef ARCH_AARCH64_BARRELFISH_KPI_REGISTERS_H
#define ARCH_AARCH64_BARRELFISH_KPI_REGISTERS_H
-// TODO: update for aarch64
-
#ifndef __ASSEMBLER__
-#include<stddef.h> // for offsetof
-//#include <barrelfish/curdispatcher_arch.h> // XXX For curdispatcher()
+#include <stddef.h> // for offsetof
+#include <barrelfish/curdispatcher_arch.h> // XXX For curdispatcher()
#include <barrelfish_kpi/types.h> // for lvaddr_t
#endif
//
// Offsets of saved registers in save area.
//
-#define CPSR_REG 0
+#define SPSR_REG 0
#define X0_REG 1
#define X1_REG 2
#define X2_REG 3
#define X26_REG 27
#define X27_REG 28
#define X28_REG 29
-#define X29_REG 30
-#define SP_REG 31
-#define LR_REG 32
+#define FP_REG 30 /* x29 */
+#define LR_REG 31 /* x30 */
+#define SP_REG 32
#define PC_REG 33
#define NUM_REGS 34 /* cpsr, x0-x30, sp, pc */
union registers_aarch64 {
struct registers_aarch64_named {
- uint64_t cpsr;
- uint64_t r0, r1, r2, r3;
- uint64_t r4, r5, r6, r7, r8;
- uint64_t rtls; // r9 is thread local storage
- uint64_t r10; // r10 is for global offset table base.
- uint64_t r11, r12, r13, r14;
- uint64_t r15, r16, r17, r18;
- uint64_t r19, r20, r21, r22;
- uint64_t r23, r24, r25, r26;
- uint64_t r27, r28, r29;
- uint64_t stack; // sp
- uint64_t link; // x30
- uint64_t pc; // pc
+ uint64_t spsr; // saved program status (slot SPSR_REG)
+ uint64_t x0, x1, x2, x3, x4, x5, x6, x7;
+ uint64_t x8;
+ uint64_t x9;
+ uint64_t x10; // x10 is for global offset table base.
+ uint64_t x11, x12, x13, x14, x15;
+ uint64_t x16, x17;
+ uint64_t rtls; // x18 is thread local storage
+ uint64_t x19, x20, x21, x22, x23, x24, x25, x26, x27, x28;
+ uint64_t fp; // x29 (slot FP_REG)
+ uint64_t link; // x30 (slot LR_REG)
+ uint64_t stack; // sp (slot SP_REG)
+ uint64_t pc; // (slot PC_REG)
} named;
struct registers_aarch64_syscall_args {
- uint64_t cpsr;
- uint64_t arg0, arg1, arg2, arg3;
- uint64_t arg4, arg5, arg6, arg7, arg8;
+ uint64_t spsr;
+ uint64_t arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7;
+ uint64_t arg8;
 uint64_t arg9;
 uint64_t arg10;
+ uint64_t arg11, arg12, arg13, arg14, arg15;
+ uint64_t arg16, arg17;
+ uint64_t arg18;
+ /* arg27 was missing: arg19..arg28 must mirror x19..x28 of the named
+  * view so that fp/link/stack/pc below land on slots 30..33 and the
+  * union stays NUM_REGS (34) slots wide. */
+ uint64_t arg19, arg20, arg21, arg22, arg23, arg24, arg25, arg26, arg27, arg28;
 uint64_t fp;
- uint64_t arg11, arg12, arg13, arg14;
- uint64_t arg15, arg16, arg17, arg18;
- uint64_t arg19, arg20, arg21, arg22;
- uint64_t arg23, arg24, arg25, arg26;
- uint64_t arg27, arg28;
- uint64_t stack;
 uint64_t link;
+ uint64_t stack;
 uint64_t pc;
} syscall_args;
uint64_t regs[sizeof(struct registers_aarch64_named) / sizeof(uint64_t)];
static inline void
registers_set_param(arch_registers_state_t *regs, uint64_t param)
{
- regs->named.r0 = param;
+ regs->named.x0 = param; // x0 carries the parameter (slot X0_REG)
}
static inline void
registers_get_param(arch_registers_state_t *regs, uint64_t *param)
{
- *param = regs->named.r0;
+ *param = regs->named.x0; // x0 carries the parameter (slot X0_REG)
}
static inline uint64_t
/*
* Copyright (c) 2010, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
-#ifndef ARCH_ARM_BARRELFISH_KPI_SPINLOCKS_H
-#define ARCH_ARM_BARRELFISH_KPI_SPINLOCKS_H
+#ifndef ARCH_AARCH64_BARRELFISH_KPI_SPINLOCKS_H
+#define ARCH_AARCH64_BARRELFISH_KPI_SPINLOCKS_H
#include <barrelfish_kpi/asm_inlines_arch.h>
typedef volatile uint32_t spinlock_t;
+// See the ARM Architecture Reference Manual: Load-Acquire Exclusive,
+// Store-Release Exclusive instructions and memory barriers.
static inline void acquire_spinlock(spinlock_t *spinlock)
{
- // TODO
+ unsigned long tmp;
+
+ __asm volatile(
+ " sevl\n" /* set local event so the first wfe falls through */
+ " prfm pstl1keep, %1\n" /* prefetch the lock word for store */
+ "1: wfe\n" /* wait for an event before re-checking */
+ " ldaxr %w0, %1\n" /* load-acquire exclusive of lock word */
+ " cbnz %w0, 1b\n" /* non-zero: lock held, wait again */
+ " stxr %w0, %w2, %1\n" /* try to store 1; %w0 == 0 on success */
+ " cbnz %w0, 1b\n" /* exclusive lost: retry from the top */
+ : "=&r" (tmp), "+Q" (*spinlock)
+ : "r" (1)
+ : "memory");
+
+ dmb(); /* order the critical section after lock acquisition */
}
static inline void release_spinlock(spinlock_t *spinlock)
{
- // TODO
+ dmb(); /* drain the critical section before releasing */
+
+ __asm volatile(
+ " stlr %w1, %0\n" /* store-release of 0 clears the lock */
+ : "=Q" (*spinlock) : "r" (0) : "memory");
}
-#endif // ARCH_ARM_BARRELFISH_KPI_SPINLOCKS_H
+#endif // ARCH_AARCH64_BARRELFISH_KPI_SPINLOCKS_H
/*
* Copyright (c) 2010, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
#ifndef ARCH_AARCH64_BARRELFISH_KPI_UNKNOWN_H
--- /dev/null
+/*-
+ * Copyright (c) 2004-2005 David Schultz <das@FreeBSD.ORG>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/lib/msun/arm/fenv.h,v 1.5 2005/03/16 19:03:45 das Exp $
+ */
+
+#ifndef _FENV_H_
+#define _FENV_H_
+
+#include <sys/_types.h>
+
+typedef __uint32_t fenv_t;
+typedef __uint32_t fexcept_t;
+
+/* Exception flags */
+#define FE_INVALID 0x0001
+#define FE_DIVBYZERO 0x0002
+#define FE_OVERFLOW 0x0004
+#define FE_UNDERFLOW 0x0008
+#define FE_INEXACT 0x0010
+#define FE_ALL_EXCEPT (FE_DIVBYZERO | FE_INEXACT | \
+ FE_INVALID | FE_OVERFLOW | FE_UNDERFLOW)
+
+/* Rounding modes */
+#define FE_TONEAREST 0x0000
+#define FE_TOWARDZERO 0x0001
+#define FE_UPWARD 0x0002
+#define FE_DOWNWARD 0x0003
+#define _ROUND_MASK (FE_TONEAREST | FE_DOWNWARD | \
+ FE_UPWARD | FE_TOWARDZERO)
+__BEGIN_DECLS
+
+/* Default floating-point environment */
+extern const fenv_t __fe_dfl_env;
+#define FE_DFL_ENV (&__fe_dfl_env)
+
+/* We need to be able to map status flag positions to mask flag positions */
+#define _FPUSW_SHIFT 16
+#define _ENABLE_MASK (FE_ALL_EXCEPT << _FPUSW_SHIFT)
+
+#ifdef ARM_HARD_FLOAT
+/* NOTE(review): 'rfs'/'wfs' are ARM32 FPA instructions; AArch64 reads
+ * and writes FPSR/FPCR with mrs/msr — confirm ARM_HARD_FLOAT is never
+ * defined for this port. */
+#define __rfs(__fpsr) __asm __volatile("rfs %0" : "=r" (*(__fpsr)))
+#define __wfs(__fpsr) __asm __volatile("wfs %0" : : "r" (__fpsr))
+#else
+/* Soft-float: reads report no flags set, writes are no-ops. */
+#define __rfs(__fpsr) (*(__fpsr)) = (fexcept_t)0
+#define __wfs(__fpsr)
+#endif
+
+/* Clear the given exception flags in the FP status word. */
+static __inline int
+feclearexcept(int __excepts)
+{
+	fexcept_t __fpsr;
+
+	__rfs(&__fpsr);
+	__fpsr &= ~__excepts;
+	__wfs(__fpsr);
+	return (0);
+}
+
+/* Store the state of the given exception flags into *__flagp. */
+static __inline int
+fegetexceptflag(fexcept_t *__flagp, int __excepts)
+{
+	fexcept_t __fpsr;
+
+	__rfs(&__fpsr);
+	*__flagp = __fpsr & __excepts;
+	return (0);
+}
+
+/* Restore the given exception flags from *__flagp without raising them. */
+static __inline int
+fesetexceptflag(const fexcept_t *__flagp, int __excepts)
+{
+	fexcept_t __fpsr;
+
+	__rfs(&__fpsr);
+	__fpsr &= ~__excepts;
+	__fpsr |= *__flagp & __excepts;
+	__wfs(__fpsr);
+	return (0);
+}
+
+/* "Raise" exceptions by setting their status flags; no trap is forced
+ * (hence the upstream XXX). */
+static __inline int
+feraiseexcept(int __excepts)
+{
+	fexcept_t __ex = __excepts;
+
+	fesetexceptflag(&__ex, __excepts);	/* XXX */
+	return (0);
+}
+
+/* Return which of the given exception flags are currently set. */
+static __inline int
+fetestexcept(int __excepts)
+{
+	fexcept_t __fpsr;
+
+	__rfs(&__fpsr);
+	return (__fpsr & __excepts);
+}
+
+/* The dynamic rounding mode cannot be read here; always reports -1. */
+static __inline int
+fegetround(void)
+{
+
+	/*
+	 * Apparently, the rounding mode is specified as part of the
+	 * instruction format on ARM, so the dynamic rounding mode is
+	 * indeterminate.  Some FPUs may differ.
+	 */
+	return (-1);
+}
+
+/* Setting the rounding mode is unsupported; always fails with -1. */
+static __inline int
+fesetround(int __round)
+{
+
+	return (-1);
+}
+
+/* Save the current FP environment (the status word) into *__envp. */
+static __inline int
+fegetenv(fenv_t *__envp)
+{
+
+	__rfs(__envp);
+	return (0);
+}
+
+/* Save the FP environment, then clear all exception flags and disable
+ * exception traps. */
+static __inline int
+feholdexcept(fenv_t *__envp)
+{
+	fenv_t __env;
+
+	__rfs(&__env);
+	*__envp = __env;
+	__env &= ~(FE_ALL_EXCEPT | _ENABLE_MASK);
+	__wfs(__env);
+	return (0);
+}
+
+/* Install the FP environment from *__envp. */
+static __inline int
+fesetenv(const fenv_t *__envp)
+{
+
+	__wfs(*__envp);
+	return (0);
+}
+
+/* Install *__envp, then re-raise the exceptions that were pending. */
+static __inline int
+feupdateenv(const fenv_t *__envp)
+{
+	fexcept_t __fpsr;
+
+	__rfs(&__fpsr);
+	__wfs(*__envp);
+	feraiseexcept(__fpsr & FE_ALL_EXCEPT);
+	return (0);
+}
+
+#if __BSD_VISIBLE
+
+/* Enable traps for the exceptions in __mask; returns the previously
+ * enabled set. */
+static __inline int
+feenableexcept(int __mask)
+{
+	fenv_t __old_fpsr, __new_fpsr;
+
+	__rfs(&__old_fpsr);
+	__new_fpsr = __old_fpsr | (__mask & FE_ALL_EXCEPT) << _FPUSW_SHIFT;
+	__wfs(__new_fpsr);
+	return ((__old_fpsr >> _FPUSW_SHIFT) & FE_ALL_EXCEPT);
+}
+
+/* Disable traps for the exceptions in __mask; returns the previously
+ * enabled set. */
+static __inline int
+fedisableexcept(int __mask)
+{
+	fenv_t __old_fpsr, __new_fpsr;
+
+	__rfs(&__old_fpsr);
+	__new_fpsr = __old_fpsr & ~((__mask & FE_ALL_EXCEPT) << _FPUSW_SHIFT);
+	__wfs(__new_fpsr);
+	return ((__old_fpsr >> _FPUSW_SHIFT) & FE_ALL_EXCEPT);
+}
+
+/* Return the set of exceptions whose traps are currently enabled. */
+static __inline int
+fegetexcept(void)
+{
+	fenv_t __fpsr;
+
+	__rfs(&__fpsr);
+	return ((__fpsr & _ENABLE_MASK) >> _FPUSW_SHIFT);
+}
+
+#endif /* __BSD_VISIBLE */
+
+__END_DECLS
+
+#endif /* !_FENV_H_ */
-/* $NetBSD: float.h,v 1.6 2005/12/11 12:16:47 christos Exp $ */\r
-/* $NetBSD: float_ieee754.h,v 1.8 2005/12/11 12:25:20 christos Exp $ */\r
-\r
/*\r
* Copyright (c) 1992, 1993\r
* The Regents of the University of California. All rights reserved.\r
#define __OFF_MAX __LONG_MAX /* max value for an off_t */
#define __OFF_MIN __LONG_MIN /* min value for an off_t */
-/* Quads and longs are the same on the amd64. Ensure they stay in sync. */
+/* Quads and longs are the same on AArch64. Ensure they stay in sync. */
#define __UQUAD_MAX __ULONG_MAX /* max value for a uquad_t */
#define __QUAD_MAX __LONG_MAX /* max value for a quad_t */
#define __QUAD_MIN __LONG_MIN /* min value for a quad_t */
--- /dev/null
+/*-
+ * Copyright (c) 2001 David E. O'Brien
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)endian.h 8.1 (Berkeley) 6/10/93
+ * $NetBSD: endian.h,v 1.7 1999/08/21 05:53:51 simonb Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _ENDIAN_H_
+#define _ENDIAN_H_
+
+#include <sys/_types.h>
+
+/*
+ * Definitions for byte order, according to byte significance from low
+ * address to high.
+ */
+#define _LITTLE_ENDIAN 1234 /* LSB first: i386, vax */
+#define _BIG_ENDIAN 4321 /* MSB first: 68000, ibm, net */
+#define _PDP_ENDIAN 3412 /* LSB first in word, MSW first in long */
+
+#ifdef __ARMEB__
+#define _BYTE_ORDER _BIG_ENDIAN
+#else
+#define _BYTE_ORDER _LITTLE_ENDIAN
+#endif /* __ARMEB__ */
+
+#if __BSD_VISIBLE
+#define LITTLE_ENDIAN _LITTLE_ENDIAN
+#define BIG_ENDIAN _BIG_ENDIAN
+#define PDP_ENDIAN _PDP_ENDIAN
+#define BYTE_ORDER _BYTE_ORDER
+#endif
+
+#ifdef __ARMEB__
+#define _QUAD_HIGHWORD 0
+#define _QUAD_LOWWORD 1
+#define __ntohl(x) ((__uint32_t)(x))
+#define __ntohs(x) ((__uint16_t)(x))
+#define __htonl(x) ((__uint32_t)(x))
+#define __htons(x) ((__uint16_t)(x))
+#else
+#define _QUAD_HIGHWORD 1
+#define _QUAD_LOWWORD 0
+#define __ntohl(x) (__bswap32(x))
+#define __ntohs(x) (__bswap16(x))
+#define __htonl(x) (__bswap32(x))
+#define __htons(x) (__bswap16(x))
+#endif /* __ARMEB__ */
+/*
+static __inline __uint64_t
+__bswap64(__uint64_t _x)
+{
+
+ return ((_x >> 56) | ((_x >> 40) & 0xff00) | ((_x >> 24) & 0xff0000) |
+ ((_x >> 8) & 0xff000000) | ((_x << 8) & ((__uint64_t)0xff << 32)) |
+ ((_x << 24) & ((__uint64_t)0xff << 40)) |
+ ((_x << 40) & ((__uint64_t)0xff << 48)) | ((_x << 56)));
+}
+
+static __inline __uint32_t
+__bswap32_var(__uint32_t v)
+{
+ __uint32_t t1;
+
+ __asm __volatile("eor %1, %0, %0, ror #16\n"
+ "bic %1, %1, #0x00ff0000\n"
+ "mov %0, %0, ror #8\n"
+ "eor %0, %0, %1, lsr #8\n"
+ : "+r" (v), "=r" (t1));
+
+ return (v);
+}
+*/
+/* Byte-swap a 64-bit value using the AArch64 'rev' instruction. */
+static __inline __uint64_t
+__bswap64_var(__uint64_t v)
+{
+
+	__asm__("rev %0, %0" : "+r"(v));
+	return (v);
+}
+
+/* Byte-swap a 32-bit value ('rev' on the 32-bit %w view). */
+static __inline __uint32_t
+__bswap32_var(__uint32_t v)
+{
+
+	__asm__("rev %w0, %w0" : "+r"(v));
+	return (v);
+}
+
+/* Byte-swap a 16-bit value ('rev16' swaps bytes within each halfword). */
+static __inline __uint16_t
+__bswap16_var(__uint16_t v)
+{
+	__asm__("rev16 %w0, %w0" : "+r"(v));
+	return (v);
+
+}
+
+/*
+static __inline __uint16_t
+__bswap16_var(__uint16_t v)
+{
+ __uint32_t ret = v & 0xffff;
+
+ __asm __volatile(
+ "mov %0, %0, ror #8\n"
+ "orr %0, %0, %0, lsr #16\n"
+ "bic %0, %0, %0, lsl #16"
+ : "+r" (ret));
+
+ return ((__uint16_t)ret);
+}
+*/
+
+
+
+#ifdef __OPTIMIZE__
+
+#define __bswap32_constant(x) \
+ ((((x) & 0xff000000U) >> 24) | \
+ (((x) & 0x00ff0000U) >> 8) | \
+ (((x) & 0x0000ff00U) << 8) | \
+ (((x) & 0x000000ffU) << 24))
+
+#define __bswap16_constant(x) \
+ ((((x) & 0xff00) >> 8) | \
+ (((x) & 0x00ff) << 8))
+
+#define __bswap16(x) \
+ ((__uint16_t)(__builtin_constant_p(x) ? \
+ __bswap16_constant(x) : \
+ __bswap16_var(x)))
+
+#define __bswap32(x) \
+ ((__uint32_t)(__builtin_constant_p(x) ? \
+ __bswap32_constant(x) : \
+ __bswap32_var(x)))
+
+#else
+#define __bswap16(x) __bswap16_var(x)
+#define __bswap32(x) __bswap32_var(x)
+
+#endif /* __OPTIMIZE__ */
+#endif /* !_ENDIAN_H_ */
+
*/
/*
- * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * Copyright (c) 2007-2010, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
#ifndef UMP_IMPL_H
*/
/*
- * Copyright (c) 2007, 2008, 2009, 2010, 2011, 2012, ETH Zurich.
+ * Copyright (c) 2007-2012, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
#ifndef BARRELFISH_CAPABILITIES_H
static inline bool type_is_vnode(enum objtype type)
{
- STATIC_ASSERT(27 == ObjType_Num, "Check VNode definitions");
+ STATIC_ASSERT(30 == ObjType_Num, "Check VNode definitions");
return (type == ObjType_VNode_x86_64_pml4 ||
type == ObjType_VNode_x86_64_pdpt ||
type == ObjType_VNode_x86_32_pdpt ||
type == ObjType_VNode_x86_32_pdir ||
type == ObjType_VNode_x86_32_ptable ||
+ type == ObjType_VNode_AARCH64_l3 ||
+ type == ObjType_VNode_AARCH64_l2 ||
+ type == ObjType_VNode_AARCH64_l1 ||
type == ObjType_VNode_ARM_l2 ||
type == ObjType_VNode_ARM_l1
);
static inline size_t vnode_objbits(enum objtype type)
{
// This function should be emitted by hamlet or somesuch.
- STATIC_ASSERT(27 == ObjType_Num, "Check VNode definitions");
+ STATIC_ASSERT(30 == ObjType_Num, "Check VNode definitions");
if (type == ObjType_VNode_x86_64_pml4 ||
type == ObjType_VNode_x86_64_pdpt ||
{
return 12; // BASE_PAGE_BITS
}
+ else if (type == ObjType_VNode_AARCH64_l1 ||
+ type == ObjType_VNode_AARCH64_l2 ||
+ type == ObjType_VNode_AARCH64_l3)
+ {
+ return 12;
+ }
else if (type == ObjType_VNode_ARM_l1)
{
return 14;
*/
static inline size_t vnode_entry_bits(enum objtype type) {
// This function should be emitted by hamlet or somesuch.
- STATIC_ASSERT(27 == ObjType_Num, "Check VNode definitions");
+ STATIC_ASSERT(30 == ObjType_Num, "Check VNode definitions");
if (type == ObjType_VNode_x86_64_pml4 ||
type == ObjType_VNode_x86_64_pdpt ||
return 10; // log2(X86_32_PTABLE_SIZE) == log2(X86_32_PDIR_SIZE)
}
#endif
+
+ if (type == ObjType_VNode_AARCH64_l1)
+ {
+ return 2;
+ }
+
+ if (type == ObjType_VNode_AARCH64_l2 ||
+ type == ObjType_VNode_AARCH64_l3)
+ {
+ return 9; // log2(ARM_MAX_ENTRIES)
+ }
+
if (type == ObjType_VNode_ARM_l2)
{
return 9; // log2(ARM_L2_MAX_ENTRIES)
*/
/*
- * Copyright (c) 2007, 2008, 2009, 2010, 2012, ETH Zurich.
+ * Copyright (c) 2007-2010, 2012, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
CPU_SCC,
CPU_ARM7,
CPU_ARM5,
+ CPU_ARM8,
CPU_TYPE_NUM // must be last
};
static inline const char *cpu_type_to_archstr(enum cpu_type cpu_type)
{
- STATIC_ASSERT(CPU_TYPE_NUM == 6, "knowledge of all CPU types here");
+ STATIC_ASSERT(CPU_TYPE_NUM == 7, "knowledge of all CPU types here");
switch(cpu_type) {
case CPU_K1OM: return "k1om";
case CPU_X86_64: return "x86_64";
case CPU_SCC: return "scc";
case CPU_ARM7: return "armv7";
case CPU_ARM5: return "armv5";
+ case CPU_ARM8: return "armv8";
default: return "(unknown)";
}
}
static inline const enum cpu_type archstr_to_cputype(char* archstr)
{
- STATIC_ASSERT(CPU_TYPE_NUM == 6, "knowledge of all CPU types here");
+ STATIC_ASSERT(CPU_TYPE_NUM == 7, "knowledge of all CPU types here");
if(strcmp("k1om", archstr) == 0) return CPU_K1OM;
if(strcmp("x86_64", archstr) == 0) return CPU_X86_64;
if(strcmp("scc", archstr) == 0) return CPU_SCC;
if(strcmp("armv7", archstr) == 0) return CPU_ARM7;
if(strcmp("armv5", archstr) == 0) return CPU_ARM5;
+ if(strcmp("armv8", archstr) == 0) return CPU_ARM8;
return CPU_TYPE_NUM;
}
/*
* Copyright (c) 2012, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
#ifndef BARRELFISH_DISTCAPS_H
* Predicates related to sharing capabilities
*/
-STATIC_ASSERT(ObjType_Num == 27, "Knowledge of all cap types");
+STATIC_ASSERT(ObjType_Num == 30, "Knowledge of all cap types");
static inline bool
distcap_needs_locality(enum objtype type)
{
case ObjType_VNode_x86_32_ptable:
case ObjType_VNode_ARM_l1:
case ObjType_VNode_ARM_l2:
+ case ObjType_VNode_AARCH64_l1:
+ case ObjType_VNode_AARCH64_l2:
+ case ObjType_VNode_AARCH64_l3:
// XXX: KCB should need locality?
//case ObjType_KernelControlBlock:
return true;
}
}
-STATIC_ASSERT(ObjType_Num == 27, "Knowledge of all cap types");
+STATIC_ASSERT(ObjType_Num == 30, "Knowledge of all cap types");
static inline bool
distcap_is_moveable(enum objtype type)
{
--- /dev/null
+/*
+ * Macros for bit manipulation: masks, etc.
+ *
+ * Copyright (c) 2015, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef __BITMACROS_H
+#define __BITMACROS_H
+
+/* A one-bit mask at bit n */
+#define BIT(n) (1ULL << (n))
+
+/* An n-bit mask, beginning at bit 0 */
+#define MASK(n) (BIT(n) - 1)
+
+/* An n-bit field selector, beginning at bit m.
+ * Fix: parenthesise 'm' so that expression arguments (e.g.
+ * FIELD(a + b, n, x)) expand with the intended precedence. */
+#define FIELD(m,n,x) (((x) >> (m)) & MASK(n))
+
+/* Round n up to the next multiple of size (size must be a power of two,
+ * since this is implemented with bit masking). */
+#define ROUND_UP(n, size) ((((n) + (size) - 1)) & (~((size) - 1)))
+
+#endif /* __BITMACROS_H */
*/
/*
- * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * Copyright (c) 2007-2010, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
/*-
#define EM_TINYJ 61 /* Advanced Logic Corp. TinyJ processor. */
#define EM_X86_64 62 /* Advanced Micro Devices x86-64 */
#define EM_AMD64 EM_X86_64 /* Advanced Micro Devices x86-64 (compat) */
-#define EM_K1OM 181 /* Intel K1OM (Xeon Phi) */
+#define EM_K1OM 181 /* Intel K1OM (Xeon Phi) */
+#define EM_AARCH64 183 /* ARM 64 bit */
/* Non-standard or deprecated. */
#define EM_486 6 /* Intel i486. */
#define R_ARM_ABS32 2
#define R_ARM_RELATIVE 23
+/* AArch64 relocations — only the subset needed so far; many types are
+ * still missing. */
+#define R_AARCH64_NONE 0
+#define R_AARCH64_ABS64 257
+#define R_AARCH64_RELATIVE 1027
+
/**
* \brief ELF64 file header.
*/
/**
* \file
- * \brief Pmap definition common for the aarch64 archs
+ * \brief Pmap definition common for the AARCH64 archs
*/
/*
--- /dev/null
+/**
+ * \file
+ * \brief Data sent to a newly booted kernel
+ */
+
+/*
+ * Copyright (c) 2012, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef COREDATA_H
+#define COREDATA_H
+
+/* Boot-module descriptor (multiboot-style start/end plus a pointer to
+ * the module's command-line string). */
+struct arm_coredata_modinfo {
+    uint32_t mod_start;
+    uint32_t mod_end;
+    uint32_t string;
+    uint32_t reserved;
+};
+
+/* Memory-map entry; packed so the 64-bit fields follow 'size' without
+ * padding (note the commented-out 'res' word). */
+struct arm_coredata_mmap {
+    uint32_t    size;
+    //uint32_t res;
+    uint64_t    base_addr;
+    uint64_t    length;
+    uint32_t    type;
+} __attribute__ ((packed));
+
+/* ELF section header table info for the cpu module (count, entry size,
+ * address, and string-table section index). */
+struct arm_coredata_elf {
+    uint32_t    num;
+    uint32_t    size;
+    uint32_t    addr;
+    uint32_t    shndx;
+};
+
+/**
+ * \brief Data sent to a newly booted kernel
+ *
+ */
+struct arm_core_data {
+    /* NOTE(review): the address/size fields below are uint32_t,
+     * inherited from the 32-bit ARM port — confirm 32 bits are
+     * sufficient for the physical addresses used on AArch64. */
+    uint32_t multiboot_flags; ///< The multiboot flags of the cpu module
+    struct arm_coredata_elf elf; ///< elf structure for the cpu module
+    uint32_t module_start;  ///< The start of the cpu module
+    uint32_t module_end;    ///< The end of the cpu module
+    uint32_t urpc_frame_base;   ///< Base of the inter-core URPC frame
+    uint8_t urpc_frame_bits;    ///< log2 size of the URPC frame
+    uint32_t monitor_binary;    ///< Location of the monitor binary
+    uint32_t monitor_binary_size;
+    uint32_t memory_base_start; ///< Start of RAM handed to the new core
+    uint8_t memory_bits;        ///< log2 size of that RAM region
+    coreid_t src_core_id;       ///< Core that initiated the boot
+    uint8_t src_arch_id;
+    coreid_t dst_core_id;       ///< Core being booted
+    char kernel_cmdline[128];
+
+    uint32_t initrd_start;
+    uint32_t initrd_size;
+
+    uint32_t cmdline;
+    uint32_t mods_count;
+    uint32_t mods_addr;
+
+    uint32_t mmap_length;
+    uint32_t mmap_addr;
+
+    uint32_t start_free_ram;    ///< First free RAM address after boot data
+
+    uint32_t chan_id;
+
+    genpaddr_t kcb; ///< The kernel control block
+}; //__attribute__ ((packed));
+
+#define ARM_CORE_DATA_PAGES 1100
+
+#endif
-/**
- * \file
- * \brief Arch specific definitions, can be included by others.
- */
-
/*
+ * ARMv8 (VMSAv8-64) page table structures
+ *
* Copyright (c) 2015, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
#ifndef TARGET_ARMV8_BARRELFISH_KPI_PAGING_H
#define TARGET_ARMV8_BARRELFISH_KPI_PAGING_H
+#include <bitmacros.h>
+
#ifndef __ASSEMBLER__
typedef uint64_t paging_x86_64_flags_t;
#endif
-/** The system's base page size is 4kB */
-#define ARMv8_BASE_PAGE_BITS 12
-#define ARMv8_BASE_PAGE_SIZE (1<<ARMv8_BASE_PAGE_BITS)
-#define ARMv8_BASE_PAGE_MASK (ARMv8_BASE_PAGE_SIZE - 1)
-#define ARMv8_BASE_PAGE_OFFSET(a) ((a) & ARMv8_BASE_PAGE_MASK)
-
-/** The system's large page size is 2MB */
-#define ARMv8_LARGE_PAGE_BITS 21
-#define ARMv8_LARGE_PAGE_SIZE (1<<ARMv8_LARGE_PAGE_BITS)
-#define ARMv8_LARGE_PAGE_MASK (ARMv8_LARGE_PAGE_SIZE - 1)
-#define ARMv8_LARGE_PAGE_OFFSET(a) ((a) & ARMv8_LARGE_PAGE_MASK)
-
-/** The system's huge page size is 1GB */
-#define ARMv8_HUGE_PAGE_BITS 30
-#define ARMv8_HUGE_PAGE_SIZE (1<<ARMv8_HUGE_PAGE_BITS)
-#define ARMv8_HUGE_PAGE_MASK (ARMv8_HUGE_PAGE_SIZE - 1)
-#define ARMv8_HUGE_PAGE_OFFSET(a) ((a) & ARMv8_HUGE_PAGE_MASK)
-
-/**
- * Bits within the various page directories and tables.
- */
-
-// TODO: check what ptable sizes are available
-#define ARMv8_PTABLE_BITS 9 /**< Page directory/table size in bits */
-/** Page directory/table size */
-#define ARMv8_PTABLE_SIZE (1UL<<ARMv8_PTABLE_BITS)
-#define ARMv8_PTABLE_MASK 0x1ff /**< Page dir/table address mask */
-#define ARMv8_PTABLE_CLEAR 0 /**< Bitmap of a clear table entry */
-
-// XXX: maybe sizeof(union ...)
-#define ARMv8_PTABLE_ENTRY_SIZE sizeof(uint64_t)
-
-// XXX: These may depend on system config registers
-/* Macros to compute the corresponding portions of the vaddr */
-#define ARMv8_PML4_BASE(base) (((uint64_t)(base) >> 39) & ARMv8_PTABLE_MASK)
-#define ARMv8_PDPT_BASE(base) (((uint64_t)(base) >> 30) & ARMv8_PTABLE_MASK)
-#define ARMv8_PDIR_BASE(base) (((uint64_t)(base) >> 21) & ARMv8_PTABLE_MASK)
-#define ARMv8_PTABLE_BASE(base) (((uint64_t)(base) >> 12) & ARMv8_PTABLE_MASK)
-
-// non-prefixed versions
-// XXX: should cleanup arm include mess
-
-/** The system's base page size is 4kB */
-#define BASE_PAGE_BITS ARMv8_BASE_PAGE_BITS
-#define BASE_PAGE_SIZE ARMv8_BASE_PAGE_SIZE
-#define BASE_PAGE_MASK ARMv8_BASE_PAGE_MASK
-#define BASE_PAGE_OFFSET(a) ARMv8_BASE_PAGE_OFFSET(a)
-
-/** The system's large page size is 2MB */
-#define LARGE_PAGE_BITS ARMv8_LARGE_PAGE_BITS
-#define LARGE_PAGE_SIZE ARMv8_LARGE_PAGE_SIZE
-#define LARGE_PAGE_MASK ARMv8_LARGE_PAGE_MASK
-#define LARGE_PAGE_OFFSET(a) ARMv8_LARGE_PAGE_OFFSET(a)
-
-/** The system's huge page size is 1GB */
-#define HUGE_PAGE_BITS ARMv8_HUGE_PAGE_BITS
-#define HUGE_PAGE_SIZE ARMv8_HUGE_PAGE_SIZE
-#define HUGE_PAGE_MASK ARMv8_HUGE_PAGE_MASK
-#define HUGE_PAGE_OFFSET(a) ARMv8_HUGE_PAGE_OFFSET(a)
-
-/**
- * Bits within the various page directories and tables.
+/* In contrast to previous ARMs, ARMv8 has up to four levels of page tables,
+ * with base page size (granule) configurable to 4kB, 16kB, or 64kB. Page
+ * tables at all levels are one base-sized page.
+ *
+ * The current ARMv8 port of Barrelfish uses a 4kB granule, so the 4
+ * translation levels map 21b (2MB), 30b (1GB), 39b (512GB) and 48b (256TB),
+ * respectively. We disable the top-level (L0) table, giving a 512GB virtual
+ * address space, with 4kB, 2MB, and 1GB pages.
+ *
+ * Naming convention: The "ARMv8" prefix refers to the specific configuration
+ * that Barrelfish uses on ARMv8 in 64-bit mode, and not architectural
+ * constants - ARMv8 processors are generally much more configurable.
*/
-// TODO: check what ptable sizes are available
-#define PTABLE_BITS ARMv8_PTABLE_BITS /**< Page directory/table size in bits */
-/** Page directory/table size */
-#define PTABLE_SIZE ARMv8_PTABLE_SIZE
-#define PTABLE_MASK ARMv8_PTABLE_MASK /**< Page dir/table address mask */
-#define PTABLE_CLEAR ARMv8_PTABLE_CLEAR /**< Bitmap of a clear table entry */
-
-#define PTABLE_ENTRY_SIZE ARMv8_PTABLE_ENTRY_SIZE
+/* The system's base page size is 4kB, mapped in the L3 table */
+#define BASE_PAGE_BITS 12
+#define BASE_PAGE_SIZE BIT(BASE_PAGE_BITS)
+#define BASE_PAGE_MASK MASK(BASE_PAGE_BITS)
+#define BASE_PAGE_OFFSET(a) ((a) & BASE_PAGE_MASK)
+
+/* 2MB pages are mapped in the L2 table */
+#define LARGE_PAGE_BITS 21
+#define LARGE_PAGE_SIZE BIT(LARGE_PAGE_BITS)
+#define LARGE_PAGE_MASK MASK(LARGE_PAGE_BITS)
+#define LARGE_PAGE_OFFSET(a) ((a) & LARGE_PAGE_MASK)
+
+/* 1GB pages are mapped in the L1 table */
+#define HUGE_PAGE_BITS 30
+#define HUGE_PAGE_SIZE BIT(HUGE_PAGE_BITS)
+#define HUGE_PAGE_MASK MASK(HUGE_PAGE_BITS)
+#define HUGE_PAGE_OFFSET(a) ((a) & HUGE_PAGE_MASK)
+
+/* All entries are 8 bytes */
+#define PTABLE_ENTRY_BITS 3
+#define PTABLE_ENTRY_SIZE BIT(PTABLE_ENTRY_BITS)
+
+/* All levels resolve 9 bits (in contrast to earlier ARMs). */
+#define PTABLE_BITS 9
+#define PTABLE_SIZE BIT(PTABLE_BITS + PTABLE_ENTRY_BITS)
+#define PTABLE_MASK MASK(PTABLE_BITS + PTABLE_ENTRY_BITS)
+#define PTABLE_CLEAR 0 /* An invalid table entry */
+#define PTABLE_NUM_ENTRIES BIT(PTABLE_BITS)
+
+/* Macros to extract indices from the VAddr */
+#define ARMv8_L1_OFFSET(addr) \
+ FIELD(HUGE_PAGE_BITS, PTABLE_BITS, (uintptr_t)addr)
+#define ARMv8_L2_OFFSET(addr) \
+ FIELD(LARGE_PAGE_BITS, PTABLE_BITS, (uintptr_t)addr)
+#define ARMv8_L3_OFFSET(addr) \
+ FIELD(BASE_PAGE_BITS, PTABLE_BITS, (uintptr_t)addr)
+
+
+/* A descriptor for the next-level table.
+ * These are the same at all levels. */
+struct table_descriptor {
+ uint64_t type :2; // == 3 -> Table
+ uint64_t ign0 :10; // Ignored
+ uint64_t base_address :28; // Table address
+ uint64_t sbz0 :12; // sbz
+ uint64_t ign1 :7; // Ignored
+
+ /* Hierarchical lookup attributes */
+ uint64_t pxn :1; // Privileged eXecute Never
+ uint64_t xn :1; // eXecute Never
+ uint64_t ap :2; // Access Permissions
+ uint64_t ns :1; // NonSecure
+};
+
+union armv8_l1_entry {
+ uint64_t raw;
+
+ /* An invalid L1 entry */
+ struct {
+ uint64_t type :2; // == 0 or 2 -> Invalid
+ } invalid;
+
+ /* An L1 entry for an L2 table */
+ struct table_descriptor page_table;
+
+ /* An L1 entry for a 1GB block (page) */
+ struct {
+ uint64_t type :2; // == 1 -> Block
+
+ /* Lower block attributes */
+ uint64_t ai :3;
+ uint64_t ns :1;
+ uint64_t ap :2; // AP
+        uint64_t sh              :2;        // SH
+ uint64_t af :1; // AF
+ uint64_t ng :1; // NG
+
+ uint64_t sbz0 :18;
+ uint64_t base_address :18; // block base address
+ uint64_t sbz1 :4;
+
+ /* Upper block attributes */
+ uint64_t ch :1; // CH
+ uint64_t pxn :1; // PXN
+ uint64_t xn :1; // XN
+ uint64_t res :4; // Reserved
+ uint64_t ign1 :5; // Ignored
+ } block;
+};
+
+union armv8_l2_entry {
+ uint64_t raw;
+
+ /* An invalid L2 entry */
+ struct {
+ uint64_t type :2; // == 0 or 2 -> Invalid
+ } invalid;
+
+ /* An L2 entry for an L3 table */
+ struct table_descriptor page_table;
+
+ /* An L2 entry for a 2MB block (page) */
+ struct {
+ uint64_t type :2; // == 1 -> Block
+
+ /* Lower block attributes */
+ uint64_t ai :3;
+ uint64_t ns :1;
+ uint64_t ap :2; // AP
+            uint64_t sh              :2;        // SH
+ uint64_t af :1; // AF
+ uint64_t ng :1; // NG
+
+ uint64_t sbz0 :9;
+ uint64_t base_address :27; // block base address
+ uint64_t sbz1 :4;
+
+ /* Upper block attributes */
+ uint64_t ch :1; // CH
+ uint64_t pxn :1; // PXN
+ uint64_t xn :1; // XN
+ uint64_t res :4; // Reserved
+ uint64_t ign1 :5; // Ignored
+ } block;
+};
+
+union armv8_l3_entry {
+ uint64_t raw;
+
+ /* An invalid L3 entry */
+ struct {
+ uint64_t type :2; // == 0 or 2 -> Invalid
+ } invalid;
+
+ /* An L3 entry for a 4kB page */
+ struct {
+ uint64_t type :2; // == 1 -> Page
+
+ /* Lower attributes */
+ uint64_t ai :3;
+ uint64_t ns :1;
+ uint64_t ap :2; // AP
+ uint64_t sh :2; // SH
+ uint64_t af :1; // AF
+ uint64_t ng :1; // NG
+
+ uint64_t base_address :36; // page base address
+
+ /* Upper attributes */
+ uint64_t ch :1; // CH
+ uint64_t pxn :1; // PXN
+ uint64_t xn :1; // XN
+ uint64_t res :4; // Reserved
+        uint64_t sbz1           :5;        // Ignored — NOTE(review): bits are architecturally IGNORED; field is named sbz1 but the parallel L1/L2 unions call this ign1 — consider renaming for consistency
+ } page;
+};
+
+enum armv8_entry_type {
+ ARMv8_Ln_INVALID = 0,
+ ARMv8_Ln_BLOCK = 1,
+ ARMv8_Ln_TABLE = 3,
+ ARMv8_L3_PAGE = 3
+};
+
+/* VMSA-64 page attributes */
+#define AARCH64_L3_CACHEABLE 0x00
+#define AARCH64_L3_BUFFERABLE 0x00
+#define AARCH64_L3_USR_RO 0xc0
+#define AARCH64_L3_USR_RW 0x40
+#define AARCH64_L3_USR_NONE 0x80
+
+#define AARCH64_L2_CACHEABLE 0x08
+#define AARCH64_L2_BUFFERABLE 0x04
+#define AARCH64_L2_USR_RO 0x20
+#define AARCH64_L2_USR_RW 0x30
+#define AARCH64_L2_USR_NONE 0x10
+
+/* Page type independent page options */
+#define KPI_PAGING_FLAGS_READ 0x01
+#define KPI_PAGING_FLAGS_WRITE 0x02
+#define KPI_PAGING_FLAGS_EXECUTE 0x04
+#define KPI_PAGING_FLAGS_NOCACHE 0x08
+#define KPI_PAGING_FLAGS_MASK 0x0f
#endif // TARGET_ARMV8_BARRELFISH_KPI_PAGING_H
/* Barrelfish THC language extensions */
+/*
+ * Copyright (c) 2015, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
#ifndef _THC_INTERNAL_H_
#define _THC_INTERNAL_H_
"memory")
#endif
-#elif defined (__aarch64__)
-// XXX: todo sane implementations of these
+#elif defined(__aarch64__)
#define KILL_CALLEE_SAVES() \
- __asm__ volatile ("" : : : "sp")
+ __asm__ volatile ("" : : : \
+ "x19", "x20", "x21", "x22", "x23", "x24", "x25", \
+ "x26", "x27", "x28", \
+ "31", \
+ "memory")
+
#else
#error "Need definition of KILL_CALLEE_SAVES"
#endif
#elif defined(__i386__)
#define FORCE_ARGS_STACK
#define FORCE_ARGS_STACK_CALL
-#elif defined(__arm__)
+#elif defined(__arm__) || defined(__aarch64__)
#define FORCE_ARGS_STACK assert(0 && "THC not yet implemented on ARM")
#define FORCE_ARGS_STACK_CALL assert(0 && "THC not yet implemented on ARM")
#elif defined(__aarch64__)
//
// check_for_lazy_awe() and init_lazy_awe() also need to change.
-#define INIT_LAZY_AWE(_) assert(0 && "THC not yet implemented on ARM")
-#define RETURN_CONT(_) assert(0 && "THC not yet implemented on ARM")
-#define GET_LAZY_AWE(_) assert(0 && "THC not yet implemented on ARM")
+#define INIT_LAZY_AWE(_) assert(0 && "THC not yet implemented on AARCH64")
+#define RETURN_CONT(_) assert(0 && "THC not yet implemented on AARCH64")
+#define GET_LAZY_AWE(_) assert(0 && "THC not yet implemented on AARCH64")
#else
#error "Need definition of INIT_LAZY_AWE & GET_LAZY_AWE"
#endif
: "m" (_NS) \
: "memory", "r0", "r1"); \
}
+#elif defined(__aarch64__) && (defined(linux) || defined(BARRELFISH))
+
+// - NYI
+#define SWIZZLE_DEF(_NAME, _NS, _FN) assert(0 && "THC not yet implemented on AARCH64")
+
#else
#error "No definition of SWIZZLE_DEF for THC"
#endif
*/
/*
- * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * Copyright (c) 2007-2010, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
#ifndef LIBBARRELFISH_TRACE_H
--------------------------------------------------------------------------
--- Copyright (c) 2007-2013, ETH Zurich.
+-- Copyright (c) 2007-2015, ETH Zurich.
+-- Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
-- All rights reserved.
--
-- This file is distributed under the terms in the attached LICENSE file.
"arch/x86/cmos.c",
"arch/x86/misc.c",
"arch/x86/serial.c",
- "arch/x86/multiboot.c",
"arch/x86/conio.c",
"arch/x86/syscall.c",
"arch/x86/debugregs.c",
"arch/x86/timing.c",
"arch/x86/startup_x86.c",
"arch/x86/mcheck.c",
+ "arch/x86/multiboot.c",
"arch/x86/ipi_notify.c"
] ++ (if Config.microbenchmarks then ["arch/x86_64/microbenchmarks.c"] else []),
mackerelDevices = [ "lpc_pic",
cFiles = [ "arch/x86/apic.c",
-- "arch/x86/pic.c",
"arch/x86/misc.c",
- "arch/x86/multiboot.c",
"arch/x86/syscall.c",
"arch/x86/debugregs.c",
"arch/x86/perfmon.c",
"arch/k1om/startup_arch.c",
"arch/k1om/mcheck.c",
"arch/k1om/serial.c",
+ "arch/x86/multiboot.c",
"arch/k1om/xeon_phi.c"
] ++ (if Config.microbenchmarks then ["arch/x86_64/microbenchmarks.c"] else []),
"arch/x86/cmos.c",
"arch/x86/misc.c",
"arch/x86/serial.c",
- "arch/x86/multiboot.c",
"arch/x86/conio.c",
"arch/x86/syscall.c",
"arch/x86/debugregs.c",
"arch/x86/rtc.c",
"arch/x86/timing.c",
"arch/x86/startup_x86.c",
+ "arch/x86/multiboot.c",
"arch/x86/ipi_notify.c"
],
mackerelDevices = [ "lpc_pic",
cFiles = [ "arch/arm/exn.c",
"arch/arm/exec.c",
"arch/arm/misc.c",
- "arch/arm/phys_mmap.c",
+ "arch/armv5/phys_mmap.c",
"arch/arm/syscall.c",
"arch/armv5/init.c",
- "arch/armv5/integrator.c",
+ -- "arch/arm/integrator.c",
"arch/armv5/kludges.c",
- "arch/armv5/kputchar.c",
- "arch/armv5/pl011_uart.c",
+ "arch/arm/kputchar.c",
+ "arch/arm/pl011_uart.c",
"arch/armv5/cp15.c",
"arch/armv5/paging.c",
"arch/armv5/startup_arch.c" ],
"arch/armv7/exceptions.S" ],
cFiles = [ "arch/arm/exec.c",
"arch/arm/misc.c",
- "arch/arm/phys_mmap.c",
"arch/arm/exn.c",
"arch/arm/syscall.c",
"arch/arm_gem5/init.c",
- "arch/arm_gem5/integrator.c",
+ "arch/arm/integrator.c",
"arch/arm_gem5/paging.c",
- "arch/arm_gem5/gem5_serial.c",
- "arch/arm_gem5/pl011_uart.c",
+ "arch/arm/gem5/gem5_serial.c",
+ "arch/arm/pl011_uart.c",
"arch/arm_gem5/startup_arch.c",
- "arch/armv7/gic.c",
+ "arch/arm/gic.c",
"arch/armv7/kludges.c",
- "arch/armv7/multiboot.c",
"arch/armv7/paging.c",
- "arch/armv7/irq.c",
- "arch/arm_gem5/start_aps.c",
- "arch/armv7/kputchar.c" ],
+ "arch/arm/irq.c",
+ "arch/arm/gem5/start_aps.c",
+ "arch/arm/multiboot.c",
+ "arch/arm/kputchar.c" ],
+ mackerelDevices = [ "arm",
+ "arm_icp_pit",
+ "pl011_uart",
+ "pl130_gic",
+ "sp804_pit",
+ "cortex_a9_pit",
+ "a9scu" ],
+ addLibraries = [ "elf", "cpio" ]
+ },
+ --
+ -- GEM5 Cortex-A series ARMv8 core
+ --
+ cpuDriver {
+ target = "gem5",
+ architectures = [ "armv8" ],
+ assemblyFiles = [ "arch/armv8/gem5/boot.S",
+ "arch/armv8/sysreg.S",
+ "arch/armv8/exceptions.S" ],
+ cFiles = [ "arch/armv8/exec.c",
+ "arch/armv8/misc.c",
+ "arch/armv8/exn.c",
+ "arch/armv8/paging.c",
+ "arch/armv8/startup_arch.c",
+ "arch/armv8/kludges.c",
+ "arch/armv8/gem5/init.c",
+ "arch/armv8/gem5/paging_gem5.c",
+ "arch/armv8/gem5/platform.c",
+ "arch/armv8/syscall.c",
+ "arch/arm/gem5/gem5_serial.c",
+ "arch/arm/gem5/start_aps.c",
+ "arch/arm/pl011_uart.c",
+ "arch/arm/gic.c",
+ "arch/arm/irq.c",
+ "arch/arm/multiboot.c",
+ "arch/arm/kputchar.c" ],
mackerelDevices = [ "arm",
"arm_icp_pit",
+ "arm_icp_pic0",
"pl011_uart",
"pl130_gic",
"sp804_pit",
cFiles = [ "arch/arm/exn.c",
"arch/arm/exec.c",
"arch/arm/misc.c",
- "arch/arm/phys_mmap.c",
+ "arch/armv5/phys_mmap.c",
"arch/arm/syscall.c",
-- according to the Xscale documentation, the MMU is a
-- standard ARMv5 design, so we should be able to use
"arch/xscale/ixp2800_integrator.c",
"arch/xscale/ixp2800_uart.c",
"arch/xscale/ixp2800_paging.c",
- "arch/xscale/kputchar.c" ],
+ "arch/arm/kputchar.c" ],
mackerelDevices = [ "arm",
"ixp2800_icp_pit",
"ixp2800_icp_pic0",
"arch/arm/misc.c",
"arch/arm/exn.c",
"arch/arm/syscall.c",
- "arch/arm/phys_mmap.c",
- "arch/armv7/gic.c",
+ "arch/arm/gic.c",
"arch/armv7/kludges.c",
- "arch/armv7/multiboot.c",
"arch/armv7/paging.c",
- "arch/armv7/irq.c",
+ "arch/arm/irq.c",
"arch/omap44xx/init.c",
"arch/omap44xx/omap.c",
"arch/omap44xx/paging.c",
"arch/omap44xx/start_aps.c",
"arch/omap44xx/spinlock.c",
"arch/omap44xx/cortexm3_heteropanda.c", --will be empty if heteropanda = False
- "arch/armv7/kputchar.c"],
+ "arch/arm/multiboot.c",
+ "arch/arm/kputchar.c"],
mackerelDevices = [ "arm",
"arm_icp_pit",
"pl130_gic",
"arch/armv7-m/exceptions.S" ],
cFiles = [
"arch/arm/misc.c",
- "arch/arm/phys_mmap.c",
"arch/arm/syscall.c",
"arch/armv7/kludges.c",
- "arch/armv7/multiboot.c",
"arch/armv7-m/exec.c",
"arch/armv7-m/exn.c",
"arch/armv7-m/init.c",
"arch/omap44xx/startup_arch.c",
"arch/omap44xx/omap_uart.c",
-- "arch/omap44xx/start_aps.c",
- "arch/armv7/kputchar.c",
+ "arch/arm/kputchar.c",
+ "arch/arm/multiboot.c",
"arch/omap44xx/spinlock.c"
],
mackerelDevices = [ "arm",
cpuDriver {
target = "apm88xxxx",
architectures = [ "armv8" ],
- assemblyFiles = [ "arch/apm88xxxx/boot.S",
- "arch/armv8/exceptions.S"
+ assemblyFiles = [ "arch/armv8/exceptions.S"
],
cFiles = [ "arch/armv8/exec.c",
"arch/armv8/exn.c",
- "arch/armv8/irq.c",
+ "arch/arm/irq.c",
"arch/armv8/kludges.c",
- "arch/armv8/kputchar.c",
+ "arch/arm/kputchar.c",
+ "arch/arm/multiboot.c",
"arch/armv8/misc.c",
- "arch/apm88xxxx/init.c",
- "arch/apm88xxxx/paging.c",
- "arch/apm88xxxx/uart.c" ],
+ "arch/armv8/apm88xxxx/init.c",
+ "arch/armv8/apm88xxxx/paging.c",
+ "arch/armv8/apm88xxxx/uart.c" ],
mackerelDevices = [ "arm",
"apm88xxxx/apm88xxxx_pc16550"
],
+++ /dev/null
-#include <kernel.h>
-#include <serial.h>
-#include <uefi_mmap.h>
-#include <sysreg.h>
-#include <multiboot.h>
-#include <paging_kernel_arch.h>
-
-/*
- * Create kernel page tables (high 256G)
- * We use GB sections (level 1 entries that point to memory)
- */
-static void paging_init(void)
-{
- return;
-}
-
-static void paging_dump(void)
-{
- union armv8_ttable_entry *lvl0 =
- (union armv8_ttable_entry *)sysreg_read_ttbr0();
- for (int i = 0; i < 512; i++) {
- union armv8_ttable_entry *entry0 = lvl0 + i;
- if (entry0->d.valid && entry0->d.mb1) {
- printf("%d: level 1 table @%lx\n", i, (entry0->d.base)<<BASE_PAGE_BITS);
- union armv8_ttable_entry *lvl1 =
- (union armv8_ttable_entry *)((uint64_t)(entry0->d.base)<<BASE_PAGE_BITS);
- for (int j = 0; j < 512; j++) {
- union armv8_ttable_entry *entry1 = lvl1 + j;
- if (entry1->d.valid && entry1->d.mb1) {
- printf(" %d: level 2 table @%lx\n", j, (entry1->d.base)<<BASE_PAGE_BITS);
- } else if (entry1->block_l1.valid) {
- printf(" %d: level 1 block @%lx\n", j,
- (entry1->block_l1.base) << HUGE_PAGE_BITS);
- }
- }
- }
- }
-}
-
-bool is_bsp = true;
-
-__attribute__((noreturn))
-void arch_init(void *pointer, EFI_MEMORY_DESCRIPTOR *uefi_mmap);
-// Currently
-void arch_init(void *pointer, EFI_MEMORY_DESCRIPTOR *uefi_mmap)
-{
- // uncomment line below to force wait to attach gdb here
- // __asm volatile ("wfi":::);
-
- // set both console ports: UART0 is the one that's connected to the DB9
- // connector on the back of the mustang boxes.
- serial_console_port = 0;
- serial_debug_port = 0;
-
- // init serial console, skip hwinit, as the port is guaranteed to be
- // initialized by UEFI.
- serial_console_init(false);
-
- if (is_bsp) {
- printf("ACPI root table (RSDP): %p\n", pointer);
- printf("UEFI memory map pointer: %p\n", uefi_mmap);
-
- printf("First memory map entry:\n");
- printf(" Type: %x\n", uefi_mmap->Type);
- printf(" PhysStart: 0x%lx\n", uefi_mmap->PhysicalStart);
- printf(" VirtStart: 0x%lx\n", uefi_mmap->VirtualStart);
- printf(" #pages: %lu\n", uefi_mmap->NumberOfPages);
- printf(" Attrs: %lx\n", uefi_mmap->Attribute);
-
- struct multiboot_info *mb = pointer;
- mb = mb;
-
- // TODO: finish BSP core init
- } else {
- // TODO: AP core init
- }
-
- // print something
- printf("Barrelfish APM88xxxx CPU driver starting at addr 0x%"
- PRIxLVADDR" on core %"PRIuCOREID"\n",
- local_phys_to_mem((lpaddr_t)&kernel_first_byte), my_core_id);
-
- paging_dump();
- paging_init();
-
- while(1) {
- __asm volatile ("wfi":::);
- }
-}
#include <paging_kernel_arch.h>
#include <serial.h>
#include <dev/pl011_uart_dev.h>
-#include <pl011_uart.h>
+#include <arch/arm/pl011_uart.h>
#define NUM_PORTS 2
unsigned serial_console_port = 0;
#define UART_DEVICE_BYTES 0x4c
#define UART_MAPPING_DIFF 0x1000
-static pl011_uart_t ports[NUM_PORTS];
+//static pl011_uart_t ports[NUM_PORTS];
+pl011_uart_t ports[NUM_PORTS];
/*
* Initialize a serial port
#include <kernel.h>
#include <stdio.h>
#include <string.h>
-#include <arch/armv7/start_aps.h>
#include <arm_hal.h>
+#include <start_aps.h>
#define STARTUP_TIMEOUT 0xffffff
*/
int start_aps_arm_start(coreid_t core_id, genvaddr_t entry)
{
+ panic("NYI");
+#if 0
//write entry address of new kernel to SYSFLAG reg
write_sysflags_reg(entry);
//raise SWI to signal app core to start
gic_raise_softirq((1 << core_id), 1);
+#endif
return 0;
}
#include <dev/pl130_gic_dev.h>
#include <arm_hal.h>
-#include <gic.h>
+#include <arch/arm/gic.h>
static pl130_gic_t gic;
static uint32_t it_num_lines;
*/
void gic_init(void)
{
- // this function is implemented in armv7 platform-specific code
+ // this function is implemented in platform-specific code
gic_map_and_init(&gic);
// read GIC configuration
#include <serial.h>
#include <arm_hal.h>
#include <cp15.h>
-#include <io.h>
#include <gic.h>
//hardcoded bc gem5 doesn't set board id in ID_Register
#include <kernel.h>
#include <stdio.h>
#include <string.h>
-#include <arm.h>
+#include <arch/arm/arm.h>
#include <barrelfish_kpi/lmp.h>
#include <barrelfish_kpi/syscalls.h>
#include <barrelfish_kpi/sys_debug.h>
#include <arch/armv7/arm_hal.h>
-#include <arch/armv7/start_aps.h>
#include <arch/armv7/irq.h>
#include <paging_kernel_arch.h>
#include <syscall.h>
#include <arch/arm/syscall_arm.h>
#include <kcb.h>
-#include <arch/armv7/start_aps.h>
-
#define GIC_IRQ_PRIO_LOWEST (0xF)
#define GIC_IRQ_CPU_TRG_ALL (0x3) // For two cores on the PandaBoard
-/**\r
- * \file\r
- * \brief The world's simplest serial driver.\r
- *\r
- */\r
-\r
-/*\r
- * Copyright (c) 2010, ETH Zurich.\r
- * All rights reserved.\r
- *\r
- * This file is distributed under the terms in the attached LICENSE file.\r
- * If you do not find this file, copies can be found by writing to:\r
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.\r
- */\r
-\r
-#include <serial.h>\r
-#include <kputchar.h>\r
-#include <global.h>\r
-\r
-#ifdef __pandaboard__ //use the spinlock module\r
-#include <spinlock.h>\r
-#endif\r
-\r
-\r
-#define KPBUFSZ 256\r
-static char kputbuf[KPBUFSZ];\r
-static int kcount = 0;\r
-\r
-static void kflush(void)\r
-{\r
- for(int i=0; i<kcount; i++)\r
- serial_console_putchar(kputbuf[i]);\r
- kcount = 0;\r
-}\r
-\r
-void kprintf_begin(void)\r
-{\r
-#ifdef __pandaboard__ //use hardware spinlock module\r
- spinlock_aquire(PRINTF_LOCK);\r
-#else\r
- //acquire_spinlock(&global->locks.print);\r
-#endif \r
- kcount = 0;\r
-}\r
-\r
-int kputchar(int c)\r
-{\r
- kputbuf[kcount++] = c;\r
- if (kcount == KPBUFSZ || c == '\n')\r
- kflush();\r
- return c;\r
-}\r
-\r
-void kprintf_end(void)\r
-{\r
- kflush();\r
-#ifdef __pandaboard__ //use hardware spinlock module\r
- spinlock_release(PRINTF_LOCK);\r
-#else\r
- //release_spinlock(&global->locks.print);\r
-#endif\r
-}\r
-\r
-// End\r
+/**
+ * \file
+ * \brief The world's simplest serial driver.
+ *
+ */
+
+/*
+ * Copyright (c) 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <serial.h>
+#include <kputchar.h>
+#include <global.h>
+
+#ifdef __pandaboard__ //use the spinlock module
+#include <spinlock.h>
+#endif
+
+
+#define KPBUFSZ 256
+static char kputbuf[KPBUFSZ];
+static int kcount = 0;
+
+static void kflush(void)
+{
+ for(int i=0; i<kcount; i++)
+ serial_console_putchar(kputbuf[i]);
+ kcount = 0;
+}
+
+void kprintf_begin(void)
+{
+#ifdef __pandaboard__ //use hardware spinlock module
+ spinlock_aquire(PRINTF_LOCK);
+#else
+ //acquire_spinlock(&global->locks.print);
+#endif
+ kcount = 0;
+}
+
+int kputchar(int c)
+{
+ kputbuf[kcount++] = c;
+ if (kcount == KPBUFSZ || c == '\n')
+ kflush();
+ return c;
+}
+
+void kprintf_end(void)
+{
+ kflush();
+#ifdef __pandaboard__ //use hardware spinlock module
+ spinlock_release(PRINTF_LOCK);
+#else
+ //release_spinlock(&global->locks.print);
+#endif
+}
+
+// End
*/
#include <kernel.h>
-#include <arm.h>
+#include <arch/arm/arm.h>
#include <dev/pl011_uart_dev.h>
-#include <pl011_uart.h>
+#include <arch/arm/pl011_uart.h>
#define INTERRUPTS_MASK 0x0070
#include <exec.h>
#include <offsets.h>
#include <paging_kernel_arch.h>
-#include <phys_mmap.h>
#include <serial.h>
#include <stdio.h>
#include <arm_hal.h>
*/
//XXX: We reserve double the space needed to be able to align the pagetable
// to 16K after relocation
-static union arm_l1_entry kernel_l1_table[2*ARM_L1_MAX_ENTRIES]
+static union arm_l1_entry kernel_l1_table[2*PTABLE_NUM_ENTRIES]
__attribute__((aligned(ARM_L1_ALIGN)));
static union arm_l1_entry *aligned_kernel_l1_table;
/**
*/
//XXX: We reserve double the space needed to be able to align the pagetable
// to 1K after relocation
-static union arm_l2_entry low_l2_table[2*ARM_L2_MAX_ENTRIES]
+static union arm_l2_entry low_l2_table[2*PTABLE_NUM_ENTRIES]
__attribute__((aligned(ARM_L2_ALIGN)));
static union arm_l2_entry *aligned_low_l2_table;
#include <exec.h>
#include <offsets.h>
#include <paging_kernel_arch.h>
-#include <phys_mmap.h>
+#include <arch/armv5/phys_mmap.h>
#include <serial.h>
#include <stdio.h>
#include <arm_hal.h>
+++ /dev/null
-/*
- * Copyright (c) 2007, 2009, ETH Zurich.
- * All rights reserved.
- *
- * This file is distributed under the terms in the attached LICENSE file.
- * If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
- */
-
-#include <kernel.h>
-#include <paging_kernel_arch.h>
-
-#include <dev/pl011_uart_dev.h>
-#include <dev/arm_icp_pic0_dev.h>
-#include <dev/arm_icp_pit_dev.h>
-
-#include <pl011_uart.h>
-#include <serial.h>
-#include <arm_hal.h>
-
-uint32_t hal_get_board_id(void)
-{
- return 0x113;
-}
-
-uint8_t hal_get_cpu_id(void)
-{
- return 0;
-}
-
-bool hal_cpu_is_bsp(void)
-{
- return true;
-}
-
-//
-// Interrupt controller
-//
-
-static arm_icp_pic0_source_un pic_primary_irqs;
-static arm_icp_pic0_t pic;
-
-void pic_init(void)
-{
- static const uintptr_t PIC_BASE = 0x14000000;
-
- arm_icp_pic0_source_t sources = {
- .SOFTINT = 1, .UARTINT0 = 1, .UARTINT1 = 1, .KBDINT = 1,
- .MOUSEINT = 1, .TIMERINT0 = 1, .TIMERINT1 = 1, .TIMERINT2 = 1,
- .RTCINT = 1, .LM_LLINT0 = 1, .LM_LLINT1 = 1, .CLCDCINT = 1,
- .MMCIINT0 = 1, .MMCIINT1 = 1, .AACIINT = 1, .CPPLDINT = 1,
- .ETH_INT = 1, .TS_PENINT = 1
- };
- pic_primary_irqs.val = sources;
-
- lvaddr_t pic_base = paging_map_device(PIC_BASE, 0x00100000);
- arm_icp_pic0_initialize(&pic, (mackerel_addr_t)pic_base);
-
- pic_disable_all_irqs();
-}
-
-void pic_set_irq_enabled(uint32_t irq, bool en)
-{
- uint32_t m = 1u << irq;
- if (irq < 32 && (pic_primary_irqs.raw & m) == m) {
- if (en) {
- m |= arm_icp_pic0_PIC_IRQ_ENABLESET_rd_raw(&pic);
- arm_icp_pic0_PIC_IRQ_ENABLESET_wr_raw(&pic, m);
- }
- else {
- arm_icp_pic0_PIC_IRQ_ENABLECLR_wr_raw(&pic, m);
- }
- }
- else {
- panic("Unknown IRQ source %"PRIu32, irq);
- }
-}
-
-void pic_disable_all_irqs(void)
-{
- arm_icp_pic0_PIC_IRQ_ENABLECLR_wr_raw(&pic, pic_primary_irqs.raw);
-}
-
-uint32_t pic_get_active_irq(void)
-{
- uint32_t status = arm_icp_pic0_PIC_IRQ_STATUS_rd_raw(&pic);
- uint32_t irq;
-
- for (irq = 0; irq < 32; irq++) {
- if (0 != (status & (1u << irq))) {
- return irq;
- }
- }
- return ~0ul;
-}
-
-void pic_ack_irq(uint32_t irq)
-{
- // From the ARM specs it looks as if just clearing the interrupt at the
- // peripheral will clear the interrupt. No explicit EOI.
-}
-
-//
-// Kernel timer and tsc
-//
-
-static const uintptr_t PIT_BASE = 0x13000000;
-static const uint32_t PIT_IRQ = 6;
-
-static arm_icp_pit_t pit;
-
-static lvaddr_t pit_map_resources(void)
-{
- static lvaddr_t timer_base = 0;
- if (timer_base == 0) {
- timer_base = paging_map_device(PIT_BASE, 0x100000);
- }
- return timer_base;
-}
-
-void pit_init(uint32_t tick_hz)
-{
- // PIT uses timer 1 (hardcoded to 1MHz)
- arm_icp_pit_LOAD_t load = { .value = 1000000 / tick_hz };
- arm_icp_pit_CONTROL_t control = {
- .oneshot = 0, .timer32 = 1, .prescale = arm_icp_pit_none,
- .int_enable = 0, .mode = arm_icp_pit_reload, .enable = 0
- };
-
- lvaddr_t timer_base = pit_map_resources();
-
- arm_icp_pit_initialize(&pit, (mackerel_addr_t)(timer_base + 0x100));
- arm_icp_pit_LOAD_wr(&pit, load);
- arm_icp_pit_CONTROL_wr(&pit, control);
-
- pic_set_irq_enabled(PIT_IRQ, 1);
-}
-
-void pit_start(void)
-{
- arm_icp_pit_CONTROL_t control = arm_icp_pit_CONTROL_rd(&pit);
- control.int_enable = 1;
- control.enable = 1;
- arm_icp_pit_CONTROL_wr(&pit, control);
-}
-
-bool pit_handle_irq(uint32_t irq)
-{
- if (PIT_IRQ == irq) {
- arm_icp_pit_INTCLR_wr_raw(&pit, ~0ul);
- return 1;
- }
- else {
- return 0;
- }
-}
-
-void pit_mask_irq(bool masked)
-{
- arm_icp_pit_CONTROL_t control = arm_icp_pit_CONTROL_rd(&pit);
- if (masked) {
- control.int_enable = 0;
- }
- else {
- control.int_enable = 1;
- }
- arm_icp_pit_CONTROL_wr(&pit, control);
-
- if (masked) {
- // Clear interrupt if pending.
- pit_handle_irq(PIT_IRQ);
- }
-}
-
-//
-// TSC uses timer 0 (assuming 40MHz for QEMU)
-//
-static const uint32_t tsc_hz = 40000000;
-static arm_icp_pit_t tsc;
-
-void tsc_init(void)
-{
- arm_icp_pit_LOAD_t load = { .value = ~0ul };
- arm_icp_pit_CONTROL_t control = {
- .oneshot = 0, .timer32 = 1, .prescale = arm_icp_pit_none,
- .int_enable = 0, .mode = arm_icp_pit_reload, .enable = 1
- };
- pit_map_resources();
-
- arm_icp_pit_initialize(&tsc, (mackerel_addr_t)pit_map_resources());
- arm_icp_pit_LOAD_wr(&tsc, load);
- arm_icp_pit_CONTROL_wr(&tsc, control);
-}
-
-uint32_t tsc_read(void)
-{
- // Timers count down so invert it.
- return ~arm_icp_pit_CURRENT_rd_raw(&tsc);
-}
-
-uint32_t tsc_get_hz(void)
-{
- return tsc_hz;
-}
-
-//
-// Serial console and debugger interfaces
-//
-
-#define NUM_PORTS 2
-unsigned serial_console_port = 0;
-unsigned serial_debug_port = 1;
-const unsigned serial_num_physical_ports = NUM_PORTS;
-
-
-#define UART0_VBASE 0xE0009000
-#define UART0_SECTION_OFFSET 0x9000
-#define UART_DEVICE_BYTES 0x4c
-#define UART_MAPPING_DIFF 0x1000
-
-static pl011_uart_t ports[NUM_PORTS];
-
-errval_t serial_init(unsigned port, bool hwinit)
-{
- if (port < NUM_PORTS) {
- assert(ports[port].base == 0);
-
- lvaddr_t base = paging_map_device(0x16000000ul + port * 0x01000000,
- 0x00100000);
-
- if (hwinit) {
- pl011_uart_init(&ports[port], base);
- }
- return SYS_ERR_OK;
- }
- else {
- return SYS_ERR_SERIAL_PORT_INVALID;
- }
-}
-errval_t serial_early_init(unsigned port)
-{
- return SYS_ERR_OK; // Unused
-}
-
-void serial_putchar(unsigned port, char c)
-{
- assert(port < NUM_PORTS);
- assert(ports[port].base != 0);
- pl011_putchar(&ports[port], c);
-};
-
-char serial_getchar(unsigned port)
-{
- assert(port < NUM_PORTS);
- assert(ports[port].base != 0);
- return pl011_getchar(&ports[port]);
-};
+++ /dev/null
-/**\r
- * \file\r
- * \brief The world's simplest serial driver.\r
- *\r
- */\r
-\r
-/*\r
- * Copyright (c) 2010, ETH Zurich.\r
- * All rights reserved.\r
- *\r
- * This file is distributed under the terms in the attached LICENSE file.\r
- * If you do not find this file, copies can be found by writing to:\r
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.\r
- */\r
-\r
-#include <serial.h>\r
-#include <kputchar.h>\r
-\r
-#define KPBUFSZ 256\r
-static char kputbuf[KPBUFSZ];\r
-static int kcount = 0;\r
-\r
-static void kflush(void)\r
-{\r
- for(int i=0; i<kcount; i++)\r
- serial_console_putchar(kputbuf[i]);\r
- kcount = 0;\r
-}\r
-\r
-void kprintf_begin(void)\r
-{\r
- kcount = 0;\r
-}\r
-\r
-int kputchar(int c)\r
-{\r
- kputbuf[kcount++] = c;\r
- if (kcount == KPBUFSZ || c == '\n')\r
- kflush();\r
- return c;\r
-}\r
-\r
-void kprintf_end(void)\r
-{\r
- kflush();\r
-}\r
-\r
-// End\r
#include <assert.h>
#include <stddef.h>
#include <barrelfish_kpi/types.h>
-#include <phys_mmap.h>
+#include <arch/armv5/phys_mmap.h>
#include <stdio.h>
#include <cp15.h>
#include <cpiobin.h>
#include <init.h>
-#include <phys_mmap.h>
+#include <arch/armv5/phys_mmap.h>
#include <barrelfish_kpi/paging_arm_v5.h>
#include <startup.h>
#include <kcb.h>
#include <exec.h>
#include <offsets.h>
#include <paging_kernel_arch.h>
-#include <phys_mmap.h>
#include <serial.h>
#include <spinlock.h>
#include <stdio.h>
--- /dev/null
+#include <kernel.h>
+#include <serial.h>
+#include <uefi_mmap.h>
+#include <sysreg.h>
+#include <multiboot2.h>
+#include <paging_kernel_arch.h>
+
+/* XXX - shouldn't be here. */
+bool is_bsp;
+
+__attribute__((noreturn))
+void plat_init(uint32_t magic, void *pointer);
+
+/* Hagfish guarantees us the following on calling plat_init():
+ * Single core running (not guaranteed to be core 0)
+ * CPU is in highest non-secure privilege mode: EL2 or EL1
+ * MMU enabled, 4k translation granule, 1:1 mapping of all RAM, using TTBR0.
+ * Little-endian mode
+ * Core caches (L1&L2) and TLB enabled
+ * Non-architectural caches disabled
+ * Interrupts enabled
+ * Generic timer initialized and enabled
+ * >= 128KiB stack
+ * ACPI tables available
+ * Register x0 contains handle to ACPI root table
+ * Register x1 contains a pointer to the UEFI memory map
+ */
+
+void plat_init(uint32_t magic, void *pointer) {
+ struct multiboot_header *mb= NULL;
+
+ /* Uncomment the line below to wait here for GDB. */
+ /* __asm volatile ("wfi":::); */
+
+ /* Set both console ports: UART0 is the one that's connected to the DB9
+ connector on the back of the Mustang boxes. */
+ /* XXX - we should get EFI to tell us this. */
+ serial_console_port= 0;
+ serial_debug_port= 0;
+
+ /* Initialise the serial console. Skip hardware initialisation, as the
+ port is guaranteed to have been initialized by UEFI. */
+ serial_console_init(false);
+
+ switch(magic) {
+ case MULTIBOOT2_BOOTLOADER_MAGIC:
+ is_bsp= true;
+ mb= (struct multiboot_header *)pointer;
+ break;
+ default:
+ is_bsp= false;
+ panic("Implement AP booting!");
+ break;
+ }
+
+ if (is_bsp) {
+ // TODO: finish BSP core init
+ } else {
+ // TODO: AP core init
+ panic("AP init");
+ }
+
+ printf("Barrelfish APM88xxxx CPU driver starting at addr 0x%"
+ PRIxLVADDR" on core %"PRIuCOREID"\n",
+ local_phys_to_mem((lpaddr_t)&kernel_first_byte), my_core_id);
+
+ while(1) {
+ __asm volatile ("wfi":::);
+ }
+}
/**
* \file
- * \brief AArch64 execution and miscellany
+ * \brief ARM execution and miscellany
*/
/*
- * Copyright (c) 2015, ETH Zurich.
+ * Copyright (c) 2007-2009, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
#include <kernel.h>
#include <dispatch.h>
#include <init.h>
-#include <aarch64.h>
+#include <arch/arm/arm.h>
#include <arm_hal.h>
#include <exec.h>
+#include <exceptions.h>
#include <misc.h>
#include <sysreg.h> // for invalidating tlb and cache
-//static arch_registers_state_t upcall_state;
+static arch_registers_state_t upcall_state;
extern uint32_t ctr;
static inline __attribute__((noreturn))
-void do_resume(uint32_t *regs)
+void do_resume(uint64_t *regs)
{
- panic("NYI");
+ STATIC_ASSERT(SPSR_REG == 0, "");
+ STATIC_ASSERT(X0_REG == 1, "");
+ STATIC_ASSERT(PC_REG == 33, "");
+
+    // Flush caches and TLB
+ sysreg_invalidate_tlb();
+ sysreg_invalidate_i_and_d_caches();
+
+ __asm volatile(
+ "clrex\n\t"
+ /* Restore cpsr condition bits */
+ " mov x30, %[regs] \n\t"
+ " ldr x2, [x30, #(" XTR(SP_REG) " * 8)] \n\t"
+ " mov sp, x2 \n\t"
+ " ldr x2, [x30, # (" XTR(PC_REG) " * 8)] \n\t"
+ " msr elr_el1, x2 \n\t"
+ " ldr x2, [x30], #8 \n\t"
+ /*" msr spsr_el1, x2 \n\t"*/
+ /* Restore registers */
+ " ldp x0, x1, [x30], #16 \n\t"
+ " ldp x2, x3, [x30], #16 \n\t"
+ " ldp x4, x5, [x30], #16 \n\t"
+ " ldp x6, x7, [x30], #16 \n\t"
+ " ldp x8, x9, [x30], #16 \n\t"
+ " ldp x10, x11, [x30], #16 \n\t"
+ " ldp x12, x13, [x30], #16 \n\t"
+ " ldp x14, x15, [x30], #16 \n\t"
+ " ldp x16, x17, [x30], #16 \n\t"
+ " ldp x18, x19, [x30], #16 \n\t"
+ " ldp x20, x21, [x30], #16 \n\t"
+ " ldp x22, x23, [x30], #16 \n\t"
+ " ldp x24, x25, [x30], #16 \n\t"
+ " ldp x26, x27, [x30], #16 \n\t"
+ " ldp x28, x29, [x30], #16 \n\t"
+ " ldr x30, [x30], #8 \n\t"
+ " eret \n\t"
+ :: [regs] "r" (regs) : "x30");
+
+ panic("do_resume returned.");
}
/// Ensure context is for user-mode with interrupts enabled.
static inline void
ensure_user_mode_policy(arch_registers_state_t *state)
{
- panic("NYI");
+ uintptr_t cpsr_if_mode = CPSR_F_MASK | AARCH64_MODE_USR;
+
+ if ((state->named.spsr & (CPSR_IF_MASK | AARCH64_MODE_MASK)) != cpsr_if_mode) {
+ assert(0 == (state->named.spsr & AARCH64_MODE_PRIV));
+ state->named.spsr &= CPSR_IF_MASK | AARCH64_MODE_MASK;
+ state->named.spsr |= cpsr_if_mode;
+ }
}
/**
void __attribute__ ((noreturn))
execute(lvaddr_t entry)
{
- panic("NYI");
+ dispatcher_handle_t handle = dcb_current->disp;
+ struct dispatcher_shared_aarch64 *disp_aarch64 =
+ get_dispatcher_shared_aarch64(handle);
+
+ arch_registers_state_t *state = &upcall_state;
+ assert(0 != disp_aarch64->got_base);
+
+ state->named.x10 = disp_aarch64->got_base;
+
+ struct dispatcher_shared_generic *disp_gen
+ = get_dispatcher_shared_generic(handle);
+
+ state->named.rtls = disp_gen->udisp;
+
+ state->named.pc = entry;
+ ensure_user_mode_policy(state);
+ do_resume(state->regs);
}
/**
* This function resumes user-space execution by restoring the CPU
* registers with the ones given in the array, pointed to by 'state'.
*/
+uint32_t ctr=0;
void __attribute__ ((noreturn)) resume(arch_registers_state_t *state)
{
- panic("NYI");
+ ctr++;
+ state->named.rtls = arch_get_thread_register();
+ ensure_user_mode_policy(state);
+
+ //printf("thread reg..%p\n",state->named.rtls);
+ /*
+ This function succeeds the first time executed, i.e.
+ when init is started for the first time.
+     If we hold the execution here after the first exception, we are still good
+ */
+ // while(ctr>1);
+ do_resume(state->regs);
}
void wait_for_interrupt(void)
{
- panic("NYI");
+ // REVIEW: Timer interrupt could be masked here.
+
+ // Switch to system mode with interrupts enabled. -- OLD
+    // Switch to privileged mode with interrupts enabled.
+ __asm volatile(
+ "mov x0, #" XTR(AARCH64_MODE_PRIV) " \n\t"
+ "0: \n\t"
+#if defined(__ARM_ARCH_8A__)
+ "wfi \n\t"
+#else
+ // If no WFI functionality exists on system, just
+ // spinning here is okay.
+#error "Unknown platform for wait_for_interrupt"
+#endif //
+ "b 0b \n\t" ::: "r0");
+
+ panic("wfi returned");
}
/*
- * Copyright (c) 2015, ETH Zurich.
+ * Copyright (c) 2009-2013 ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
* ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
+
+#include <kernel.h>
+#include <dispatch.h>
+#include <arch/arm/arm.h>
+#include <arm_hal.h>
+#include <sysreg.h>
#include <exceptions.h>
+#include <exec.h>
+#include <misc.h>
#include <stdio.h>
+#include <wakeup.h>
+#include <irq.h>
+
+void handle_user_page_fault(lvaddr_t fault_address,
+ arch_registers_state_t* save_area)
+{
+ lvaddr_t handler;
+ struct dispatcher_shared_aarch64 *disp =
+ get_dispatcher_shared_aarch64(dcb_current->disp);
+ uintptr_t saved_pc = save_area->named.pc;
+
+ disp->d.disabled = dispatcher_is_disabled_ip(dcb_current->disp, saved_pc);
+ bool disabled = (disp->d.disabled != 0);
+
+ assert(dcb_current->disp_cte.cap.type == ObjType_Frame);
+
+ printk(LOG_WARN, "user page fault%s in '%.*s': addr %"PRIxLVADDR
+ " IP %"PRIxPTR"\n",
+ disabled ? " WHILE DISABLED" : "", DISP_NAME_LEN,
+ disp->d.name, fault_address, saved_pc);
+
+ if (disabled) {
+ assert(save_area == &disp->trap_save_area);
+ handler = disp->d.dispatcher_pagefault_disabled;
+ dcb_current->faults_taken++;
+ }
+ else {
+ assert(save_area == &disp->enabled_save_area);
+ handler = disp->d.dispatcher_pagefault;
+ }
+
+ if (dcb_current->faults_taken > 2) {
+ printk(LOG_WARN, "handle_user_page_fault: too many faults, "
+ "making domain unrunnable\n");
+ dcb_current->faults_taken = 0; // just in case it gets restarted
+ scheduler_remove(dcb_current);
+ dispatch(schedule());
+ }
+ else {
+ //
+ // Upcall to dispatcher
+ //
+ // NB System might be cleaner with a prototype
+ // dispatch context that has R0-R3 to be overwritten
+ // plus initial stack, thread, and gic registers. Could do
+ // a faster resume_for_upcall().
+ //
+
+ struct dispatcher_shared_generic *disp_gen =
+ get_dispatcher_shared_generic(dcb_current->disp);
+
+ union registers_aarch64 resume_area;
+
+ resume_area.named.spsr = CPSR_F_MASK | AARCH64_MODE_USR;
+ resume_area.named.pc = handler;
+ resume_area.named.x0 = disp_gen->udisp;
+ resume_area.named.x1 = fault_address;
+ resume_area.named.x2 = 0;
+ resume_area.named.x3 = saved_pc;
+ resume_area.named.rtls = disp_gen->udisp;
+ resume_area.named.x10 = disp->got_base;
+
+ // SP is set by handler routine.
+
+ // Upcall user to save area
+ disp->d.disabled = true;
+ printk(LOG_WARN,"page fault at %p calling handler %p\n",fault_address,handler);
+ resume(&resume_area);
+ }
+}
+
+void handle_user_undef(lvaddr_t fault_address,
+ arch_registers_state_t* save_area)
+{
+ union registers_aarch64 resume_area;
+
+ struct dispatcher_shared_aarch64 *disp =
+ get_dispatcher_shared_aarch64(dcb_current->disp);
+
+ bool disabled = dispatcher_is_disabled_ip(dcb_current->disp, save_area->named.pc);
+ disp->d.disabled = disabled;
+
+ assert(dcb_current->disp_cte.cap.type == ObjType_Frame);
+ if (disabled) {
+ // assert(save_area == &disp->trap_save_area);
+ }
+ else {
+ assert(save_area == &disp->enabled_save_area);
+ }
+
+ printk(LOG_WARN, "user undef fault%s in '%.*s': IP %" PRIuPTR "\n",
+ disabled ? " WHILE DISABLED" : "", DISP_NAME_LEN,
+ disp->d.name, fault_address);
+
+ struct dispatcher_shared_generic *disp_gen =
+ get_dispatcher_shared_generic(dcb_current->disp);
+
+ resume_area.named.spsr = CPSR_F_MASK | AARCH64_MODE_USR;
+ resume_area.named.pc = disp->d.dispatcher_trap;
+ resume_area.named.x0 = disp_gen->udisp;
+ resume_area.named.x1 = AARCH64_EVECTOR_UNDEF;
+ resume_area.named.x2 = 0;
+ resume_area.named.x3 = fault_address;
+ resume_area.named.rtls = disp_gen->udisp;
+ resume_area.named.x10 = disp->got_base;
+
+ // Upcall user to save area
+ disp->d.disabled = true;
+ resume(&resume_area);
+}
-void handle_irq(void)
+static int32_t bkpt_decode(lvaddr_t fault_address)
{
- printf("IRQ handler\n");
- return;
+ int32_t bkpt_id = -1;
+ if ((fault_address & 3) == 0 && fault_address >= KERNEL_OFFSET) {
+ const uint32_t bkpt_mask = 0xfff000f0;
+ const uint32_t bkpt_isn = 0xe1200070;
+
+ uintptr_t isn = *((uintptr_t*)fault_address);
+ if ((isn & bkpt_mask) == bkpt_isn) {
+ bkpt_id = (int32_t)((isn & 0xf) | ((isn & 0xfff00) >> 4));
+ }
+ }
+ return bkpt_id;
}
-void page_fault(void *exn_frame)
+void fatal_kernel_fault(uint32_t evector, lvaddr_t address, arch_registers_state_t* save_area
+ )
{
- printf("PF handler: %p\n", exn_frame);
- return;
+ int i;
+ printk(LOG_PANIC, "Kernel fault at %"PRIxLVADDR
+ " vector %"PRIx64"\n\n", address, evector);
+ printk(LOG_PANIC, "Processor save_area at: %p\n", save_area);
+
+ for (i = 0; i < 16; i++) {
+ const char *extrainfo = "";
+
+ switch(i) {
+ case 13:
+ extrainfo = "\t(sp)";
+ break;
+
+ case 14:
+ extrainfo = "\t(lr)";
+ break;
+
+ case 15:
+ {
+ char str[128];
+ snprintf(str, 128, "\t(pc)\t%08lx",
+ save_area->regs[X0_REG + i] -
+ (uint64_t)&kernel_first_byte +
+ 0x100000);
+ extrainfo = str;
+ }
+ break;
+ }
+
+ printk(LOG_PANIC, "x%d\t%"PRIx64"%s\n", i, save_area->regs[X0_REG + i], extrainfo);
+ }
+ printk(LOG_PANIC, "cpsr\t%"PRIx64"\n", save_area->regs[SPSR_REG]);
+ printk(LOG_PANIC, "called from: %#lx\n",
+ (lvaddr_t)__builtin_return_address(0) -
+ (lvaddr_t)&kernel_first_byte + 0x100000);
+
+ switch (evector) {
+ case AARCH64_EVECTOR_UNDEF:
+ panic("Undefined instruction.\n");
+ break;
+
+ case AARCH64_EVECTOR_PABT: {
+ int ifsr = sysreg_read_ifsr();
+ if (ifsr == 0) {
+ int bkpt = bkpt_decode(address);
+ if (bkpt >= 0) {
+ panic("Breakpoint: %4x\n", bkpt);
+ }
+ }
+ panic("Prefetch abort: ifsr %08x\n", ifsr);
+ }
+ break;
+
+ case AARCH64_EVECTOR_DABT:
+ {
+ uint32_t dfsr = sysreg_read_dfsr();
+
+ printf("\n");
+
+ if((dfsr >> 11) & 1) {
+ printf("On write access\n");
+ } else {
+ printf("On read access\n");
+ }
+
+ switch((dfsr & 0xf) | (dfsr & 0x400)) {
+ case 1:
+ printf("Alignment fault\n");
+ break;
+
+ case 4:
+ printf("Instruction cache-maintenance fault\n");
+ break;
+
+ case 5:
+ printf("Translation fault on section\n");
+ break;
+
+ case 6:
+ printf("Translation fault on page\n");
+ break;
+
+ case 8:
+ printf("Synchronous external abort\n");
+ break;
+
+ default:
+ printf("Unknown fault\n");
+ break;
+ }
+
+ panic("Data abort: dfsr %08"PRIx32"\n", dfsr);
+ }
+
+ default:
+ panic("Caused by evector: %02"PRIx32, evector);
+ break;
+ }
}
-void handle_sync_abort(uint64_t esr)
+void handle_irq(arch_registers_state_t* save_area, uintptr_t fault_pc)
{
- printf("Sync Abort: %"PRIx64"\n", esr);
- return;
+ uint32_t irq = 0;
+ irq = gic_get_active_irq();
+
+ debug(SUBSYS_DISPATCH, "IRQ %"PRIu32" while %s\n", irq,
+ dcb_current ? (dcb_current->disabled ? "disabled": "enabled") : "in kernel");
+
+ if (dcb_current != NULL) {
+ dispatcher_handle_t handle = dcb_current->disp;
+ if (save_area == dispatcher_get_disabled_save_area(handle)) {
+ assert(dispatcher_is_disabled_ip(handle, fault_pc));
+ dcb_current->disabled = true;
+ } else {
+/* debug(SUBSYS_DISPATCH,
+ "save_area=%p, dispatcher_get_enabled_save_are(handle)=%p\n",
+ save_area, dispatcher_get_enabled_save_area(handle));
+*/
+
+ assert(save_area == dispatcher_get_enabled_save_area(handle));
+ assert(!dispatcher_is_disabled_ip(handle, fault_pc));
+ dcb_current->disabled = false;
+ }
+ }
+
+ if (pit_handle_irq(irq)) {
+ // Timer interrupt, pit_handle_irq acks it at the timer.
+ assert(kernel_ticks_enabled);
+ kernel_now += kernel_timeslice;
+ wakeup_check(kernel_now);
+ dispatch(schedule());
+ }
+ // this is the (still) unacknowledged startup interrupt sent by the BSP
+ // we just acknowledge it here
+ else if(irq == 1)
+ {
+ gic_ack_irq(irq);
+ dispatch(schedule());
+ }
+ else {
+ gic_ack_irq(irq);
+ send_user_interrupt(irq);
+ panic("Unhandled IRQ %"PRIu32"\n", irq);
+ }
+
}
--- /dev/null
+/**
+ * \file
+ * \brief Bootstrap the kernel.
+ */
+
+/*
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef __ASSEMBLER__
+#define __ASSEMBLER__ 1
+#endif
+
+#include <barrelfish_kpi/flags_arch.h> // ARM_MODE_MASK
+#include <offsets.h> // BOOT_STACK_PHYS
+
+ .text
+
+ .globl start, halt, got_base
+ .extern kernel_stack, glbl_core_data
+
+ // Used to track phys memory allocator limit globally.
+ alloc_top .req x11
+
+start:
+ // On entry:
+ //
+ // MMU disabled
+ // Caches disabled
+ // CPU is in EL1.
+ //
+ mov x2, x0
+ mov x0, #3 << 20
+    msr cpacr_el1, x0           // FP and ASIMD instructions do not
+                                // cause any instruction to be trapped.
+
+ //init stack
+ ldr x0, =kernel_stack
+ add x0, x0, #KERNEL_STACK_SIZE
+ mov sp, x0
+
+ ldr PIC_REGISTER, =got_base
+
+ //prepare argument
+ mov x0, x2
+ b arch_init
+ b halt
+
+
+/**
+ * extern "C" void halt(void) __attribute__((noreturn))
+ */
+halt:
+ b .
+
+.ltorg
+
+got_base:
+ .word // Initialized by linker
+
+ .end
--- /dev/null
+/*
+ * Copyright (c) 2009, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <kernel.h>
+#include <string.h>
+#include <init.h>
+#include <exceptions.h>
+#include <exec.h>
+#include <offsets.h>
+#include <paging_kernel_arch.h>
+#include <serial.h>
+#include <stdio.h>
+#include <arm_hal.h>
+#include <cpiobin.h>
+#include <getopt/getopt.h>
+#include <sysreg.h>
+#include <elf/elf.h>
+#include <barrelfish_kpi/arm_core_data.h>
+
+#include <startup_arch.h>
+#include <kernel_multiboot.h>
+#include <global.h>
+#include <start_aps.h>
+#include <kcb.h>
+#include <coreboot.h>
+
+#define GEM5_RAM_SIZE (256UL*1024*1024)
+
+/*
+ * Used to store the address of global struct passed during boot across kernel
+ * relocations.
+ */
+//static uint32_t addr_global;
+
+/*
+ * Kernel stack.
+ *
+ * This is the one and only kernel stack for a kernel instance.
+ * ARMv8 requires that the stack is 16-byte aligned.
+ */
+uintptr_t kernel_stack[KERNEL_STACK_SIZE/sizeof(uintptr_t)]
+ __attribute__ ((aligned(16)));
+
+/*
+ * Boot-up L1 page table for addresses up to 2GB (translated by TTBR0)
+ *
+ * We reserve double the space needed, so we can align the pagetables to
+ * their size after relocation.
+ */
+static union armv8_l1_entry boot_l1_low[2 * PTABLE_NUM_ENTRIES]
+ __attribute__ ((aligned(PTABLE_ENTRY_SIZE)));
+static union armv8_l1_entry *aligned_boot_l1_low;
+/*
+ * Boot-up L1 page table for addresses >=2GB (translated by TTBR1)
+ */
+static union armv8_l1_entry boot_l1_high[2 * PTABLE_NUM_ENTRIES]
+ __attribute__ ((aligned(PTABLE_ENTRY_SIZE)));
+static union armv8_l1_entry *aligned_boot_l1_high;
+
+/* XXX - this shouldn't be here. */
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
+#define MAX(a,b) ((a) > (b) ? (a) : (b))
+#define CONSTRAIN(x, a, b) MIN(MAX(x, a), b)
+
+/*
+ * Kernel command line variables and binding options
+ */
+
+static int timeslice = 5; // Scheduling interval in ms
+
+static struct cmdarg cmdargs[] = {
+ { "consolePort", ArgType_UInt, { .uinteger = &serial_console_port}},
+ { "debugPort", ArgType_UInt, { .uinteger = &serial_debug_port}},
+ { "loglevel", ArgType_Int, { .integer = &kernel_loglevel }},
+ { "logmask", ArgType_Int, { .integer = &kernel_log_subsystem_mask }},
+ { "timeslice", ArgType_Int, { .integer = ×lice }},
+ {NULL, 0, {NULL}}
+};
+
+static inline void __attribute__ ((always_inline))
+relocate_stack(lvaddr_t offset)
+{
+ __asm volatile (
+ "add sp, sp, %[offset]\n\t" :: [offset] "r" (offset)
+ );
+}
+
+static inline void __attribute__ ((always_inline))
+relocate_got_base(lvaddr_t offset)
+{
+ __asm volatile (
+ "add x10, x10, %[offset]\n\t" :: [offset] "r" (offset)
+ );
+}
+
+static void paging_init(void)
+{
+ sysreg_invalidate_tlb_fn();
+
+ /* Ensure that pagetables are aligned to 4KB. */
+ aligned_boot_l1_low = (union armv8_l1_entry *)
+ ROUND_UP((uintptr_t)boot_l1_low, PTABLE_SIZE);
+ aligned_boot_l1_high = (union armv8_l1_entry *)
+ ROUND_UP((uintptr_t)boot_l1_high, PTABLE_SIZE);
+
+ lvaddr_t vbase = MEMORY_OFFSET - KERNEL_OFFSET, base = 0;
+
+ for(size_t i=0;
+ i < INIT_L1_ENTRIES/2;
+ i++, base += HUGE_PAGE_SIZE, vbase += HUGE_PAGE_SIZE)
+ {
+ // Create a temporary mapping at low addresses.
+ paging_map_kernel_l1_block(
+ (uintptr_t)aligned_boot_l1_low, base, base);
+
+ // Alias the same region above MEMORY_OFFSET.
+ paging_map_kernel_l1_block(
+ (uintptr_t)aligned_boot_l1_high, vbase, base);
+ }
+
+ // Activate the new page tables.
+ sysreg_write_ttbr1_el1((lpaddr_t)aligned_boot_l1_high);
+ sysreg_write_ttbr0_el1((lpaddr_t)aligned_boot_l1_low);
+}
+
+void kernel_startup_early(void)
+{
+ const char *cmdline;
+ assert(glbl_core_data != NULL);
+ cmdline = MBADDR_ASSTRING(glbl_core_data->cmdline);
+ parse_commandline(cmdline, cmdargs);
+ timeslice = CONSTRAIN(timeslice, 1, 20);
+}
+
+/**
+ * \brief Continue kernel initialization in kernel address space.
+ *
+ * This function resets paging to map out low memory and map in physical
+ * address space, relocating all remaining data structures. It sets up exception handling,
+ * initializes devices and enables interrupts. After that it
+ * calls arm_kernel_startup(), which should not return (if it does, this function
+ * halts the kernel).
+ */
+static void __attribute__ ((noinline,noreturn)) text_init(void)
+{
+ errval_t errval;
+
+ // Relocate glbl_core_data to "memory"
+ glbl_core_data = (struct arm_core_data *)
+ local_phys_to_mem((lpaddr_t)glbl_core_data);
+
+ // Relocate global to "memory"
+ global = (struct global*)local_phys_to_mem((lpaddr_t)global);
+
+ // Relocate kcb_current to "memory"
+ kcb_current = (struct kcb *)
+ local_phys_to_mem((lpaddr_t) kcb_current);
+
+ // Map-out low memory
+ if(glbl_core_data->multiboot_flags & MULTIBOOT_INFO_FLAG_HAS_MMAP) {
+ struct arm_coredata_mmap *mmap = (struct arm_coredata_mmap *)
+ local_phys_to_mem(glbl_core_data->mmap_addr);
+ paging_arm_reset(mmap->base_addr, mmap->length);
+ } else {
+ paging_arm_reset(PHYS_MEMORY_START, GEM5_RAM_SIZE);
+ }
+
+ exceptions_init();
+
+ //kernel_startup_early();
+
+ //initialize console
+ serial_console_init(true);
+
+ // do not remove/change this printf: needed by regression harness
+ printf("Barrelfish CPU driver starting on ARMv8 Board id 0x%08"PRIx32"\n",
+ hal_get_board_id());
+ printf("The address of paging_map_kernel_section is %p\n",
+ paging_map_kernel_section);
+
+ errval = serial_debug_init();
+ if (err_is_fail(errval)) {
+ printf("Failed to initialize debug port: %d", serial_debug_port);
+ }
+
+ my_core_id = hal_get_cpu_id();
+
+ gic_init();
+ if(hal_cpu_is_bsp()) {
+ // init SCU if more than one core present
+ if(scu_get_core_count() > 4) {
+ panic("ARM SCU doesn't support more than 4 cores!");
+ }
+ if(scu_get_core_count() > 1) {
+ scu_enable();
+ }
+ }
+
+ pit_init(timeslice, 0);
+ //pit_init(timeslice, 1);
+ tsc_init();
+
+ coreboot_set_spawn_handler(CPU_ARM8, start_aps_arm_start);
+ arm_kernel_startup();
+}
+
+/**
+ * Entry point called from boot.S for bootstrap processor.
+ * if is_bsp == true, then pointer points to multiboot_info
+ * else pointer points to a global struct
+ */
+
+void arch_init(void *pointer)
+{
+ void __attribute__ ((noreturn)) (*reloc_text_init)(void) =
+ (void *)local_phys_to_mem((lpaddr_t)text_init);
+
+ struct Elf64_Shdr *rela, *symtab;
+ struct arm_coredata_elf *elf = NULL;
+
+ serial_early_init(serial_console_port);
+
+ if(hal_cpu_is_bsp()) {
+ struct multiboot_info *mb = (struct multiboot_info *)pointer;
+ elf = (struct arm_coredata_elf *)&mb->syms.elf;
+ memset(glbl_core_data, 0, sizeof(struct arm_core_data));
+ glbl_core_data->start_free_ram =
+ ROUND_UP(max(multiboot_end_addr(mb),
+ (uintptr_t)&kernel_final_byte),
+ BASE_PAGE_SIZE);
+
+ glbl_core_data->mods_addr = mb->mods_addr;
+ glbl_core_data->mods_count = mb->mods_count;
+ glbl_core_data->cmdline = mb->cmdline;
+ glbl_core_data->mmap_length = mb->mmap_length;
+ glbl_core_data->mmap_addr = mb->mmap_addr;
+ glbl_core_data->multiboot_flags = mb->flags;
+
+ // Construct the global structure
+ memset(&global->locks, 0, sizeof(global->locks));
+ } else {
+ global = (struct global *)GLOBAL_VBASE;
+ memset(&global->locks, 0, sizeof(global->locks));
+ struct arm_core_data *core_data =
+ (struct arm_core_data*)((lvaddr_t)&kernel_first_byte - BASE_PAGE_SIZE);
+ glbl_core_data = core_data;
+
+ glbl_core_data->cmdline = (lpaddr_t)&core_data->kernel_cmdline;
+ my_core_id = core_data->dst_core_id;
+ elf = &core_data->elf;
+ }
+
+ // XXX: print kernel address for debugging with gdb
+ printf("Kernel starting at address 0x%"PRIxLVADDR"\n", &kernel_first_byte);
+
+ // Find relocation section
+ rela = elf64_find_section_header_type((struct Elf64_Shdr *)
+ ((uintptr_t)elf->addr),
+ elf->num, SHT_RELA);
+
+ if (rela == NULL) {
+ panic("Kernel image does not include relocation section!");
+ }
+
+ // Find symtab section
+ symtab = elf64_find_section_header_type((struct Elf64_Shdr *)(lpaddr_t)elf->addr,
+ elf->num, SHT_DYNSYM);
+
+ if (symtab == NULL) {
+ panic("Kernel image does not include symbol table!");
+ }
+
+ printf("Relocating to %p\n",
+ MEMORY_OFFSET + (lvaddr_t)&kernel_first_byte);
+
+ paging_init();
+
+ sysreg_enable_mmu();
+
+ // Relocate kernel image for top of memory
+ elf64_relocate(MEMORY_OFFSET + (lvaddr_t)&kernel_first_byte,
+ (lvaddr_t)&kernel_first_byte,
+ (struct Elf64_Rela *)(rela->sh_addr - START_KERNEL_PHYS +
+ &kernel_first_byte),
+ rela->sh_size,
+ (struct Elf64_Sym *)(symtab->sh_addr - START_KERNEL_PHYS +
+ &kernel_first_byte),
+ symtab->sh_size,
+ START_KERNEL_PHYS, &kernel_first_byte);
+ /*** Aliased kernel available now -- low memory still mapped ***/
+
+ // Relocate stack to aliased location
+ relocate_stack(MEMORY_OFFSET);
+
+ //relocate got_base register to aliased location
+ relocate_got_base(MEMORY_OFFSET);
+
+ // Call aliased text_init() function and continue initialization
+ reloc_text_init();
+}
--- /dev/null
+/*
+ * Copyright (c) 2009-2012,2015, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <kernel.h>
+#include <sysreg.h>
+#include <paging_kernel_arch.h>
+#include <string.h>
+#include <exceptions.h>
+#include <arm_hal.h>
+#include <cap_predicates.h>
+#include <dispatch.h>
+#include <startup_arch.h>
+
+/**
+ * Kernel L1 page table
+ *
+ * We reserve double the space needed, so we can align the pagetables to
+ * their size after relocation.
+ */
+static union armv8_l1_entry kernel_l1_table[2 * PTABLE_NUM_ENTRIES]
+ __attribute__((aligned(PTABLE_ENTRY_SIZE)));
+static union armv8_l1_entry *aligned_kernel_l1_table;
+
+/**
+ * L2 page table for device mappings
+ */
+static union armv8_l2_entry device_l2_table[2 * PTABLE_NUM_ENTRIES]
+ __attribute__((aligned(PTABLE_SIZE)));
+static union armv8_l2_entry *aligned_device_l2_table;
+
+// ------------------------------------------------------------------------
+// Utility declarations
+
+inline static uintptr_t paging_round_down(uintptr_t address, uintptr_t size)
+{
+ return address & ~(size - 1);
+}
+
+inline static uintptr_t paging_round_up(uintptr_t address, uintptr_t size)
+{
+ return (address + size - 1) & ~(size - 1);
+}
+
+inline static int aligned(uintptr_t address, uintptr_t bytes)
+{
+ return (address & (bytes - 1)) == 0;
+}
+
+static void
+paging_write_l1_entry(uintptr_t ttbase, lvaddr_t va, union armv8_l1_entry l1)
+{
+ union armv8_l1_entry *l1_table;
+ if (ttbase == 0) {
+ if(va < MEMORY_OFFSET)
+ ttbase = sysreg_read_ttbr0_el1() + MEMORY_OFFSET;
+ else
+ ttbase = sysreg_read_ttbr1_el1() + MEMORY_OFFSET;
+ }
+ l1_table = (union armv8_l1_entry *) ttbase;
+
+ l1_table[ARMv8_L1_OFFSET(va)] = l1;
+}
+
+
+// ------------------------------------------------------------------------
+// Exported functions
+
+void paging_map_kernel_section(uintptr_t ttbase, lvaddr_t va, lpaddr_t pa)
+{
+
+ union armv8_l1_entry l1;
+
+ l1.raw = 0;
+ l1.block.type = ARMv8_Ln_BLOCK;
+ l1.block.af = 1;
+ l1.block.base_address = pa >> 21u;
+ paging_write_l1_entry(ttbase, va, l1);
+}
+
+void paging_map_kernel_l1_block(uintptr_t ttbase, lvaddr_t va, lpaddr_t pa)
+{
+
+ union armv8_l1_entry l1;
+
+ l1.raw = 0;
+ l1.block.type = ARMv8_Ln_BLOCK;
+ l1.block.af = 1;
+ l1.block.base_address = pa >> 30u;
+ paging_write_l1_entry(ttbase, va, l1);
+}
+
+void paging_map_memory(uintptr_t ttbase, lpaddr_t paddr, size_t bytes)
+{
+ lpaddr_t pend = paging_round_up(paddr + bytes, HUGE_PAGE_SIZE);
+ while (paddr < pend) {
+ paging_map_kernel_l1_block(ttbase,
+ (paddr + MEMORY_OFFSET) - KERNEL_OFFSET,
+ paddr);
+ paddr += HUGE_PAGE_SIZE;
+ }
+}
+
+static void
+paging_map_device_section(uintptr_t ttbase, lvaddr_t va, lpaddr_t pa)
+{
+ union armv8_l2_entry l2;
+ union armv8_l2_entry *l2_table;
+
+ l2.raw = 0;
+ l2.block.type = ARMv8_Ln_BLOCK;
+ l2.block.af = 1;
+ l2.block.base_address = pa >> 21u;
+
+ l2_table = (union armv8_l2_entry *) ttbase;
+
+ l2_table[ARMv8_L2_OFFSET(va)] = l2;
+}
+
+static lvaddr_t dev_alloc;
+
+lvaddr_t paging_map_device(lpaddr_t device_base, size_t device_bytes)
+{
+ // HACK to put device in high memory.
+ // Should likely track these allocations.
+ assert(device_bytes <= LARGE_PAGE_SIZE);
+ dev_alloc -= LARGE_PAGE_SIZE;
+ paging_map_device_section((uintptr_t)aligned_device_l2_table,
+ dev_alloc, device_base);
+
+ return KERNEL_OFFSET + dev_alloc;
+}
+/**
+ * \brief Reset kernel paging.
+ *
+ * This function resets the page maps for kernel and memory-space. It clears
+ * out all other mappings. Use this only at system bootup!
+ */
+void paging_arm_reset(lpaddr_t paddr, size_t bytes)
+{
+ // Make sure kernel pagetables are aligned to 4KB after relocation
+ aligned_kernel_l1_table =
+ (union armv8_l1_entry *)ROUND_UP((uintptr_t)kernel_l1_table,
+ (uintptr_t)PTABLE_SIZE);
+ aligned_device_l2_table =
+ (union armv8_l2_entry *)ROUND_UP((uintptr_t)device_l2_table,
+ (uintptr_t)PTABLE_SIZE);
+
+ // Map the device-region table
+ paging_map_user_pages_l1(
+ (uintptr_t)aligned_kernel_l1_table, 0,
+ mem_to_local_phys((uintptr_t)aligned_device_l2_table));
+ dev_alloc= DEVICE_OFFSET - KERNEL_OFFSET;
+
+ // Re-map physical memory
+ paging_map_memory((uintptr_t)aligned_kernel_l1_table, paddr, bytes);
+
+ sysreg_write_ttbr1_el1(
+ mem_to_local_phys((uintptr_t)aligned_kernel_l1_table));
+
+ sysreg_invalidate_tlb();
+}
+
+void paging_make_good(lpaddr_t base)
+{
+ union armv8_l1_entry *newl1 =
+ (union armv8_l1_entry *)local_phys_to_mem(base);
+ int i;
+
+ // XXX: Disabled till vaddr_t is figured out
+ debug(SUBSYS_PAGING, "Is now a L1: table = 0x%"PRIxLPADDR"\n", base);
+
+ // Map memory
+ for(i = ARMv8_L1_OFFSET(MEMORY_OFFSET); i < PTABLE_NUM_ENTRIES; i++) {
+ newl1[i] = aligned_kernel_l1_table[i];
+ }
+}
+
+/* XXX - This is poorly named. */
+void paging_map_user_pages_l1(lvaddr_t table_base, lvaddr_t va, lpaddr_t pa)
+{
+ assert(aligned(table_base, PTABLE_SIZE));
+ assert(aligned(pa, PTABLE_SIZE));
+
+ union armv8_l1_entry e;
+
+ e.raw = 0;
+ e.page_table.type = ARMv8_Ln_TABLE;
+ e.page_table.base_address = (pa >> 12);
+
+ paging_write_l1_entry(table_base, va, e);
+}
+
+void paging_set_l2_entry(uintptr_t* l2e, lpaddr_t addr, uintptr_t flags)
+{
+ assert(0 == (flags & 0xfffff000));
+ assert(0 == (flags & 0x3));
+ assert(0 == (addr & 0xfff));
+
+ union armv8_l2_entry e;
+ e.raw = flags;
+
+ e.page_table.type = ARMv8_Ln_TABLE;
+ e.page_table.base_address = (addr >> 12);
+
+ *l2e = e.raw;
+}
+
+void paging_set_l3_entry(uintptr_t* l3e, lpaddr_t addr, uintptr_t flags)
+{
+ assert(0 == (flags & 0xfffff000));
+ assert(0 == (flags & 0x3));
+ assert(0 == (addr & 0xfff));
+
+ union armv8_l3_entry e;
+ e.raw = flags;
+
+ e.page.type = ARMv8_L3_PAGE;
+ e.page.af = 1;
+ e.page.base_address = (addr >> 12);
+
+ *l3e = e.raw;
+}
+
+void paging_context_switch(lpaddr_t ttbr)
+{
+ assert(ttbr < MEMORY_OFFSET);
+ //assert((ttbr & 0x3fff) == 0);
+
+ lpaddr_t old_ttbr = sysreg_read_ttbr0_el1();
+ if (ttbr != old_ttbr)
+ {
+ sysreg_write_ttbr0_el1(ttbr);
+ sysreg_invalidate_tlb();
+ //this isn't necessary on gem5, since gem5 doesn't implement the cache
+ //maintenance instructions, but ensures coherency by itself
+ //sysreg_invalidate_i_and_d_caches();
+ }
+}
/*
- * Copyright (c) 2007, 2009, 2012, ETH Zurich.
+ * Copyright (c) 2012,2015, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
#include <kernel.h>
#include <dev/arm_icp_pit_dev.h>
#include <dev/a9scu_dev.h>
-#include <pl011_uart.h>
-#include <serial.h>
+#include <arch/arm/pl011_uart.h>
#include <arm_hal.h>
-#include <cp15.h>
-#include <io.h>
-#include <gic.h>
+#include <arch/arm/gic.h>
+#include <sysreg.h>
//hardcoded bc gem5 doesn't set board id in ID_Register
#define VEXPRESS_ELT_BOARD_ID 0x8e0
uint8_t hal_get_cpu_id(void)
{
- return cp15_get_cpu_id();
+ return sysreg_get_cpu_id();
}
//Gem5 ensures that cpu0 is always BSP, this probably also holds in general
bool hal_cpu_is_bsp(void)
{
- return cp15_get_cpu_id() == 0;
+ return sysreg_get_cpu_id() == 0;
}
// clock rate hardcoded to 1GHz
static uint32_t tsc_hz = 1000000000;
//
-// Interrupt controller
-//
-
-// RealView.py l342
-#define GIC_BASE 0x2C000000 // l342
-#define DIST_OFFSET 0x1000 // l342
-#define CPU_OFFSET 0x2000 // l342
-
-void gic_map_and_init(pl130_gic_t *gic)
-{
- lvaddr_t gic_base = paging_map_device(GIC_BASE, ARM_L1_SECTION_BYTES);
- pl130_gic_initialize(gic, (mackerel_addr_t)gic_base + DIST_OFFSET,
- (mackerel_addr_t)gic_base + CPU_OFFSET);
-}
-
-//
-// Kernel timer and tsc
+// Kernel timer and TSC
//
-
-#define PIT_BASE 0x1C100000 // RealView.py l344
-#define PIT0_OFFSET 0x10000 // RealView.py l344f
+#define PIT_BASE 0x1C000000 // RealView.py l344
+#define PIT0_OFFSET 0x100000 // RealView.py l344f
// difference between two PITs
#define PIT_DIFF 0x10000 // from gem5/src/dev/arm/RealView.py l344 f
{
static lvaddr_t timer_base = 0;
if (timer_base == 0) {
- timer_base = paging_map_device(PIT_BASE, ARM_L1_SECTION_BYTES);
+ timer_base = paging_map_device(PIT_BASE, LARGE_PAGE_SIZE);
}
return timer_base;
}
lvaddr_t timer_base = pit_map_resources();
- sp804_pit_initialize(pit, (mackerel_addr_t)(timer_base + PIT0_OFFSET + pit_id*PIT_DIFF));
+ sp804_pit_initialize(pit,
+ (mackerel_addr_t)(timer_base + PIT0_OFFSET + (pit_id+1)*PIT_DIFF));
// if we are BSP we also config the values of the PIT
if(hal_cpu_is_bsp())
{
if (PIT0_IRQ == irq)
{
- sp804_pit_Timer1IntClr_wr(&pit0, ~0ul);
+ sp804_pit_Timer1IntClr_wr(&pit0, -1);
gic_ack_irq(irq);
return 1;
}
else if(PIT1_IRQ == irq)
{
- sp804_pit_Timer1IntClr_wr(&pit1, ~0ul);
+ sp804_pit_Timer1IntClr_wr(&pit1, -1);
gic_ack_irq(irq);
return 1;
}
void tsc_init(void)
{
- lvaddr_t timer_base = paging_map_device(TSC_BASE, ARM_L1_SECTION_BYTES);
+ lvaddr_t timer_base = paging_map_device(TSC_BASE, LARGE_PAGE_SIZE);
cortex_a9_pit_initialize(&tsc, (mackerel_addr_t)timer_base+TSC_OFFSET);
// write load
- uint32_t load = ~0ul;
+ // uint32_t load = ~0ul;
+ uint64_t load = ~0ul;
cortex_a9_pit_TimerLoad_wr(&tsc, load);
//configure tsc
return tsc_hz;
}
+//
+// Interrupt controller
+//
+
+// RealView.py l342
+#define GIC_BASE 0x2C000000 // l342
+#define DIST_OFFSET 0x1000 // l342
+#define CPU_OFFSET 0x2000 // l342
+
+void gic_map_and_init(pl130_gic_t *gic)
+{
+    /* Map the GIC device region and initialize the distributor and
+     * per-CPU interface at their fixed offsets within it.
+     */
+    lvaddr_t gic_base = paging_map_device(GIC_BASE, LARGE_PAGE_SIZE);
+    pl130_gic_initialize(gic, (mackerel_addr_t)gic_base + DIST_OFFSET,
+                         (mackerel_addr_t)gic_base + CPU_OFFSET);
+}
//
// Snoop Control Unit
void scu_enable(void)
{
- lvaddr_t scu_base = paging_map_device(TSC_BASE, ARM_L1_SECTION_BYTES);
+ lvaddr_t scu_base = paging_map_device(TSC_BASE, LARGE_PAGE_SIZE);
a9scu_initialize(&scu, (mackerel_addr_t)scu_base);
printf("SYSFLAGSET_BASE is at 0x%x\n", SYSFLAGSET_BASE);
sysflagset_base = paging_map_device(SYSFLAGSET_BASE,
- ARM_L1_SECTION_BYTES);
+ LARGE_PAGE_SIZE);
printf(".. mapped to 0x%"PRIxLVADDR"\n", sysflagset_base);
}
lvaddr_t sysflags = sysflagset_base + SYSFLAGSET_OFFSET;
printf(".. using address 0x%p\n", sysflags);
- writel(regval, (char *) sysflags);
+ *(uint32_t *)sysflags = regval;
}
+++ /dev/null
-#include <kernel.h>
-#include <irq.h>
-
-#pragma GCC diagnostic ignored "-Wsuggest-attribute=noreturn"
-
-errval_t irq_table_notify_domains(struct kcb *kcb)
-{
- panic("NYI");
-}
/*
- * Copyright (c) 2009 ETH Zurich.
+ * Copyright (c) 2009, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
#include <stdbool.h>
+++ /dev/null
-/**\r
- * \file\r
- * \brief The world's simplest serial driver.\r
- *\r
- */\r
-\r
-/*\r
- * Copyright (c) 2010, ETH Zurich.\r
- * All rights reserved.\r
- *\r
- * This file is distributed under the terms in the attached LICENSE file.\r
- * If you do not find this file, copies can be found by writing to:\r
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.\r
- */\r
-\r
-#include <serial.h>\r
-#include <kputchar.h>\r
-#include <global.h>\r
-\r
-#define KPBUFSZ 256\r
-static char kputbuf[KPBUFSZ];\r
-static int kcount = 0;\r
-\r
-static void kflush(void)\r
-{\r
- for(int i=0; i<kcount; i++)\r
- serial_console_putchar(kputbuf[i]);\r
- kcount = 0;\r
-}\r
-\r
-void kprintf_begin(void)\r
-{\r
- // TODO: locking?\r
-}\r
-\r
-int kputchar(int c)\r
-{\r
- kputbuf[kcount++] = c;\r
- if (kcount == KPBUFSZ || c == '\n')\r
- kflush();\r
- return c;\r
-}\r
-\r
-void kprintf_end(void)\r
-{\r
- // TODO: locking?\r
- kflush();\r
-}\r
-\r
-// End\r
+
/*
* Copyright (c) 2007, 2008, ETH Zurich.
* All rights reserved.
ENTRY(start)
-/*
-PHDRS {
- headers PT_PHDR PHDRS;
- text PT_LOAD FILEHDR PHDRS;
- data PT_LOAD;
- dynamic PT_DYNAMIC;
-}
-*/
-
SECTIONS {
. = START_KERNEL_PHYS;
-
+
/*kernel_elf_header = .;*/
kernel_first_byte = .;
-
- /*. += SIZEOF_HEADERS; */
-
+
.text : { *(.text); }
kernel_text_final_byte = .;
*(.rodata);
}
- /*
.got . :
{
got_base = .;
*(.got);
}
- */
-
+
/*.rel.got . : { *(.rel.got); } */
.bss . :
{
*(.bss);
}
-
+
kernel_final_byte = .;
/* XXX - why is this in an arch-specific directory? */
/*
- * Copyright (c) 2007, 2008, ETH Zurich.
+ * Copyright (c) 2007,2008, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
#include <kernel.h>
#include <misc.h>
#define DEFAULT_LOGLEVEL LOG_DEBUG
+//#define DEFAULT_LOGLEVEL (LOG_DEBUG+1)
#define DEFAULT_SUBSYSTEM_MASK (~0L)
/**
--- /dev/null
+/*
+ * Copyright (c) 2009-2012,2015, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <kernel.h>
+#include <dispatch.h>
+#include <sysreg.h>
+#include <paging_kernel_arch.h>
+#include <string.h>
+#include <exceptions.h>
+#include <arm_hal.h>
+#include <cap_predicates.h>
+#include <dispatch.h>
+
+// Round 'address' down to the previous multiple of 'size' (power of two).
+inline static uintptr_t paging_round_down(uintptr_t address, uintptr_t size)
+{
+    return address & ~(size - 1);
+}
+
+// Round 'address' up to the next multiple of 'size' (power of two).
+inline static uintptr_t paging_round_up(uintptr_t address, uintptr_t size)
+{
+    return (address + size - 1) & ~(size - 1);
+}
+
+// Nonzero iff 'address' is aligned to 'bytes' (power of two).
+inline static int aligned(uintptr_t address, uintptr_t bytes)
+{
+    return (address & (bytes - 1)) == 0;
+}
+
+static void
+paging_set_flags(union armv8_l3_entry *entry, uintptr_t kpi_paging_flags)
+{
+    /* Translate KPI paging flags into the descriptor's AP (access
+     * permission) bits, and set the access flag.
+     *
+     * NOTE(review): ap=1 for writable and ap=3 for read-only look like the
+     * AArch64 AP[2:1] encodings 0b01 (RW, EL0 accessible) and 0b11 (RO,
+     * EL0 accessible) -- confirm against the armv8 union's bitfield layout.
+     */
+
+    entry->page.ap = 0;
+
+    if(kpi_paging_flags & KPI_PAGING_FLAGS_WRITE)
+        entry->page.ap = 1;
+    else if (kpi_paging_flags & KPI_PAGING_FLAGS_READ)
+        entry->page.ap = 3;
+    else
+        panic("oops: wrong page flags");
+
+    entry->page.af = 1;   // access flag: avoid an Access fault on first use
+}
+
+static errval_t
+caps_map_l1(struct capability* dest,
+            cslot_t slot,
+            struct capability* src,
+            uintptr_t kpi_paging_flags,
+            uintptr_t offset,
+            uintptr_t pte_count)
+{
+    /* Install an L2 page-table capability into one slot of an L1 table.
+     *
+     * \param dest      L1 VNode capability (the table being written).
+     * \param slot      Entry index within the L1 table.
+     * \param src       L2 VNode capability to install.
+     * \param kpi_paging_flags  Unused for table (non-leaf) entries.
+     * \param offset    Must be 0 for page-table mappings.
+     * \param pte_count Must be 1: exactly one table entry is written.
+     */
+
+    if (slot >= PTABLE_NUM_ENTRIES) {
+        printf("slot = %"PRIuCSLOT"\n",slot);
+        panic("oops: slot id >= %d", PTABLE_NUM_ENTRIES);
+        return SYS_ERR_VNODE_SLOT_INVALID;
+    }
+
+    if (pte_count != 1) {
+        printf("pte_count = %zu\n",(size_t)pte_count);
+        panic("oops: pte_count");
+        return SYS_ERR_VM_MAP_SIZE;
+    }
+
+    if (src->type != ObjType_VNode_AARCH64_l2) {
+        panic("oops: l1 wrong src type");
+        return SYS_ERR_WRONG_MAPPING;
+    }
+
+    // Slots covering kernel addresses are reserved for the kernel mapping.
+    if (slot >= ARMv8_L1_OFFSET(MEMORY_OFFSET)) {
+        printf("slot = %"PRIuCSLOT"\n",slot);
+        panic("oops: l1 slot id");
+        return SYS_ERR_VNODE_SLOT_RESERVED;
+    }
+
+    // Destination
+    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
+    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);
+
+    union armv8_l1_entry* entry = (union armv8_l1_entry*)dest_lvaddr + slot;
+
+    // Source
+    genpaddr_t src_gpaddr = get_address(src);
+    lpaddr_t src_lpaddr = gen_phys_to_local_phys(src_gpaddr);
+
+    assert(offset == 0);
+    assert(aligned(src_lpaddr, 1u << 12));
+    // NOTE(review): the '+ 32' overlap bound looks inherited from the ARMv7
+    // port; on AArch64 every table is 4K -- confirm the intended check.
+    assert((src_lpaddr < dest_lpaddr) || (src_lpaddr >= dest_lpaddr + 32));
+
+    // Record the mapping so it can be found again at unmap time.
+    struct cte *src_cte = cte_for_cap(src);
+    src_cte->mapping_info.pte_count = pte_count;
+    src_cte->mapping_info.pte = dest_lpaddr + slot;
+    src_cte->mapping_info.offset = 0;
+
+    entry->raw = 0;
+    entry->page_table.type = ARMv8_Ln_TABLE;
+    entry->page_table.base_address = src_lpaddr >> 12;
+    // ARMv8 descriptors are 64-bit: 'raw' must be printed with PRIx64;
+    // the previous PRIx32 conversion of a 64-bit vararg was undefined.
+    debug(SUBSYS_PAGING, "L1 mapping %"PRIuCSLOT". @%p = %016"PRIx64"\n",
+          slot, entry, entry->raw);
+
+    sysreg_invalidate_tlb();
+
+    return SYS_ERR_OK;
+}
+
+static errval_t
+caps_map_l2(struct capability* dest,
+            cslot_t slot,
+            struct capability* src,
+            uintptr_t kpi_paging_flags,
+            uintptr_t offset,
+            uintptr_t pte_count)
+{
+    /* Install an L3 page-table capability into one slot of an L2 table.
+     * Mirrors caps_map_l1: offset must be 0 and pte_count must be 1.
+     */
+    if (slot >= PTABLE_NUM_ENTRIES) {
+        printf("slot = %"PRIuCSLOT"\n",slot);
+        panic("oops: slot id >= 512");
+        return SYS_ERR_VNODE_SLOT_INVALID;
+    }
+
+    if (pte_count != 1) {
+        printf("pte_count = %zu\n",(size_t)pte_count);
+        panic("oops: pte_count");
+        return SYS_ERR_VM_MAP_SIZE;
+    }
+
+    if (src->type != ObjType_VNode_AARCH64_l3) {
+        panic("oops: l2 wrong src type");
+        return SYS_ERR_WRONG_MAPPING;
+    }
+
+    // (A second 'slot > PTABLE_NUM_ENTRIES' check was removed: it was
+    // unreachable after the '>=' bounds check above.)
+
+    // Destination
+    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
+    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);
+
+    union armv8_l2_entry* entry = (union armv8_l2_entry*)dest_lvaddr + slot;
+
+    // Source
+    genpaddr_t src_gpaddr = get_address(src);
+    lpaddr_t src_lpaddr = gen_phys_to_local_phys(src_gpaddr);
+
+    assert(offset == 0);
+    assert(aligned(src_lpaddr, 1u << 12));
+    assert((src_lpaddr < dest_lpaddr) || (src_lpaddr >= dest_lpaddr + 4096));
+
+    // Record the mapping so it can be found again at unmap time.
+    struct cte *src_cte = cte_for_cap(src);
+    src_cte->mapping_info.pte_count = pte_count;
+    src_cte->mapping_info.pte = dest_lpaddr + slot;
+    src_cte->mapping_info.offset = 0;
+
+    entry->raw = 0;
+    entry->page_table.type = ARMv8_Ln_TABLE;
+    entry->page_table.base_address = src_lpaddr >> 12;
+    // ARMv8 descriptors are 64-bit: print 'raw' with PRIx64, not PRIx32.
+    debug(SUBSYS_PAGING, "L2 mapping %"PRIuCSLOT". @%p = %016"PRIx64"\n",
+          slot, entry, entry->raw);
+
+    sysreg_invalidate_tlb();
+
+    return SYS_ERR_OK;
+}
+
+
+static errval_t
+caps_map_l3(struct capability* dest,
+            cslot_t slot,
+            struct capability* src,
+            uintptr_t kpi_paging_flags,
+            uintptr_t offset,
+            uintptr_t pte_count)
+{
+    /* Map 'pte_count' base pages of a Frame/DevFrame, starting at byte
+     * 'offset' within the frame, into consecutive entries of an L3 table
+     * beginning at 'slot'.
+     */
+    assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));
+
+    if (slot >= PTABLE_NUM_ENTRIES) {
+        panic("oops: slot >= 512");
+        return SYS_ERR_VNODE_SLOT_INVALID;
+    }
+
+    if (src->type != ObjType_Frame && src->type != ObjType_DevFrame) {
+        panic("oops: src->type != ObjType_Frame && src->type != ObjType_DevFrame");
+        return SYS_ERR_WRONG_MAPPING;
+    }
+
+    // Check the offset lies within the frame.
+    // NOTE(review): only the first page is range-checked here; the
+    // pte_count extent is bounded against the table below, but not
+    // against the frame size -- confirm callers guarantee this.
+    if ((offset + BASE_PAGE_SIZE > get_size(src)) ||
+        ((offset % BASE_PAGE_SIZE) != 0)) {
+        panic("oops: frame offset invalid");
+        return SYS_ERR_FRAME_OFFSET_INVALID;
+    }
+
+    // check mapping does not overlap leaf page table
+    if (slot + pte_count > PTABLE_NUM_ENTRIES ) {
+        return SYS_ERR_VM_MAP_SIZE;
+    }
+
+    // Destination
+    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
+    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);
+
+    union armv8_l3_entry* entry = (union armv8_l3_entry*)dest_lvaddr + slot;
+    if (entry->page.type != ARMv8_Ln_INVALID) {
+        panic("Remapping valid page.");
+    }
+
+    lpaddr_t src_lpaddr = gen_phys_to_local_phys(get_address(src) + offset);
+    if ((src_lpaddr & (BASE_PAGE_SIZE - 1))) {
+        panic("Invalid target");
+    }
+
+    // Record the mapping so it can be found again at unmap time.
+    struct cte *src_cte = cte_for_cap(src);
+    src_cte->mapping_info.pte_count = pte_count;
+    src_cte->mapping_info.pte = dest_lpaddr;
+    src_cte->mapping_info.offset = offset;
+
+    for (int i = 0; i < pte_count; i++) {
+        entry->raw = 0;
+
+        entry->page.type = ARMv8_L3_PAGE;
+        paging_set_flags(entry, kpi_paging_flags);
+        entry->page.base_address = (src_lpaddr + i * BASE_PAGE_SIZE) >> 12;
+
+        // Log the entry just written. This must happen BEFORE advancing
+        // 'entry'; previously the debug ran after 'entry++' and reported
+        // the address and (still-zero) contents of the NEXT entry.
+        debug(SUBSYS_PAGING, "L3 mapping %08"PRIxLVADDR"[%"PRIuCSLOT"] @%p = %016"PRIx64"\n",
+              dest_lvaddr, slot, entry, entry->raw);
+
+        entry++;
+    }
+
+    // Flush TLB if remapping.
+    sysreg_invalidate_tlb();
+
+    return SYS_ERR_OK;
+}
+
+/// Create page mappings
+errval_t caps_copy_to_vnode(struct cte *dest_vnode_cte, cslot_t dest_slot,
+                            struct cte *src_cte, uintptr_t flags,
+                            uintptr_t offset, uintptr_t pte_count)
+{
+    /* Dispatch a mapping request to the handler matching the destination
+     * VNode level (L1/L2/L3). Fails if 'src_cte' already backs a mapping.
+     */
+    struct capability *src_cap  = &src_cte->cap;
+    struct capability *dest_cap = &dest_vnode_cte->cap;
+
+    // A non-zero recorded pte means this cap is already mapped somewhere.
+    if (src_cte->mapping_info.pte) {
+        return SYS_ERR_VM_ALREADY_MAPPED;
+    }
+
+    if (ObjType_VNode_AARCH64_l1 == dest_cap->type) {
+        return caps_map_l1(dest_cap, dest_slot, src_cap,
+                           flags,
+                           offset,
+                           pte_count
+                          );
+    }
+    else if (ObjType_VNode_AARCH64_l2 == dest_cap->type) {
+        return caps_map_l2(dest_cap, dest_slot, src_cap,
+                           flags,
+                           offset,
+                           pte_count
+                          );
+    }
+    else if (ObjType_VNode_AARCH64_l3 == dest_cap->type) {
+        return caps_map_l3(dest_cap, dest_slot, src_cap,
+                           flags,
+                           offset,
+                           pte_count
+                          );
+    }
+    else {
+        // panic() is expected not to return, so no value is needed here.
+        panic("ObjType not VNode");
+    }
+}
+
+size_t do_unmap(lvaddr_t pt, cslot_t slot, size_t num_pages)
+{
+    /* Zero 'num_pages' consecutive L3 entries starting at 'slot' of the
+     * table mapped at kernel-virtual address 'pt'. Returns the number of
+     * entries cleared. The caller is responsible for the TLB flush.
+     */
+    size_t unmapped_pages = 0;
+    union armv8_l3_entry *ptentry = (union armv8_l3_entry *)pt + slot;
+    for (int i = 0; i < num_pages; i++) {
+        ptentry++->raw = 0;
+        unmapped_pages++;
+    }
+    return unmapped_pages;
+}
+
+static inline void read_pt_entry(struct capability *pgtable, size_t slot, genpaddr_t *paddr)
+{
+    /* Return (through *paddr) the physical address held in entry 'slot'
+     * of the given page-table capability.
+     *
+     * Fix: the original ignored 'slot' and always decoded entry 0; the
+     * entry pointer must be offset by the slot index.
+     */
+    assert(type_is_vnode(pgtable->type));
+    assert(paddr);
+
+    genpaddr_t gp = get_address(pgtable);
+    lpaddr_t lp = gen_phys_to_local_phys(gp);
+    lvaddr_t lv = local_phys_to_mem(lp);
+
+    switch (pgtable->type) {
+    case ObjType_VNode_AARCH64_l1:
+    {
+        union armv8_l1_entry *e = (union armv8_l1_entry*)lv + slot;
+        *paddr = (genpaddr_t)(e->page_table.base_address) << 12;
+        return;
+    }
+    case ObjType_VNode_AARCH64_l2:
+    {
+        union armv8_l2_entry *e = (union armv8_l2_entry*)lv + slot;
+        *paddr = (genpaddr_t)(e->page_table.base_address) << 12;
+        return;
+    }
+    case ObjType_VNode_AARCH64_l3:
+    {
+        union armv8_l3_entry *e = (union armv8_l3_entry*)lv + slot;
+        *paddr = (genpaddr_t)(e->page.base_address) << 12;
+        return;
+    }
+    default:
+        assert(!"Should not get here");
+    }
+}
+
+errval_t page_mappings_unmap(struct capability *pgtable, struct cte *mapping, size_t slot, size_t num_pages)
+{
+    /* Remove the mapping recorded in 'mapping' from leaf table 'pgtable',
+     * starting at 'slot'. 'num_pages' must equal the recorded pte_count.
+     * Flushes the whole TLB and clears the mapping record on success.
+     */
+    assert(type_is_vnode(pgtable->type));
+    //printf("page_mappings_unmap(%zd pages, slot = %zd)\n", num_pages, slot);
+
+    // get page table entry data
+    genpaddr_t paddr;
+    //lpaddr_t pte;
+    read_pt_entry(pgtable, slot, &paddr);
+    lvaddr_t pt = local_phys_to_mem(gen_phys_to_local_phys(get_address(pgtable)));
+
+    // get virtual address of first page
+    // TODO: error checking
+    genvaddr_t vaddr;
+    struct cte *leaf_pt = cte_for_cap(pgtable);
+    compile_vaddr(leaf_pt, slot, &vaddr);
+    //genvaddr_t vend = vaddr + num_pages * BASE_PAGE_SIZE;
+    // printf("vaddr = 0x%"PRIxGENVADDR"\n", vaddr);
+    // printf("num_pages = %zu\n", num_pages);
+
+    // get cap for mapping
+    /*
+    struct cte *mem;
+    errval_t err = lookup_cap_for_mapping(paddr, pte, &mem);
+    if (err_is_fail(err)) {
+        printf("page_mappings_unmap: %ld\n", err);
+        return err;
+    }
+    */
+    //printf("state before unmap: mapped_pages = %zd\n", mem->mapping_info.mapped_pages);
+    //printf("state before unmap: num_pages = %zd\n", num_pages);
+
+    // Refuse partial unmaps: the count must match what was mapped.
+    if (num_pages != mapping->mapping_info.pte_count) {
+        printf("num_pages = %zu, mapping = %zu\n", num_pages, mapping->mapping_info.pte_count);
+        // want to unmap a different amount of pages than was mapped
+        return SYS_ERR_VM_MAP_SIZE;
+    }
+
+    do_unmap(pt, slot, num_pages);
+
+    // flush TLB for unmapped pages
+    // TODO: selective TLB flush
+    sysreg_invalidate_tlb();
+
+    // update mapping info
+    memset(&mapping->mapping_info, 0, sizeof(struct mapping_info));
+
+    return SYS_ERR_OK;
+}
+
+errval_t paging_modify_flags(struct capability *frame, uintptr_t offset,
+                             uintptr_t pages, uintptr_t kpi_paging_flags)
+{
+    /* Rewrite the permission bits of 'pages' consecutive L3 entries
+     * backing 'frame', then flush the affected TLB range.
+     *
+     * NOTE(review): 'offset' is added in bytes to the recorded pte
+     * address; caps_map_l3 stores the table base (without the slot), so
+     * the caller presumably passes a byte offset to the first entry --
+     * TODO confirm the unit convention.
+     */
+    // check flags
+    assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));
+
+    struct cte *mapping = cte_for_cap(frame);
+    struct mapping_info *info = &mapping->mapping_info;
+
+    /* Calculate location of page table entries we need to modify */
+    lvaddr_t base = local_phys_to_mem(info->pte) + offset;
+
+    // paging_set_flags only touches the ap/af bits, so type and base
+    // address of each entry are preserved.
+    for (int i = 0; i < pages; i++) {
+        union armv8_l3_entry *entry =
+            (union armv8_l3_entry *)base + i;
+        paging_set_flags(entry, kpi_paging_flags);
+    }
+
+    return paging_tlb_flush_range(mapping, 0, pages);
+}
+
+void paging_dump_tables(struct dcb *dispatcher)
+{
+    /* Debug dump of a dispatcher's page tables: print the physical
+     * address held in every populated leaf entry.
+     *
+     * NOTE(review): this walks only two levels (the root table and the
+     * table each root entry points to) -- confirm whether the middle
+     * level is intentionally skipped for this configuration.
+     */
+    printf("dump_hw_page_tables\n");
+    lvaddr_t l1 = local_phys_to_mem(dispatcher->vspace);
+
+    for (int l1_index = 0; l1_index < PTABLE_NUM_ENTRIES; l1_index++) {
+        // get level3 table
+        union armv8_l1_entry *l1_e = (union armv8_l1_entry *)l1 + l1_index;
+        if (!l1_e->raw) { continue; }
+        // base_address is in 4K units everywhere else in this file
+        // (stored as addr >> 12), so decode with << 12, not << 10.
+        genpaddr_t ptable_gp = (genpaddr_t)(l1_e->page_table.base_address) << 12;
+        lvaddr_t ptable_lv = local_phys_to_mem(gen_phys_to_local_phys(ptable_gp));
+
+        for (int entry = 0; entry < PTABLE_NUM_ENTRIES; entry++) {
+            union armv8_l3_entry *e =
+                (union armv8_l3_entry *)ptable_lv + entry;
+            genpaddr_t paddr = (genpaddr_t)(e->page.base_address) << BASE_PAGE_BITS;
+            if (!paddr) {
+                continue;
+            }
+            printf("%d.%d: 0x%"PRIxGENPADDR"\n", l1_index, entry, paddr);
+        }
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2009,2010,2015, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <kernel.h>
+#include <dispatch.h>
+#include <string.h>
+#include <stdio.h>
+
+#include <barrelfish_kpi/init.h>
+#include <barrelfish_kpi/syscalls.h>
+#include <elf/elf.h>
+
+#include <arm_hal.h>
+#include <paging_kernel_arch.h>
+#include <exceptions.h>
+#include <sysreg.h>
+#include <cpiobin.h>
+#include <init.h>
+#include <barrelfish_kpi/paging_arm_v8.h>
+#include <barrelfish_kpi/arm_core_data.h>
+#include <kernel_multiboot.h>
+#include <offsets.h>
+#include <startup_arch.h>
+#include <global.h>
+#include <kcb.h>
+
+#define CNODE(cte) (cte)->cap.u.cnode.cnode
+#define UNUSED(x) (x) = (x)
+
+#define STARTUP_PROGRESS() debug(SUBSYS_STARTUP, "%s:%d\n", \
+ __FUNCTION__, __LINE__);
+
+#define BSP_INIT_MODULE_NAME BF_BINARY_PREFIX "armv8/sbin/init"
+#define APP_INIT_MODULE_NAME BF_BINARY_PREFIX "armv8/sbin/monitor"
+
+
+//static phys_mmap_t* g_phys_mmap; // Physical memory map
+static union armv8_l1_entry * init_l1; // L1 page table for init
+static union armv8_l2_entry * init_l2; // L2 page tables for init
+static union armv8_l3_entry * init_l3; // L3 page tables for init
+
+static struct spawn_state spawn_state;
+
+/// Pointer to bootinfo structure for init
+struct bootinfo* bootinfo = (struct bootinfo*)INIT_BOOTINFO_VBASE;
+
+/**
+ * Each kernel has a local copy of global and locks. However, during booting and
+ * kernel relocation, these are set to point to global of the pristine kernel,
+ * so that all the kernels can share it.
+ */
+//static struct global myglobal;
+struct global *global = (struct global *)GLOBAL_VBASE;
+
+// Round 'value' up to the next multiple of 'unit' (must be a power of two).
+static inline uintptr_t round_up(uintptr_t value, size_t unit)
+{
+    assert(0 == (unit & (unit - 1)));
+    size_t m = unit - 1;
+    return (value + m) & ~m;
+}
+
+// Round 'value' down to a multiple of 'unit' (must be a power of two).
+static inline uintptr_t round_down(uintptr_t value, size_t unit)
+{
+    assert(0 == (unit & (unit - 1)));
+    size_t m = unit - 1;
+    return value & ~m;
+}
+
+// Physical memory allocator for spawn_app_init
+static lpaddr_t app_alloc_phys_start, app_alloc_phys_end;
+static lpaddr_t app_alloc_phys(size_t size)
+{
+    /* Bump allocator over [app_alloc_phys_start, app_alloc_phys_end):
+     * returns the base of 'size' bytes rounded up to whole pages.
+     *
+     * NOTE(review): the bound is only checked after advancing the cursor,
+     * and uses '>=', so an allocation exactly filling the region panics
+     * while one slightly overrunning it is detected only afterwards --
+     * confirm this is the intended slack.
+     */
+    uint32_t npages = (size + BASE_PAGE_SIZE - 1) / BASE_PAGE_SIZE;
+
+
+    lpaddr_t addr = app_alloc_phys_start;
+    app_alloc_phys_start += npages * BASE_PAGE_SIZE;
+
+    if (app_alloc_phys_start >= app_alloc_phys_end) {
+        panic("Out of memory, increase CORE_DATA_PAGES");
+    }
+
+    return addr;
+}
+
+// As app_alloc_phys, but first aligns the allocation cursor to 'align'.
+static lpaddr_t app_alloc_phys_aligned(size_t size, size_t align)
+{
+    app_alloc_phys_start = round_up(app_alloc_phys_start, align);
+    return app_alloc_phys(size);
+}
+
+/**
+ * The address from where bsp_alloc_phys will start allocating memory
+ */
+static lpaddr_t bsp_init_alloc_addr = 0;
+
+/**
+ * \brief Linear physical memory allocator.
+ *
+ * This function allocates a linear region of addresses of size 'size' from
+ * physical memory.
+ *
+ * \param size Number of bytes to allocate.
+ *
+ * \return Base physical address of memory region.
+ */
+static lpaddr_t bsp_alloc_phys(size_t size)
+{
+    // round to base page size
+    uint32_t npages = (size + BASE_PAGE_SIZE - 1) / BASE_PAGE_SIZE;
+
+    // The allocator must have been seeded before first use.
+    assert(bsp_init_alloc_addr != 0);
+
+    lpaddr_t addr = bsp_init_alloc_addr;
+
+    // Bump allocator: no upper bound is enforced here (unlike
+    // app_alloc_phys); the BSP region is assumed large enough.
+    bsp_init_alloc_addr += npages * BASE_PAGE_SIZE;
+    return addr;
+}
+
+// As bsp_alloc_phys, but first aligns the allocation cursor to 'align'.
+static lpaddr_t bsp_alloc_phys_aligned(size_t size, size_t align)
+{
+    bsp_init_alloc_addr = round_up(bsp_init_alloc_addr, align);
+    return bsp_alloc_phys(size);
+}
+
+/**
+ * Map frames into init process address space. Init has a contiguous set of
+ * l3 entries so this is straightforward.
+ *
+ * @param l3_table pointer to init's L3 table.
+ * @param l3_base virtual address represented by first L3 table entry
+ * @param va_base virtual address to map.
+ * @param pa_base physical address to associate with virtual address.
+ * @param bytes number of bytes to map.
+ * @param l3_flags ARM L3 small page flags for mapped pages.
+ */
+static void
+spawn_init_map(union armv8_l3_entry* l3_table,
+               lvaddr_t l3_base,
+               lvaddr_t va_base,
+               lpaddr_t pa_base,
+               size_t bytes,
+               uintptr_t l3_flags)
+{
+    // All addresses and the length must be page-aligned, and the target
+    // range must lie at or above the table's first covered address.
+    assert(va_base >= l3_base);
+    assert(0 == (va_base & (BASE_PAGE_SIZE - 1)));
+    assert(0 == (pa_base & (BASE_PAGE_SIZE - 1)));
+    assert(0 == (bytes & (BASE_PAGE_SIZE - 1)));
+
+    // Index of the first and one-past-last L3 entry to fill.
+    long bi = (va_base - l3_base) / BASE_PAGE_SIZE;
+    long li = bi + bytes / BASE_PAGE_SIZE;
+
+    while (bi < li)
+    {
+        paging_set_l3_entry((uintptr_t *)&l3_table[bi], pa_base, l3_flags);
+        pa_base += BASE_PAGE_SIZE;
+        bi++;
+    }
+}
+
+static uint32_t elf_to_l3_flags(uint32_t eflags)
+{
+    /* Translate ELF segment permission flags (PF_*) into AArch64 L3 page
+     * attributes for init's mappings. Only the R/W bits are considered;
+     * unknown combinations are fatal.
+     */
+    switch (eflags & (PF_W|PF_R))
+    {
+      case PF_W|PF_R:
+        return (AARCH64_L3_USR_RW |
+                AARCH64_L3_CACHEABLE |
+                AARCH64_L3_BUFFERABLE);
+      case PF_R:
+        return (AARCH64_L3_USR_RO |
+                AARCH64_L3_CACHEABLE |
+                AARCH64_L3_BUFFERABLE);
+      default:
+        panic("Unknown ELF flags combination.");
+    }
+}
+
+// Closure state for startup_alloc_init: the L3 table backing init's ELF
+// image, plus the virtual address represented by its first entry.
+struct startup_l3_info
+{
+    union armv8_l3_entry* l3_table;
+    lvaddr_t l3_base;
+};
+
+static errval_t
+startup_alloc_init(
+        void* state,
+        genvaddr_t gvbase,
+        size_t bytes,
+        uint32_t flags,
+        void** ret
+        )
+{
+    /* elf_load allocation callback: allocate physical memory to back the
+     * segment [gvbase, gvbase+bytes), map it into init's address space
+     * via the startup_l3_info passed as 'state', and return a kernel
+     * pointer through which the loader writes the segment contents.
+     */
+    const struct startup_l3_info* s2i = (const struct startup_l3_info*)state;
+
+    // Expand the request to whole pages; 'off' preserves the segment's
+    // sub-page offset so *ret points at gvbase's exact byte.
+    lvaddr_t sv = round_down((lvaddr_t)gvbase, BASE_PAGE_SIZE);
+    size_t off = (lvaddr_t)gvbase - sv;
+    lvaddr_t lv = round_up((lvaddr_t)gvbase + bytes, BASE_PAGE_SIZE);
+    lpaddr_t pa;
+
+    //STARTUP_PROGRESS();
+    if(hal_cpu_is_bsp())
+        pa = bsp_alloc_phys_aligned((lv - sv), BASE_PAGE_SIZE);
+    else
+        pa = app_alloc_phys_aligned((lv - sv), BASE_PAGE_SIZE);
+
+    if (lv > sv && (pa != 0))
+    {
+        spawn_init_map(s2i->l3_table, s2i->l3_base, sv,
+                       pa, lv - sv, elf_to_l3_flags(flags));
+        *ret = (void*)(local_phys_to_mem(pa) + off);
+    }
+    else
+    {
+        // Zero-length or failed allocation: report no backing memory.
+        *ret = 0;
+    }
+    return SYS_ERR_OK;
+}
+
+static void
+load_init_image(
+    struct startup_l3_info* l3i,
+    const char *name,
+    genvaddr_t* init_ep,
+    genvaddr_t* got_base
+    )
+{
+    /* Locate the multiboot module 'name', ELF-load it into the address
+     * space described by 'l3i', and return its entry point and GOT base.
+     */
+    lvaddr_t elf_base;
+    size_t elf_bytes;
+    errval_t err;
+
+
+    *init_ep = *got_base = 0;
+
+    /* Load the init ELF64 binary (EM_AARCH64) */
+    struct multiboot_modinfo *module = multiboot_find_module(name);
+    if (module == NULL) {
+        panic("Could not find init module!");
+    }
+
+    elf_base = local_phys_to_mem(module->mod_start);
+    elf_bytes = MULTIBOOT_MODULE_SIZE(*module);
+
+    debug(SUBSYS_STARTUP, "load_init_image %p %08x\n", elf_base, elf_bytes);
+    printf("load_init_image %p %08x\n", elf_base, elf_bytes);
+
+    err = elf_load(EM_AARCH64, startup_alloc_init, l3i,
+                   elf_base, elf_bytes, init_ep);
+    if (err_is_fail(err)) {
+        //err_print_calltrace(err);
+        // Report the module actually being loaded: this path also runs
+        // for the APP module, not just BSP_INIT_MODULE_NAME.
+        panic("ELF load of %s failed!\n", name);
+    }
+
+    // TODO: Fix application linkage so that it's non-PIC.
+    struct Elf64_Shdr* got_shdr =
+        elf64_find_section_header_name((lvaddr_t)elf_base, elf_bytes, ".got");
+    if (got_shdr)
+    {
+        *got_base = got_shdr->sh_addr;
+    }
+}
+
+/// Setup the module cnode, which contains frame caps to all multiboot modules
+void create_module_caps(struct spawn_state *st)
+{
+    errval_t err;
+
+    /* Create caps for multiboot modules */
+    struct multiboot_modinfo *module =
+        (struct multiboot_modinfo *)local_phys_to_mem(glbl_core_data->mods_addr);
+
+    // Allocate strings area
+    lpaddr_t mmstrings_phys = bsp_alloc_phys(BASE_PAGE_SIZE);
+    lvaddr_t mmstrings_base = local_phys_to_mem(mmstrings_phys);
+    lvaddr_t mmstrings = mmstrings_base;
+
+    // create cap for strings area in first slot of modulecn
+    assert(st->modulecn_slot == 0);
+    err = caps_create_new(ObjType_Frame, mmstrings_phys, BASE_PAGE_BITS,
+                          BASE_PAGE_BITS, my_core_id,
+                          caps_locate_slot(CNODE(st->modulecn),
+                                           st->modulecn_slot++));
+    assert(err_is_ok(err));
+
+    // Start with an empty region list; one entry is added per module below.
+    bootinfo->regions_length = 0;
+
+    /* Walk over multiboot modules, creating frame caps */
+    for (int i = 0; i < glbl_core_data->mods_count; i++) {
+        struct multiboot_modinfo *m = &module[i];
+
+        // Set memory regions within bootinfo
+        struct mem_region *region =
+            &bootinfo->regions[bootinfo->regions_length++];
+
+        genpaddr_t remain = MULTIBOOT_MODULE_SIZE(*m);
+        genpaddr_t base_addr = local_phys_to_gen_phys(m->mod_start);
+        region->mr_type = RegionType_Module;
+        region->mr_base = base_addr;
+        region->mrmod_slot = st->modulecn_slot;  // first slot containing caps
+        region->mrmod_size = remain;  // size of image _in bytes_
+        region->mrmod_data = mmstrings - mmstrings_base;  // offset of string in area
+
+        // round up to page size for caps
+        remain = ROUND_UP(remain, BASE_PAGE_SIZE);
+
+        // Create max-sized caps to multiboot module in module cnode
+        while (remain > 0) {
+            assert((base_addr & BASE_PAGE_MASK) == 0);
+            assert((remain & BASE_PAGE_MASK) == 0);
+
+            // determine size of next chunk
+            uint8_t block_size = bitaddralign(remain, base_addr);
+
+            assert(st->modulecn_slot < (1U << st->modulecn->cap.u.cnode.bits));
+            // create as DevFrame cap to avoid zeroing memory contents
+            err = caps_create_new(ObjType_DevFrame, base_addr, block_size,
+                                  block_size, my_core_id,
+                                  caps_locate_slot(CNODE(st->modulecn),
+                                                   st->modulecn_slot++));
+            assert(err_is_ok(err));
+
+            // Advance by that chunk
+            base_addr += ((genpaddr_t)1 << block_size);
+            remain -= ((genpaddr_t)1 << block_size);
+        }
+
+        // Copy multiboot module string to mmstrings area
+        strcpy((char *)mmstrings, MBADDR_ASSTRING(m->string));
+        mmstrings += strlen(MBADDR_ASSTRING(m->string)) + 1;
+        // Strings area is a single page; overflow would corrupt memory.
+        assert(mmstrings < mmstrings_base + BASE_PAGE_SIZE);
+    }
+}
+
+/// Create physical address range or RAM caps to unused physical memory
+static void create_phys_caps(lpaddr_t init_alloc_addr)
+{
+    errval_t err;
+
+    /* Walk multiboot MMAP structure, and create appropriate caps for memory */
+    char *mmap_addr = MBADDR_ASSTRING(glbl_core_data->mmap_addr);
+    genpaddr_t last_end_addr = 0;
+
+    for(char *m = mmap_addr; m < mmap_addr + glbl_core_data->mmap_length;)
+    {
+        struct multiboot_mmap *mmap = (struct multiboot_mmap * SAFE)TC(m);
+
+        debug(SUBSYS_STARTUP, "MMAP %llx--%llx Type %"PRIu32"\n",
+              mmap->base_addr, mmap->base_addr + mmap->length,
+              mmap->type);
+
+        if (last_end_addr >= init_alloc_addr
+            && mmap->base_addr > last_end_addr)
+        {
+            /* we have a gap between regions. add this as a physaddr range */
+            debug(SUBSYS_STARTUP, "physical address range %llx--%llx\n",
+                  last_end_addr, mmap->base_addr);
+
+            err = create_caps_to_cnode(last_end_addr,
+                                       mmap->base_addr - last_end_addr,
+                                       RegionType_PhyAddr, &spawn_state, bootinfo);
+            assert(err_is_ok(err));
+        }
+
+        if (mmap->type == MULTIBOOT_MEM_TYPE_RAM)
+        {
+            genpaddr_t base_addr = mmap->base_addr;
+            genpaddr_t end_addr = base_addr + mmap->length;
+
+            // only map RAM which is greater than init_alloc_addr
+            if (end_addr > local_phys_to_gen_phys(init_alloc_addr))
+            {
+                if (base_addr < local_phys_to_gen_phys(init_alloc_addr)) {
+                    base_addr = local_phys_to_gen_phys(init_alloc_addr);
+                }
+                debug(SUBSYS_STARTUP, "RAM %llx--%llx\n", base_addr, end_addr);
+
+                assert(end_addr >= base_addr);
+                err = create_caps_to_cnode(base_addr, end_addr - base_addr,
+                                           RegionType_Empty, &spawn_state, bootinfo);
+                assert(err_is_ok(err));
+            }
+        }
+        else if (mmap->base_addr > local_phys_to_gen_phys(init_alloc_addr))
+        {
+            /* XXX: The multiboot spec just says that mapping types other than
+             * RAM are "reserved", but GRUB always maps the ACPI tables as type
+             * 3, and things like the IOAPIC tend to show up as type 2 or 4,
+             * so we map all these regions as platform data
+             */
+            debug(SUBSYS_STARTUP, "platform %llx--%llx\n", mmap->base_addr,
+                  mmap->base_addr + mmap->length);
+            assert(mmap->base_addr > local_phys_to_gen_phys(init_alloc_addr));
+            err = create_caps_to_cnode(mmap->base_addr, mmap->length,
+                                       RegionType_PlatformData, &spawn_state, bootinfo);
+            assert(err_is_ok(err));
+        }
+        last_end_addr = mmap->base_addr + mmap->length;
+        // Multiboot mmap entries: the 'size' field does not include the
+        // size field itself, hence the extra 4 bytes when advancing.
+        m += mmap->size + 4;
+    }
+
+    // Assert that we have some physical address space
+    assert(last_end_addr != 0);
+
+    if (last_end_addr < PADDR_SPACE_SIZE)
+    {
+        /*
+         * FIXME: adding the full range results in too many caps to add
+         * to the cnode (and we can't handle such big caps in user-space
+         * yet anyway) so instead we limit it to something much smaller
+         */
+        genpaddr_t size = PADDR_SPACE_SIZE - last_end_addr;
+        const genpaddr_t phys_region_limit = 1ULL << 32; // PCI implementation limit
+        if (last_end_addr > phys_region_limit) {
+            size = 0; // end of RAM is already too high!
+        } else if (last_end_addr + size > phys_region_limit) {
+            size = phys_region_limit - last_end_addr;
+        }
+        debug(SUBSYS_STARTUP, "end physical address range %llx--%llx\n",
+              last_end_addr, last_end_addr + size);
+        err = create_caps_to_cnode(last_end_addr, size,
+                                   RegionType_PhyAddr, &spawn_state, bootinfo);
+        assert(err_is_ok(err));
+    }
+}
+
+/**
+ * \brief Allocate and initialise the boot page tables for init.
+ *
+ * Allocates the L1/L2/L3 translation tables for the init domain -- from the
+ * BSP physical allocator on the bootstrap core, from the per-core app
+ * allocator on application cores -- creates VNode capabilities covering
+ * them in init's page CNode, wires every L1 entry to its L2 table and every
+ * L2 entry to its L3 table for [INIT_VBASE, INIT_SPACE_LIMIT), and finally
+ * switches to the new L1 table.
+ */
+static void init_page_tables(void)
+{
+    // Create page tables for init, using the allocator matching this core's
+    // role (BSP vs application core).
+    if (hal_cpu_is_bsp()) {
+        init_l1 =
+            (union armv8_l1_entry *)
+            local_phys_to_mem(bsp_alloc_phys_aligned(INIT_L1_BYTES,
+                                                     PTABLE_SIZE));
+        memset(init_l1, 0, INIT_L1_BYTES);
+
+        init_l2 =
+            (union armv8_l2_entry *)
+            local_phys_to_mem(bsp_alloc_phys_aligned(INIT_L2_BYTES,
+                                                     PTABLE_SIZE));
+        memset(init_l2, 0, INIT_L2_BYTES);
+
+        init_l3 =
+            (union armv8_l3_entry *)
+            local_phys_to_mem(bsp_alloc_phys_aligned(INIT_L3_BYTES,
+                                                     PTABLE_SIZE));
+        memset(init_l3, 0, INIT_L3_BYTES);
+    } else {
+        /* BUGFIX: the L2 table was previously allocated with
+         * bsp_alloc_phys_aligned() on this (application-core) path, mixing
+         * the BSP allocator into app-core bootstrap; all three tables must
+         * come from the app allocator here. */
+        init_l1 =
+            (union armv8_l1_entry *)
+            local_phys_to_mem(app_alloc_phys_aligned(INIT_L1_BYTES,
+                                                     PTABLE_SIZE));
+        memset(init_l1, 0, INIT_L1_BYTES);
+
+        init_l2 =
+            (union armv8_l2_entry *)
+            local_phys_to_mem(app_alloc_phys_aligned(INIT_L2_BYTES,
+                                                     PTABLE_SIZE));
+        memset(init_l2, 0, INIT_L2_BYTES);
+
+        init_l3 =
+            (union armv8_l3_entry *)
+            local_phys_to_mem(app_alloc_phys_aligned(INIT_L3_BYTES,
+                                                     PTABLE_SIZE));
+        memset(init_l3, 0, INIT_L3_BYTES);
+    }
+
+    /* Map pagetables into page CN */
+    int pagecn_pagemap = 0;
+
+    /*
+     * AARCH64 has:
+     *
+     * L1 has 4 entries (4KB).
+     * L2 Coarse has 512 entries (512 * 8B = 4KB).
+     * L3 Coarse has 512 entries (512 * 8B = 4KB).
+     */
+    caps_create_new(
+        ObjType_VNode_AARCH64_l1,
+        mem_to_local_phys((lvaddr_t)init_l1),
+        vnode_objbits(ObjType_VNode_AARCH64_l1), 0,
+        my_core_id,
+        caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
+        );
+
+    // Map L2 tables into successive slots in pagecn
+    for (size_t i = 0; i < INIT_L2_SIZE; i++) {
+        size_t objbits_vnode = vnode_objbits(ObjType_VNode_AARCH64_l2);
+        assert(objbits_vnode == BASE_PAGE_BITS);
+        caps_create_new(
+            ObjType_VNode_AARCH64_l2,
+            mem_to_local_phys((lvaddr_t)init_l2) + (i << objbits_vnode),
+            objbits_vnode, 0,
+            my_core_id,
+            caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
+            );
+    }
+
+    // Map L3 tables into successive slots in pagecn
+    for (size_t i = 0; i < INIT_L3_SIZE; i++) {
+        size_t objbits_vnode = vnode_objbits(ObjType_VNode_AARCH64_l3);
+        assert(objbits_vnode == BASE_PAGE_BITS);
+        caps_create_new(
+            ObjType_VNode_AARCH64_l3,
+            mem_to_local_phys((lvaddr_t)init_l3) + (i << objbits_vnode),
+            objbits_vnode, 0,
+            my_core_id,
+            caps_locate_slot(CNODE(spawn_state.pagecn), pagecn_pagemap++)
+            );
+    }
+
+    /*
+     * Initialize init page tables - wire each L1 entry through to the
+     * corresponding L2 table.
+     */
+    for (lvaddr_t vaddr = INIT_VBASE;
+         vaddr < INIT_SPACE_LIMIT;
+         vaddr += HUGE_PAGE_SIZE) {
+        uintptr_t section = (vaddr - INIT_VBASE) / HUGE_PAGE_SIZE;
+        uintptr_t l2_off = section * PTABLE_SIZE;
+        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l2) + l2_off;
+        paging_map_user_pages_l1((lvaddr_t)init_l1, vaddr, paddr);
+    }
+
+    /*
+     * Initialize init page tables - wire each L2 entry through to the
+     * corresponding L3 table.
+     */
+    STATIC_ASSERT(0 == (INIT_VBASE % LARGE_PAGE_SIZE), "");
+    for (lvaddr_t vaddr = INIT_VBASE;
+         vaddr < INIT_SPACE_LIMIT;
+         vaddr += LARGE_PAGE_SIZE) {
+        uintptr_t section = (vaddr - INIT_VBASE) / LARGE_PAGE_SIZE;
+        uintptr_t l3_off = section * PTABLE_SIZE;
+
+        lpaddr_t paddr = mem_to_local_phys((lvaddr_t)init_l3) + l3_off;
+
+        paging_set_l2_entry(
+            (uintptr_t *)&init_l2[ARMv8_L2_OFFSET(vaddr)], paddr, 0);
+    }
+
+    // Activate the freshly-built address space.
+    paging_context_switch(mem_to_local_phys((lvaddr_t)init_l1));
+}
+
+/**
+ * \brief Shared setup for spawning an init domain (BSP and app cores).
+ *
+ * Creates the DCB via spawn_module(), builds init's page tables, maps the
+ * argument page and the dispatcher frame into init's vspace, creates the
+ * device-memory capability, and fills in the dispatcher save areas.
+ *
+ * \param name          Name of the init module.
+ * \param argc, argv    Command-line arguments passed to init.
+ * \param bootinfo_phys Physical address of the bootinfo page (0 on app cores).
+ * \param alloc_phys    Physical-memory allocator to use for this core.
+ *
+ * \return The newly created DCB for init.
+ */
+static struct dcb *spawn_init_common(const char *name,
+                                     int argc, const char *argv[],
+                                     lpaddr_t bootinfo_phys,
+                                     alloc_phys_func alloc_phys)
+{
+    lvaddr_t paramaddr;
+
+    struct dcb *init_dcb = spawn_module(&spawn_state, name,
+                                        argc, argv,
+                                        bootinfo_phys, INIT_ARGS_VBASE,
+                                        alloc_phys, &paramaddr);
+
+    init_page_tables();
+
+    // init's root page table is the L1 table just built.
+    init_dcb->vspace = mem_to_local_phys((lvaddr_t)init_l1);
+
+    // Map the argument page into init's vspace.
+    spawn_init_map(init_l3, INIT_VBASE, INIT_ARGS_VBASE,
+                   spawn_state.args_page, ARGS_SIZE, INIT_PERM_RW);
+
+    // Map dispatcher
+    spawn_init_map(init_l3, INIT_VBASE, INIT_DISPATCHER_VBASE,
+                   mem_to_local_phys(init_dcb->disp), DISPATCHER_SIZE,
+                   INIT_PERM_RW);
+
+
+    /*
+     * Create a capability that allows user-level applications to
+     * access device memory. This capability will be passed to Kaluga,
+     * split up into smaller pieces and distributed to among device
+     * drivers.
+     *
+     * For armv8_gem5, this is currently a dummy capability. We do not
+     * have support for user-level device drivers in gem5 yet, so we
+     * do not allocate any memory as device memory. Some cap_copy
+     * operations in the bootup code fail if this capability is not
+     * present.
+     *
+    struct cte *iocap = caps_locate_slot(CNODE(spawn_state.taskcn), TASKCN_SLOT_IO);
+    errval_t err = caps_create_new(ObjType_IO, 0, 0, 0, my_core_id, iocap);
+    assert(err_is_ok(err));*/
+
+    /* NOTE(review): the device region base/size (0x10000000, 2^28 bytes) is
+     * hard-coded here -- confirm it matches the platform's device window. */
+    struct cte *iocap = caps_locate_slot(CNODE(spawn_state.taskcn), TASKCN_SLOT_IO);
+    errval_t err = caps_create_new(ObjType_DevFrame, 0x10000000, 28, 28, my_core_id, iocap);
+    assert(err_is_ok(err));
+
+    struct dispatcher_shared_generic *disp
+        = get_dispatcher_shared_generic(init_dcb->disp);
+    struct dispatcher_shared_aarch64 *disp_aarch64
+        = get_dispatcher_shared_aarch64(init_dcb->disp);
+
+    /* Initialize dispatcher */
+    disp->disabled = true;
+    strncpy(disp->name, argv[0], DISP_NAME_LEN);
+
+    /* tell init the vspace addr of its dispatcher */
+    disp->udisp = INIT_DISPATCHER_VBASE;
+
+    // First enabled-area argument register carries the parameter address;
+    // start in user mode with FIQs masked; rtls points at the dispatcher.
+    disp_aarch64->enabled_save_area.named.x0   = paramaddr;
+    disp_aarch64->enabled_save_area.named.spsr = AARCH64_MODE_USR | CPSR_F_MASK;
+    disp_aarch64->enabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;
+    disp_aarch64->disabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;
+
+
+    return init_dcb;
+}
+
+/**
+ * \brief Spawn the init domain on the bootstrap (BSP) core.
+ *
+ * Allocates and maps the bootinfo page, loads the init ELF image, sets up
+ * the entry point and GOT base in the dispatcher save areas, and creates
+ * the module and physical-memory capabilities for init.
+ *
+ * \param name        Name of the init module.
+ * \param alloc_phys  BSP physical-memory allocator.
+ *
+ * \return The DCB of the spawned init domain.
+ */
+struct dcb *spawn_bsp_init(const char *name, alloc_phys_func alloc_phys)
+{
+    /* Only the first core can run this code */
+    assert(hal_cpu_is_bsp());
+
+    /* Allocate bootinfo */
+    lpaddr_t bootinfo_phys = alloc_phys(BOOTINFO_SIZE);
+    memset((void *)local_phys_to_mem(bootinfo_phys), 0, BOOTINFO_SIZE);
+
+    /* Construct cmdline args */
+    char bootinfochar[16];
+    snprintf(bootinfochar, sizeof(bootinfochar), "%u", INIT_BOOTINFO_VBASE);
+    const char *argv[] = { "init", bootinfochar };
+    int argc = 2;
+
+    struct dcb *init_dcb = spawn_init_common(name, argc, argv,bootinfo_phys, alloc_phys);
+    // Map bootinfo
+    spawn_init_map(init_l3, INIT_VBASE, INIT_BOOTINFO_VBASE,
+                   bootinfo_phys, BOOTINFO_SIZE , INIT_PERM_RW);
+
+    struct startup_l3_info l3_info = { init_l3, INIT_VBASE };
+
+    genvaddr_t init_ep, got_base;
+    load_init_image(&l3_info, BSP_INIT_MODULE_NAME, &init_ep, &got_base);
+
+    // Publish entry point and GOT base to the dispatcher; x10 carries the
+    // GOT base by PIC convention on this port.
+    struct dispatcher_shared_aarch64 *disp_aarch64
+        = get_dispatcher_shared_aarch64(init_dcb->disp);
+    disp_aarch64->enabled_save_area.named.x10  = got_base;
+    disp_aarch64->got_base = got_base;
+
+    disp_aarch64->disabled_save_area.named.pc   = init_ep;
+    disp_aarch64->disabled_save_area.named.spsr = AARCH64_MODE_USR | CPSR_F_MASK;
+    disp_aarch64->disabled_save_area.named.x10  = got_base;
+
+    /* Create caps for init to use */
+    create_module_caps(&spawn_state);
+    // alloc_phys(0) returns the current allocation watermark without
+    // allocating; everything above it becomes init's RAM caps.
+    lpaddr_t init_alloc_end = alloc_phys(0); // XXX
+    create_phys_caps(init_alloc_end);
+
+    /* Fill bootinfo struct */
+    bootinfo->mem_spawn_core = KERNEL_IMAGE_SIZE; // Size of kernel
+    /*
+    // Map dispatcher
+    spawn_init_map(init_l3, INIT_VBASE, INIT_DISPATCHER_VBASE,
+                   mem_to_local_phys(init_dcb->disp), DISPATCHER_SIZE,
+                   INIT_PERM_RW);
+    disp_aarch64->disabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;
+    */
+    return init_dcb;
+}
+
+/**
+ * \brief Spawn the init (monitor) domain on an application core.
+ *
+ * Builds the command line from the booting core's identifiers, maps the
+ * inter-monitor URPC frame, ELF-loads the monitor binary, and wires up the
+ * entry point and GOT base in the dispatcher save areas.
+ *
+ * \param core_data   Boot information handed over by the booting core.
+ * \param name        Name of the init module.
+ * \param alloc_phys  App-core physical-memory allocator.
+ *
+ * \return The DCB of the spawned init domain.
+ */
+struct dcb *spawn_app_init(struct arm_core_data *core_data,
+                           const char *name, alloc_phys_func alloc_phys)
+{
+    errval_t err;
+
+    /* Construct cmdline args */
+    // Core id of the core that booted this core
+    char coreidchar[10];
+    snprintf(coreidchar, sizeof(coreidchar), "%d", core_data->src_core_id);
+
+    // IPI channel id of core that booted this core
+    char chanidchar[30];
+    snprintf(chanidchar, sizeof(chanidchar), "chanid=%"PRIu32, core_data->chan_id);
+
+    // Arch id of the core that booted this core
+    char archidchar[30];
+    snprintf(archidchar, sizeof(archidchar), "archid=%d",
+             core_data->src_arch_id);
+
+    const char *argv[5] = { name, coreidchar, chanidchar, archidchar };
+    int argc = 4;
+
+    struct dcb *init_dcb = spawn_init_common(name, argc, argv,0, alloc_phys);
+
+    // Urpc frame cap
+    struct cte *urpc_frame_cte = caps_locate_slot(CNODE(spawn_state.taskcn),
+                                                  TASKCN_SLOT_MON_URPC);
+    // XXX: Create as devframe so the memory is not zeroed out
+    err = caps_create_new(ObjType_DevFrame, core_data->urpc_frame_base,
+                          core_data->urpc_frame_bits, core_data->urpc_frame_bits,
+                          my_core_id, urpc_frame_cte);
+    assert(err_is_ok(err));
+    // Retag as a regular frame now that creation skipped the zeroing.
+    urpc_frame_cte->cap.type = ObjType_Frame;
+    lpaddr_t urpc_ptr = gen_phys_to_local_phys(urpc_frame_cte->cap.u.frame.base);
+
+    /* Map urpc frame at MON_URPC_BASE */
+    spawn_init_map(init_l3, INIT_VBASE, MON_URPC_VBASE, urpc_ptr, MON_URPC_SIZE,
+                   INIT_PERM_RW);
+
+    struct startup_l3_info l3_info = { init_l3, INIT_VBASE };
+
+    // elf load the domain
+    genvaddr_t entry_point, got_base=0;
+    err = elf_load(EM_AARCH64, startup_alloc_init, &l3_info,
+                   local_phys_to_mem(core_data->monitor_binary),
+                   core_data->monitor_binary_size, &entry_point);
+    if (err_is_fail(err)) {
+        //err_print_calltrace(err);
+        panic("ELF load of init module failed!");
+    }
+
+    // TODO: Fix application linkage so that it's non-PIC.
+    struct Elf64_Shdr* got_shdr =
+        elf64_find_section_header_name(local_phys_to_mem(core_data->monitor_binary),
+                                       core_data->monitor_binary_size, ".got");
+    if (got_shdr)
+    {
+        got_base = got_shdr->sh_addr;
+    }
+
+    // x10 carries the GOT base by PIC convention on this port.
+    struct dispatcher_shared_aarch64 *disp_aarch64 =
+        get_dispatcher_shared_aarch64(init_dcb->disp);
+    disp_aarch64->enabled_save_area.named.x10  = got_base;
+    disp_aarch64->got_base = got_base;
+
+    disp_aarch64->disabled_save_area.named.pc   = entry_point;
+    disp_aarch64->disabled_save_area.named.spsr = AARCH64_MODE_USR | CPSR_F_MASK;
+    disp_aarch64->disabled_save_area.named.x10  = got_base;
+    //disp_aarch64->disabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;
+
+    return init_dcb;
+}
+
+/**
+ * \brief Architecture-specific kernel startup: spawn init and dispatch it.
+ *
+ * On the BSP core this initialises the physical allocator, allocates the
+ * initial KCB and spawns the BSP init domain; on application cores it
+ * adopts the handed-over KCB/core_data, sets up the per-core allocator and
+ * spawns the monitor. Does not return: ends by dispatching init.
+ */
+void arm_kernel_startup(void)
+{
+    /* Initialize the core_data */
+    /* Used when bringing up other cores, must be at consistent global address
+     * seen by all cores */
+    struct arm_core_data *core_data
+        = (void *)((lvaddr_t)&kernel_first_byte - BASE_PAGE_SIZE);
+
+    struct dcb *init_dcb;
+
+    if(hal_cpu_is_bsp())
+    {
+        printf("Doing BSP related bootup \n");
+
+        /* Initialize the location to allocate phys memory from */
+        printf("start_free_ram = 0x%lx\n", glbl_core_data->start_free_ram);
+        bsp_init_alloc_addr = glbl_core_data->start_free_ram;
+
+        /* allocate initial KCB */
+        kcb_current= (struct kcb *)local_phys_to_mem(
+                bsp_alloc_phys(sizeof(*kcb_current)));
+        assert(kcb_current);
+        memset(kcb_current, 0, sizeof(*kcb_current));
+
+        init_dcb = spawn_bsp_init(BSP_INIT_MODULE_NAME, bsp_alloc_phys);
+
+        // Start the periodic timer before entering user space.
+        pit_start(0);
+    }
+    else
+    {
+        printf("Doing non-BSP related bootup \n");
+
+        // kcb_current was handed over as a physical address by the booting
+        // core; translate it into this kernel's address space.
+        kcb_current = (struct kcb *)
+            local_phys_to_mem((lpaddr_t) kcb_current);
+
+        my_core_id = core_data->dst_core_id;
+
+        /* Initialize the allocator */
+        app_alloc_phys_start = core_data->memory_base_start;
+        app_alloc_phys_end   = ((lpaddr_t)1 << core_data->memory_bits) +
+                                    app_alloc_phys_start;
+
+        init_dcb = spawn_app_init(core_data, APP_INIT_MODULE_NAME,
+                                  app_alloc_phys);
+
+        // Acknowledge the IPI that woke this core up.
+        uint32_t irq = gic_get_active_irq();
+        gic_ack_irq(irq);
+    }
+
+    // enable interrupt forwarding to cpu
+    gic_cpu_interface_enable();
+
+    // Should not return
+    dispatch(init_dcb);
+
+    panic("Error spawning init!");
+
+}
--- /dev/null
+/*
+ * Copyright (c) 2009,2011,2015, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <kernel.h>
+
+#include <barrelfish_kpi/lmp.h>
+#include <barrelfish_kpi/syscalls.h>
+#include <barrelfish_kpi/sys_debug.h>
+#include <mdb/mdb_tree.h>
+
+#include <arm_hal.h>
+#include <irq.h>
+
+#include <paging_kernel_arch.h>
+#include <dispatch.h>
+#include <exec.h>
+#include <stdio.h>
+#include <sys_debug.h>
+#include <syscall.h>
+#include <arch/arm/syscall_arm.h>
+#include <start_aps.h>
+#include <useraccess.h>
+
+// helper macros for invocation handler definitions
+/* Declares a capability-invocation handler with the standard
+ * (capability, register context, argument count) signature. */
+#define INVOCATION_HANDLER(func) \
+static struct sysret \
+func( \
+    struct capability *kernel_cap, \
+    arch_registers_state_t* context, \
+    int argc \
+    )
+
+/* Checks the expected argument count and binds `sa` to the syscall
+ * argument registers; used as the first statement of a handler body. */
+#define INVOCATION_PRELUDE(n) \
+    assert(n == argc); \
+    struct registers_aarch64_syscall_args* sa = &context->syscall_args
+
+/* Marker for not-yet-implemented functionality. */
+#define NYI(str) printf("armv8: %s\n", str)
+
+__attribute__((noreturn)) void sys_syscall_kernel(void);
+__attribute__((noreturn)) void sys_syscall(arch_registers_state_t* context);
+
+/* The kernel must never issue a system call to itself; reaching this
+ * entry point is fatal. */
+__attribute__((noreturn))
+void sys_syscall_kernel(void)
+{
+    panic("Why is the kernel making a system call?");
+}
+
+/* Unpack the dispatcher-setup invocation arguments (cspace/vspace/dispframe
+ * cptrs, depth and run flag packed into arg4) and forward to the generic
+ * sys_dispatcher_setup(). */
+static struct sysret
+handle_dispatcher_setup(
+    struct capability* to,
+    arch_registers_state_t* context,
+    int argc
+    )
+{
+    assert(7 == argc);
+
+    struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+
+    capaddr_t odptr = sa->arg2;
+    capaddr_t cptr  = sa->arg3;
+    // arg4 packs cspace depth (low byte) and the run flag (remaining bits).
+    uintptr_t rundepth = sa->arg4;
+    int depth = rundepth & 0xff;
+    int run   = rundepth >> 8;
+    capaddr_t vptr = sa->arg5;
+    capaddr_t dptr = sa->arg6;
+
+    return sys_dispatcher_setup(to, cptr, depth, vptr, dptr, run, odptr);
+}
+
+/* Unpack scheduling properties (task type and weight packed into arg3)
+ * and forward to sys_dispatcher_properties(). */
+static struct sysret
+handle_dispatcher_properties(
+    struct capability* to,
+    arch_registers_state_t* context,
+    int argc
+    )
+{
+    assert(8 == argc);
+
+    struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+
+    enum task_type type = (enum task_type)(sa->arg3 >> 16);
+    uint16_t weight = sa->arg3 & 0xffff;
+
+    return sys_dispatcher_properties(to, type, sa->arg4,
+                                     sa->arg5, sa->arg6, sa->arg7, weight);
+}
+
+/* Performance-monitoring invocation: not implemented on this port. */
+static struct sysret
+handle_dispatcher_perfmon(
+    struct capability* to,
+    arch_registers_state_t* context,
+    int argc
+    )
+{
+    /* XXX - implement this? */
+    return SYSRET(SYS_ERR_PERFMON_NOT_AVAILABLE);
+}
+
+/* Return the physical base and size (in bits) of a Frame/DevFrame cap,
+ * packed into a single word: the base is page-aligned, so its low bits
+ * are free to carry the size. */
+static struct sysret
+handle_frame_identify(
+    struct capability* to,
+    arch_registers_state_t* context,
+    int argc
+    )
+{
+    assert(2 == argc);
+
+    assert(to->type == ObjType_Frame || to->type == ObjType_DevFrame);
+    assert((to->u.frame.base & BASE_PAGE_MASK) == 0);
+    // NOTE(review): comparing a bit count against BASE_PAGE_SIZE looks
+    // odd (trivially true) -- confirm intended bound.
+    assert(to->u.frame.bits < BASE_PAGE_SIZE);
+
+    return (struct sysret) {
+        .error = SYS_ERR_OK,
+        .value = to->u.frame.base | to->u.frame.bits,
+    };
+}
+
+/* Change the mapping flags on (part of) a mapped frame region. */
+static struct sysret
+handle_frame_modify_flags(
+        struct capability *to,
+        arch_registers_state_t *context,
+        int argc
+        )
+{
+    // Modify flags of (part of) mapped region of frame
+    assert (5 == argc);
+
+    assert(to->type == ObjType_Frame || to->type == ObjType_DevFrame);
+
+    // unpack arguments
+    struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+    size_t offset = sa->arg2; // in pages; of first page to modify from first
+                              // page in mapped region
+    size_t pages  = sa->arg3; // #pages to modify
+    size_t flags  = sa->arg4; // new flags
+
+    // NOTE(review): the result of paging_modify_flags() is discarded and
+    // SYS_ERR_OK returned unconditionally -- confirm it cannot fail.
+    paging_modify_flags(to, offset, pages, flags);
+
+    return (struct sysret) {
+        .error = SYS_ERR_OK,
+        .value = 0,
+    };
+}
+
+/* Mint a capability: copy with new type-specific parameters (arg5/arg6).
+ * arg4 packs dest slot (high), dest cnode valid bits and source valid bits
+ * (low two bytes). */
+static struct sysret
+handle_mint(
+    struct capability* root,
+    arch_registers_state_t* context,
+    int argc
+    )
+{
+    assert(7 == argc);
+
+    struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+
+    capaddr_t destcn_cptr = sa->arg2;
+    capaddr_t source_cptr = sa->arg3;
+    capaddr_t dest_slot   = sa->arg4 >> 16;
+    int destcn_vbits = (sa->arg4 >> 8) & 0xff;
+    int source_vbits = sa->arg4 & 0xff;
+
+    return sys_copy_or_mint(root, destcn_cptr, dest_slot, source_cptr,
+                            destcn_vbits, source_vbits, sa->arg5, sa->arg6,
+                            true);
+}
+
+/* Plain capability copy: same argument packing as handle_mint but without
+ * the two mint parameters. */
+static struct sysret
+handle_copy(
+    struct capability* root,
+    arch_registers_state_t* context,
+    int argc
+    )
+{
+    assert(5 == argc);
+
+    struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+
+    capaddr_t destcn_cptr = sa->arg2;
+    capaddr_t source_cptr = sa->arg3;
+    capaddr_t dest_slot   = sa->arg4 >> 16;
+    int destcn_vbits = (sa->arg4 >> 8) & 0xff;
+    int source_vbits = sa->arg4 & 0xff;
+
+    return sys_copy_or_mint(root, destcn_cptr, dest_slot, source_cptr,
+                            destcn_vbits, source_vbits, 0, 0, false);
+}
+
+/* Shared retype path for user and monitor invocations. arg3 packs the
+ * destination type (high), object bits (middle byte) and destination cnode
+ * valid bits (low byte). */
+static struct sysret
+handle_retype_common(
+    struct capability* root,
+    bool from_monitor,
+    arch_registers_state_t* context,
+    int argc
+    )
+{
+    assert(6 == argc);
+
+    struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+
+    // Source capability cptr
+    capaddr_t source_cptr = sa->arg2;
+    uintptr_t word = sa->arg3;
+    // Type to retype to
+    enum objtype type = word >> 16;
+    // Object bits for variable-sized types
+    uint8_t objbits = (word >> 8) & 0xff;
+    // Destination cnode cptr
+    capaddr_t dest_cnode_cptr = sa->arg4;
+    // Destination slot number
+    capaddr_t dest_slot = sa->arg5;
+    // Valid bits in destination cnode cptr
+    uint8_t dest_vbits = (word & 0xff);
+
+    return sys_retype(root, source_cptr, type, objbits, dest_cnode_cptr,
+                      dest_slot, dest_vbits, from_monitor);
+}
+
+/* User-level retype: delegates to the common path with from_monitor=false. */
+static struct sysret
+handle_retype(
+    struct capability* root,
+    arch_registers_state_t* context,
+    int argc
+    )
+{
+    return handle_retype_common(root, false, context, argc);
+}
+
+/* Delete the capability at (cptr, bits) relative to root. */
+static struct sysret
+handle_delete(
+    struct capability* root,
+    arch_registers_state_t* context,
+    int argc
+    )
+{
+    assert(4 == argc);
+
+    struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+
+    capaddr_t cptr = (capaddr_t)sa->arg2;
+    int     bits = (int)sa->arg3;
+
+    return sys_delete(root, cptr, bits);
+}
+
+/* Create a fresh capability of the given type. arg2 packs type (high),
+ * object bits (middle byte) and destination cnode valid bits (low byte). */
+static struct sysret
+handle_create(
+    struct capability* root,
+    arch_registers_state_t* context,
+    int argc
+    )
+{
+    assert(5 == argc);
+
+    struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+
+    enum objtype type = (sa->arg2 >> 16) & 0xffff;
+    uint8_t objbits   = (sa->arg2 >> 8) & 0xff;
+    capaddr_t dest_cptr = sa->arg3;
+    cslot_t dest_slot   = sa->arg4;
+    int bits = sa->arg2 & 0xff;
+    // NOTE(review): this printk fires on every create invocation -- looks
+    // like leftover debug output; confirm before removing.
+    printk(LOG_NOTE, "type = %d, bits = %d\n", type, bits);
+
+    return sys_create(root, type, objbits, dest_cptr, dest_slot, bits);
+}
+
+/* Revoke all copies and descendants of the capability at (cptr, bits). */
+static struct sysret
+handle_revoke(
+    struct capability* root,
+    arch_registers_state_t* context,
+    int argc
+    )
+{
+    assert(4 == argc);
+
+    struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+
+    capaddr_t cptr = (capaddr_t)sa->arg2;
+    int     bits = (int)sa->arg3;
+
+    return sys_revoke(root, cptr, bits);
+}
+
+/* Query the distributed-capability state of the cap at (cptr, bits). */
+static struct sysret
+handle_get_state(
+    struct capability* root,
+    arch_registers_state_t* context,
+    int argc
+    )
+{
+    assert(4 == argc);
+
+    struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+
+    capaddr_t cptr = (capaddr_t)sa->arg2;
+    int       bits = (int)sa->arg3;
+
+    return sys_get_state(root, cptr, bits);
+}
+
+/* Map a frame into a page table (VNode). arg3 packs destination slot
+ * (high) and source cptr valid bits (low byte); arg4-6 carry mapping
+ * flags, offset into the frame, and the number of PTEs to install. */
+static struct sysret
+handle_map(
+    struct capability *ptable,
+    arch_registers_state_t *context,
+    int argc
+    )
+{
+    assert(7 == argc);
+
+    struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+
+    /* Retrieve arguments */
+    capaddr_t source_cptr = (capaddr_t)sa->arg2;
+    capaddr_t dest_slot   = ((capaddr_t)sa->arg3) >> 16;
+    int source_vbits      = ((int)sa->arg3) & 0xff;
+    uintptr_t flags, offset,pte_count;
+    flags     = (uintptr_t)sa->arg4;
+    offset    = (uintptr_t)sa->arg5;
+    pte_count = (uintptr_t)sa->arg6;
+
+    return sys_map(ptable, dest_slot, source_cptr, source_vbits,
+                   flags, offset, pte_count);
+}
+
+/* Unmap entries from a page table. arg3 packs the mapping cptr's valid
+ * bits (bits 20-27), PTE count minus one (bits 10-19) and the starting
+ * entry (bits 0-9). */
+static struct sysret
+handle_unmap(
+    struct capability* ptable,
+    arch_registers_state_t* context,
+    int argc
+    )
+{
+    assert(4 == argc);
+
+    struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+
+    /* Retrieve arguments */
+    capaddr_t  mapping_cptr  = (capaddr_t)sa->arg2;
+    int mapping_bits         = (((int)sa->arg3) >> 20) & 0xff;
+    size_t pte_count         = (((size_t)sa->arg3) >> 10) & 0x3ff;
+    pte_count += 1;   // encoded as count-1 to fit 10 bits
+    size_t entry             = ((size_t)sa->arg3) & 0x3ff;
+
+    errval_t err;
+    struct cte *mapping = NULL;
+    // Resolve the mapping capability in the caller's cspace first.
+    err = caps_lookup_slot(&dcb_current->cspace.cap, mapping_cptr,
+                           mapping_bits, &mapping, CAPRIGHTS_READ_WRITE);
+    if (err_is_fail(err)) {
+        return SYSRET(err_push(err, SYS_ERR_CAP_NOT_FOUND));
+    }
+
+    err = page_mappings_unmap(ptable, mapping, entry, pte_count);
+    return SYSRET(err);
+}
+
+/// Different handler for cap operations performed by the monitor
+/* Monitor retype: resolves the target root cnode from arg6/arg7, then
+ * reuses the common retype path with from_monitor=true. */
+INVOCATION_HANDLER(monitor_handle_retype)
+{
+    INVOCATION_PRELUDE(8);
+    errval_t err;
+
+    struct capability *root;
+    err = caps_lookup_cap(&dcb_current->cspace.cap, sa->arg6,
+            sa->arg7, &root, CAPRIGHTS_READ);
+    if (err_is_fail(err)) {
+        return SYSRET(err_push(err, SYS_ERR_ROOT_CAP_LOOKUP));
+    }
+
+    /* XXX: this hides the first argument which retype_common doesn't know
+     * about */
+    return handle_retype_common(root, true, context, 6);
+}
+
+/* Check whether a user-described capability has descendants in the MDB. */
+INVOCATION_HANDLER(monitor_handle_has_descendants)
+{
+    INVOCATION_PRELUDE(3);
+    // check access to user pointer
+    if (!access_ok(ACCESS_READ, sa->arg2, sizeof(struct capability))) {
+        return SYSRET(SYS_ERR_INVALID_USER_BUFFER);
+    }
+
+    struct capability *src = (struct capability *)sa->arg2;
+
+    struct cte *next = mdb_find_greater(src, false);
+
+    return (struct sysret) {
+        .error = SYS_ERR_OK,
+        .value = (next && is_ancestor(&next->cap, src)),
+    };
+}
+
+/* Delete the last copy of a capability; cptrs in arg2-5, the three valid
+ * bit counts packed into arg6. */
+INVOCATION_HANDLER(monitor_handle_delete_last)
+{
+    INVOCATION_PRELUDE(7);
+    capaddr_t root_caddr   = sa->arg2;
+    capaddr_t target_caddr = sa->arg3;
+    capaddr_t retcn_caddr  = sa->arg4;
+    cslot_t retcn_slot     = sa->arg5;
+    uint8_t target_vbits   = (sa->arg6>>16)&0xff;
+    uint8_t root_vbits     = (sa->arg6>>8)&0xff;
+    uint8_t retcn_vbits    = sa->arg6&0xff;
+
+    return sys_monitor_delete_last(root_caddr, root_vbits, target_caddr,
+                                   target_vbits, retcn_caddr, retcn_vbits,
+                                   retcn_slot);
+}
+
+/* Delete all foreign-owned copies rooted under (caddr, bits). */
+INVOCATION_HANDLER(monitor_handle_delete_foreigns)
+{
+    INVOCATION_PRELUDE(4);
+    capaddr_t caddr = sa->arg2;
+    uint8_t bits = sa->arg3;
+    return sys_monitor_delete_foreigns(caddr, bits);
+}
+
+/* Mark the revoke target during a distributed revoke operation. */
+INVOCATION_HANDLER(monitor_handle_revoke_mark_tgt)
+{
+    INVOCATION_PRELUDE(6);
+    capaddr_t root_caddr   = sa->arg2;
+    uint8_t root_vbits     = sa->arg3;
+    capaddr_t target_caddr = sa->arg4;
+    uint8_t target_vbits   = sa->arg5;
+
+    return sys_monitor_revoke_mark_tgt(root_caddr, root_vbits,
+                                       target_caddr, target_vbits);
+}
+
+/* Mark relations of a user-described capability during revoke. */
+INVOCATION_HANDLER(monitor_handle_revoke_mark_rels)
+{
+    INVOCATION_PRELUDE(3);
+    // user pointer to src cap, check access
+    if (!access_ok(ACCESS_READ, sa->arg2, sizeof(struct capability))) {
+        return SYSRET(SYS_ERR_INVALID_USER_BUFFER);
+    }
+    struct capability *base = (struct capability*)sa->arg2;
+
+    return sys_monitor_revoke_mark_rels(base);
+}
+
+/* Perform one step of a distributed delete, returning any reclaimed cap
+ * into (ret_cn_addr, ret_cn_bits, ret_slot). */
+INVOCATION_HANDLER(monitor_handle_delete_step)
+{
+    INVOCATION_PRELUDE(5);
+    capaddr_t ret_cn_addr = sa->arg2;
+    capaddr_t ret_cn_bits = sa->arg3;
+    capaddr_t ret_slot    = sa->arg4;
+
+    return sys_monitor_delete_step(ret_cn_addr, ret_cn_bits, ret_slot);
+}
+
+/* Perform one step of clearing marked capabilities after a revoke. */
+INVOCATION_HANDLER(monitor_handle_clear_step)
+{
+    INVOCATION_PRELUDE(5);
+    capaddr_t ret_cn_addr = sa->arg2;
+    capaddr_t ret_cn_bits = sa->arg3;
+    capaddr_t ret_slot    = sa->arg4;
+
+    return sys_monitor_clear_step(ret_cn_addr, ret_cn_bits, ret_slot);
+}
+
+
+/* Return the logical id of the core executing this kernel. */
+static struct sysret
+monitor_get_core_id(
+    struct capability* to,
+    arch_registers_state_t* context,
+    int argc
+    )
+{
+    assert(2 == argc);
+
+    return (struct sysret) { .error = SYS_ERR_OK, .value = my_core_id };
+}
+
+/* Return the architectural (hardware) id of this core. */
+static struct sysret
+monitor_get_arch_id(
+    struct capability* to,
+    arch_registers_state_t* context,
+    int argc
+    )
+{
+    assert(2 == argc);
+
+    // TODO: ARM doesn't support multicore yet...
+    return (struct sysret) { .error = SYS_ERR_OK, .value = my_core_id };
+}
+
+/* Associate a domain id with the dispatcher capability at cptr. */
+INVOCATION_HANDLER(monitor_handle_domain_id)
+{
+    INVOCATION_PRELUDE(4);
+    capaddr_t cptr       = sa->arg2;
+    domainid_t domain_id = sa->arg3;
+
+    return sys_monitor_domain_id(cptr, domain_id);
+}
+
+/* Query the owning core of the capability at (cptr, bits) under the
+ * given root cnode. */
+INVOCATION_HANDLER(monitor_get_cap_owner)
+{
+    INVOCATION_PRELUDE(6);
+    capaddr_t root_addr = sa->arg2;
+    uint8_t root_bits   = sa->arg3;
+    capaddr_t cptr      = sa->arg4;
+    uint8_t bits        = sa->arg5;
+
+    return sys_get_cap_owner(root_addr, root_bits, cptr, bits);
+}
+
+/* Set the owning core of the capability at (cptr, bits). */
+INVOCATION_HANDLER(monitor_set_cap_owner)
+{
+    INVOCATION_PRELUDE(7);
+    capaddr_t root_addr = sa->arg2;
+    uint8_t root_bits   = sa->arg3;
+    capaddr_t cptr      = sa->arg4;
+    uint8_t bits        = sa->arg5;
+    coreid_t owner      = sa->arg6;
+
+    return sys_set_cap_owner(root_addr, root_bits, cptr, bits, owner);
+}
+
+/* Lock a capability for a distributed operation. */
+INVOCATION_HANDLER(monitor_lock_cap)
+{
+    INVOCATION_PRELUDE(6);
+    capaddr_t root_addr = sa->arg2;
+    uint8_t root_bits   = sa->arg3;
+    capaddr_t cptr      = sa->arg4;
+    uint8_t bits        = sa->arg5;
+
+    return sys_lock_cap(root_addr, root_bits, cptr, bits);
+}
+
+/* Release the lock taken by monitor_lock_cap. */
+INVOCATION_HANDLER(monitor_unlock_cap)
+{
+    INVOCATION_PRELUDE(6);
+    capaddr_t root_addr = sa->arg2;
+    uint8_t root_bits   = sa->arg3;
+    capaddr_t cptr      = sa->arg4;
+    uint8_t bits        = sa->arg5;
+
+    return sys_unlock_cap(root_addr, root_bits, cptr, bits);
+}
+
+/* Register the monitor's kernel endpoint for upcalls. */
+static struct sysret
+monitor_handle_register(
+    struct capability* to,
+    arch_registers_state_t* context,
+    int argc
+    )
+{
+    assert(3 == argc);
+
+    struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+
+    capaddr_t ep_caddr = (capaddr_t)sa->arg2;
+
+    return sys_monitor_register(ep_caddr);
+}
+
+/* Query which relations (copies/ancestors/descendants, masked by `mask`)
+ * the capability at (caddr, vbits) has. */
+INVOCATION_HANDLER(monitor_cap_has_relations)
+{
+    INVOCATION_PRELUDE(5);
+    capaddr_t caddr = sa->arg2;
+    uint8_t vbits = sa->arg3;
+    uint8_t mask = sa->arg4;
+
+    return sys_cap_has_relations(caddr, vbits, mask);
+}
+
+/* Update the remote-relations bits of the cap at (cptr, bits); arg6 packs
+ * the new relation bits (low byte) and the update mask (next byte). */
+INVOCATION_HANDLER(monitor_remote_relations)
+{
+    INVOCATION_PRELUDE(7);
+    capaddr_t root_addr = sa->arg2;
+    int root_bits       = sa->arg3;
+    capaddr_t cptr      = sa->arg4;
+    int bits            = sa->arg5;
+    uint8_t relations   = sa->arg6 & 0xFF;
+    uint8_t mask        = (sa->arg6 >> 8) & 0xFF;
+
+    return sys_monitor_remote_relations(root_addr, root_bits, cptr, bits,
+                                        relations, mask);
+}
+
+/* Create a copy of an existing capability described by a user-space
+ * struct capability, after validating the user pointer. */
+INVOCATION_HANDLER(monitor_copy_existing)
+{
+    INVOCATION_PRELUDE(6);
+    capaddr_t cnode_cptr = sa->arg2;
+    int cnode_vbits    = sa->arg3;
+    size_t slot        = sa->arg4;
+
+    // user pointer to src cap, check access
+    if (!access_ok(ACCESS_READ, sa->arg5, sizeof(struct capability))) {
+        return SYSRET(SYS_ERR_INVALID_USER_BUFFER);
+    }
+    /* Get the raw metadata of the capability to create from user pointer */
+    struct capability *src = (struct capability *)sa->arg5;
+
+    return sys_monitor_copy_existing(src, cnode_cptr, cnode_vbits, slot);
+}
+
+/* Nullify (wipe in place) the capability at (cptr, bits). */
+INVOCATION_HANDLER(monitor_nullify_cap)
+{
+    INVOCATION_PRELUDE(4);
+    capaddr_t cptr = sa->arg2;
+    int bits       = sa->arg3;
+
+    return sys_monitor_nullify_cap(cptr, bits);
+}
+
+/* Create a capability from a raw user-supplied description in the
+ * destination (cnode_cptr, cnode_vbits, slot) on behalf of `owner`.
+ * Refuses null caps and local copies of core-bound types. */
+static struct sysret
+monitor_create_cap(
+    struct capability *kernel_cap,
+    arch_registers_state_t* context,
+    int argc
+    )
+{
+    assert(7 == argc);
+
+    struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+
+    /* Create the cap in the destination */
+    capaddr_t cnode_cptr = sa->arg2;
+    int cnode_vbits      = sa->arg3;
+    size_t slot          = sa->arg4;
+    coreid_t owner       = sa->arg5;
+
+    /* BUGFIX: sa->arg6 is a user-supplied pointer to the raw capability
+     * representation; validate it before dereferencing, as the sibling
+     * monitor invocations (e.g. monitor_copy_existing) already do. */
+    if (!access_ok(ACCESS_READ, sa->arg6, sizeof(struct capability))) {
+        return SYSRET(SYS_ERR_INVALID_USER_BUFFER);
+    }
+    struct capability *src =
+        (struct capability*)sa->arg6;
+
+    /* Cannot create null caps */
+    if (src->type == ObjType_Null ) {
+        return SYSRET(SYS_ERR_ILLEGAL_DEST_TYPE);
+    }
+
+    /* For certain types, only foreign copies can be created here */
+    if ((src->type == ObjType_EndPoint || src->type == ObjType_Dispatcher
+         || src->type == ObjType_Kernel || src->type == ObjType_IRQTable)
+        && owner == my_core_id)
+    {
+        return SYSRET(SYS_ERR_ILLEGAL_DEST_TYPE);
+    }
+
+    return SYSRET(caps_create_from_existing(&dcb_current->cspace.cap,
+                                            cnode_cptr, cnode_vbits,
+                                            slot, owner, src));
+}
+
+/**
+ * \brief Spawn a new core and create a kernel cap for it.
+ */
+static struct sysret
+monitor_spawn_core(
+    struct capability *kernel_cap,
+    arch_registers_state_t* context,
+    int argc)
+{
+    /* XXX - Why is this commented out? */
+    //assert(3 == argc);
+
+    struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+
+    coreid_t core_id       = sa->arg2;
+    enum cpu_type cpu_type = sa->arg3;
+    genvaddr_t entry       = sa->arg5;
+
+    return sys_monitor_spawn_core(core_id, cpu_type, entry);
+}
+
+/* Write the full capability representation of (cptr, bits) -- looked up
+ * in the caller's cspace -- into the user buffer at arg4.
+ * NOTE(review): retbuf is not access_ok-checked here -- confirm
+ * sys_monitor_identify_cap validates it. */
+static struct sysret
+monitor_identify_cap(
+    struct capability *kernel_cap,
+    arch_registers_state_t* context,
+    int argc)
+{
+    struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+
+    capaddr_t cptr = sa->arg2;
+    int bits       = sa->arg3;
+    struct capability *retbuf = (void *)sa->arg4;
+
+    return sys_monitor_identify_cap(&dcb_current->cspace.cap, cptr, bits, retbuf);
+}
+
+/* Like monitor_identify_cap, but relative to another domain's root cnode
+ * given by (root_caddr, root_vbits). */
+INVOCATION_HANDLER(monitor_identify_domains_cap)
+{
+    /* XXX - why is this not used consistently? */
+    INVOCATION_PRELUDE(7);
+    errval_t err;
+
+    capaddr_t root_caddr = sa->arg2;
+    capaddr_t root_vbits = sa->arg3;
+    capaddr_t cptr       = sa->arg4;
+    int bits             = sa->arg5;
+    struct capability *retbuf = (void *)sa->arg6;
+
+    struct capability *root;
+    err = caps_lookup_cap(&dcb_current->cspace.cap, root_caddr, root_vbits,
+                          &root, CAPRIGHTS_READ);
+    if (err_is_fail(err)) {
+        return SYSRET(err_push(err, SYS_ERR_ROOT_CAP_LOOKUP));
+    }
+
+    return sys_monitor_identify_cap(root, cptr, bits, retbuf);
+}
+
+/* Bind interrupt number arg3 to the endpoint cap at arg2. */
+static struct sysret handle_irq_table_set( struct capability* to,
+        arch_registers_state_t* context, int argc)
+{
+    struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+
+    return SYSRET(irq_table_set(sa->arg2, sa->arg3));
+}
+
+
+/* Remove the interrupt binding for irq number arg2. */
+static struct sysret handle_irq_table_delete( struct capability* to,
+        arch_registers_state_t* context,
+        int argc
+        )
+{
+    struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+
+    return SYSRET(irq_table_delete(sa->arg2));
+}
+
+
+/* Debug invocation: print the page tables of the target dispatcher. */
+static struct sysret dispatcher_dump_ptables(
+    struct capability* to, arch_registers_state_t* context, int argc)
+{
+    assert(to->type == ObjType_Dispatcher);
+    assert(2 == argc);
+
+    printf("kernel_dump_ptables\n");
+
+    struct dcb *dispatcher = to->u.dispatcher.dcb;
+
+    paging_dump_tables(dispatcher);
+
+    return SYSRET(SYS_ERR_OK);
+}
+
+/* Debug invocation: print the capabilities held by the target dispatcher.
+ * NOTE(review): the callee's name ("cababilities") is misspelled at its
+ * definition site; fix there, not here. */
+static struct sysret dispatcher_dump_capabilities(struct capability *cap,
+        arch_registers_state_t* context, int argc)
+{
+    assert(cap->type == ObjType_Dispatcher);
+    assert(2 == argc);
+    struct dcb *dispatcher = cap->u.dispatcher.dcb;
+    errval_t err = debug_print_cababilities(dispatcher);
+    return SYSRET(err);
+}
+
+/* Write the id stored in an ID capability to the validated user buffer
+ * at arg2. */
+static struct sysret handle_idcap_identify(struct capability *to,
+                                           arch_registers_state_t *context,
+                                           int argc)
+{
+    assert(to->type == ObjType_ID);
+    assert(3 == argc);
+
+    struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+    idcap_id_t *idp = (idcap_id_t *) sa->arg2;
+
+    // Check validity of user space pointer
+    if (!access_ok(ACCESS_WRITE, (lvaddr_t) idp, sizeof(*idp))) {
+        return SYSRET(SYS_ERR_INVALID_USER_BUFFER);
+    }
+
+    return sys_idcap_identify(to, idp);
+}
+
+
+/* Identify a kernel control block capability. */
+static struct sysret handle_kcb_identify(struct capability *to,
+                                         arch_registers_state_t *context,
+                                         int argc)
+{
+    return sys_handle_kcb_identify(to);
+}
+
+/* Signature shared by all capability-invocation handlers. */
+typedef struct sysret (*invocation_t)(struct capability*,
+                                      arch_registers_state_t*, int);
+
+/* Dispatch table: invocations[cap type][command] -> handler.
+ * Unlisted entries are NULL and rejected by the invoke path. */
+static invocation_t invocations[ObjType_Num][CAP_MAX_CMD] = {
+    [ObjType_Dispatcher] = {
+        [DispatcherCmd_Setup]       = handle_dispatcher_setup,
+        [DispatcherCmd_Properties]  = handle_dispatcher_properties,
+        [DispatcherCmd_PerfMon]     = handle_dispatcher_perfmon,
+        [DispatcherCmd_DumpPTables] = dispatcher_dump_ptables,
+        [DispatcherCmd_DumpCapabilities] = dispatcher_dump_capabilities
+    },
+    [ObjType_KernelControlBlock] = {
+        [FrameCmd_Identify] = handle_kcb_identify
+    },
+    [ObjType_Frame] = {
+        [FrameCmd_Identify]    = handle_frame_identify,
+        [FrameCmd_ModifyFlags] = handle_frame_modify_flags,
+    },
+    [ObjType_DevFrame] = {
+        [FrameCmd_Identify]    = handle_frame_identify,
+        [FrameCmd_ModifyFlags] = handle_frame_modify_flags,
+    },
+    [ObjType_CNode] = {
+        [CNodeCmd_Copy]     = handle_copy,
+        [CNodeCmd_Mint]     = handle_mint,
+        [CNodeCmd_Retype]   = handle_retype,
+        [CNodeCmd_Delete]   = handle_delete,
+        [CNodeCmd_Revoke]   = handle_revoke,
+        [CNodeCmd_Create]   = handle_create,
+        [CNodeCmd_GetState] = handle_get_state,
+    },
+    [ObjType_VNode_AARCH64_l1] = {
+        [VNodeCmd_Map]   = handle_map,
+        [VNodeCmd_Unmap] = handle_unmap,
+    },
+    [ObjType_VNode_AARCH64_l2] = {
+        [VNodeCmd_Map]   = handle_map,
+        [VNodeCmd_Unmap] = handle_unmap,
+    },
+    [ObjType_VNode_AARCH64_l3] = {
+        [VNodeCmd_Map]   = handle_map,
+        [VNodeCmd_Unmap] = handle_unmap,
+    },
+    [ObjType_IRQTable] = {
+        [IRQTableCmd_Set]    = handle_irq_table_set,
+        [IRQTableCmd_Delete] = handle_irq_table_delete,
+    },
+    [ObjType_Kernel] = {
+        [KernelCmd_Cap_has_relations]   = monitor_cap_has_relations,
+        [KernelCmd_Clear_step]          = monitor_handle_clear_step,
+        [KernelCmd_Copy_existing]       = monitor_copy_existing,
+        [KernelCmd_Create_cap]          = monitor_create_cap,
+        [KernelCmd_Delete_foreigns]     = monitor_handle_delete_foreigns,
+        [KernelCmd_Delete_last]         = monitor_handle_delete_last,
+        [KernelCmd_Delete_step]         = monitor_handle_delete_step,
+        [KernelCmd_Domain_Id]           = monitor_handle_domain_id,
+        [KernelCmd_Get_arch_id]         = monitor_get_arch_id,
+        [KernelCmd_Get_cap_owner]       = monitor_get_cap_owner,
+        [KernelCmd_Get_core_id]         = monitor_get_core_id,
+        [KernelCmd_Has_descendants]     = monitor_handle_has_descendants,
+        [KernelCmd_Identify_cap]        = monitor_identify_cap,
+        [KernelCmd_Identify_domains_cap] = monitor_identify_domains_cap,
+        [KernelCmd_Lock_cap]            = monitor_lock_cap,
+        [KernelCmd_Nullify_cap]         = monitor_nullify_cap,
+        [KernelCmd_Register]            = monitor_handle_register,
+        [KernelCmd_Remote_relations]    = monitor_remote_relations,
+        [KernelCmd_Retype]              = monitor_handle_retype,
+        [KernelCmd_Revoke_mark_relations] = monitor_handle_revoke_mark_rels,
+        [KernelCmd_Revoke_mark_target]  = monitor_handle_revoke_mark_tgt,
+        [KernelCmd_Set_cap_owner]       = monitor_set_cap_owner,
+        /* XXX - why is this commented out? */
+        //[KernelCmd_Setup_trace]       = handle_trace_setup,
+        [KernelCmd_Spawn_core]          = monitor_spawn_core,
+        [KernelCmd_Unlock_cap]          = monitor_unlock_cap,
+    },
+    [ObjType_IPI] = {
+        [IPICmd_Send_Start] = monitor_spawn_core,
+    },
+    [ObjType_ID] = {
+        [IDCmd_Identify] = handle_idcap_identify
+    }
+};
+
+/* (review) Decode and dispatch a capability-invocation system call.
+ * Looks up the invoked capability in the current dispatcher's cspace;
+ * endpoint caps are treated as LMP message sends, every other cap type
+ * dispatches through the invocations[][] handler table.  For endpoint
+ * sends this may context-switch directly to the receiver via dispatch()
+ * and not return here. */
+static struct sysret
+handle_invoke(arch_registers_state_t *context, int argc)
+{
+ struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+
+ /* XXX - can we generate them from the same source? */
+ //
+ // Must match lib/barrelfish/include/arch/aarch64/arch/invocations.h
+ //
+ /* arg0 packing (see also sys_syscall): flags in bits [27:24],
+ * invoke_bits in [23:16]; cmd / send_bits share bits [15:8]. */
+ uint8_t flags = (sa->arg0 >> 24) & 0xf;
+ uint8_t invoke_bits = (sa->arg0 >> 16) & 0xff;
+ capaddr_t invoke_cptr = sa->arg1;
+
+ debug(SUBSYS_SYSCALL, "sys_invoke(0x%"PRIxCADDR"(%d))\n",
+ invoke_cptr, invoke_bits);
+
+ struct sysret r = { .error = SYS_ERR_OK, .value = 0 };
+
+ /* Resolve the invoked capability with read rights. */
+ struct capability* to;
+ r.error = caps_lookup_cap(&dcb_current->cspace.cap,
+ invoke_cptr, invoke_bits,
+ &to, CAPRIGHTS_READ);
+ if (err_is_ok(r.error))
+ {
+ assert(to != NULL);
+ assert(to->type < ObjType_Num);
+
+ if (ObjType_EndPoint == to->type)
+ {
+ /* LMP send to the endpoint's listener dispatcher. */
+ struct dcb *listener = to->u.endpoint.listener;
+ assert(listener != NULL);
+
+ if (listener->disp) {
+ /* XXX - not 64-bit clean */
+ uint8_t length_words = (sa->arg0 >> 28) & 0xff;
+ uint8_t send_bits = (sa->arg0 >> 8) & 0xff;
+ capaddr_t send_cptr = sa->arg2;
+ /* limit length of message from buggy/malicious sender */
+ length_words = min(length_words, LMP_MSG_LENGTH);
+
+ // does the sender want to yield their timeslice on success?
+ bool sync = flags & LMP_FLAG_SYNC;
+ // does the sender want to yield to the target
+ // if undeliverable?
+ bool yield = flags & LMP_FLAG_YIELD;
+ // is the cap (if present) to be deleted on send?
+ bool give_away = flags & LMP_FLAG_GIVEAWAY;
+
+ // Message registers in context are
+ // discontiguous for now so copy message words
+ // to temporary container. This is fixable, but
+ // not in this pass.
+ uintptr_t msg_words[LMP_MSG_LENGTH];
+ msg_words[0] = sa->arg3;
+ msg_words[1] = sa->arg4;
+ msg_words[2] = sa->arg5;
+ msg_words[3] = sa->arg6;
+ msg_words[4] = sa->arg7;
+ msg_words[5] = sa->arg8;
+ msg_words[6] = sa->arg9;
+ msg_words[7] = sa->arg10;
+ msg_words[8] = sa->arg11;
+ STATIC_ASSERT(LMP_MSG_LENGTH == 9, "Oops");
+
+ // try to deliver message
+ r.error = lmp_deliver(to, dcb_current, msg_words,
+ length_words, send_cptr, send_bits,
+ give_away);
+
+ /* Switch to receiver upon successful delivery
+ * with sync flag, or (some cases of)
+ * unsuccessful delivery with yield flag */
+ enum err_code err_code = err_no(r.error);
+ if ((sync && err_is_ok(r.error)) ||
+ (yield &&
+ (err_code == SYS_ERR_LMP_BUF_OVERFLOW
+ || err_code == SYS_ERR_LMP_CAPTRANSFER_DST_CNODE_LOOKUP
+ || err_code == SYS_ERR_LMP_CAPTRANSFER_DST_CNODE_INVALID
+ || err_code == SYS_ERR_LMP_CAPTRANSFER_DST_SLOT_OCCUPIED))
+ ) {
+ if (err_is_fail(r.error)) {
+ struct dispatcher_shared_generic *current_disp =
+ get_dispatcher_shared_generic(dcb_current->disp);
+ struct dispatcher_shared_generic *listener_disp =
+ get_dispatcher_shared_generic(listener->disp);
+ debug(SUBSYS_DISPATCH, "LMP failed; %.*s yields to %.*s: %u\n",
+ DISP_NAME_LEN, current_disp->name,
+ DISP_NAME_LEN, listener_disp->name, err_code);
+ }
+
+ // special-case context switch: ensure correct state in
+ // current DCB
+ dispatcher_handle_t handle = dcb_current->disp;
+ struct dispatcher_shared_aarch64 *disp =
+ get_dispatcher_shared_aarch64(handle);
+ dcb_current->disabled =
+ dispatcher_is_disabled_ip(handle, context->named.pc);
+ /* Record the error in the save area we will later resume
+ * from, since dispatch() below does not return here. */
+ if (dcb_current->disabled) {
+ assert(context == &disp->disabled_save_area);
+ context->named.x0 = r.error;
+ }
+ else {
+ assert(context == &disp->enabled_save_area);
+ context->named.x0 = r.error;
+ }
+ dispatch(listener);
+ }
+ }
+ else {
+ r.error = SYS_ERR_LMP_NO_TARGET;
+ }
+ }
+ else
+ {
+ /* Generic invocation: bounds-check the command and dispatch
+ * through the per-type handler table. */
+ uint8_t cmd = (sa->arg0 >> 8) & 0xff;
+ if (cmd < CAP_MAX_CMD)
+ {
+ invocation_t invocation = invocations[to->type][cmd];
+ if (invocation)
+ {
+ r = invocation(to, context, argc);
+ if (!dcb_current)
+ {
+ // dcb_current was removed, dispatch someone else
+ assert(err_is_ok(r.error));
+ dispatch(schedule());
+ }
+ return r;
+ }
+ }
+ printk(LOG_ERR, "Bad invocation type %d cmd %d\n", to->type, cmd);
+ r.error = SYS_ERR_ILLEGAL_INVOCATION;
+ }
+ }
+
+ return r;
+}
+
+/* (review) Handle SYSCALL_DEBUG sub-commands.  msg selects the debug
+ * operation; read operations return their result in sysret.value.
+ * Unknown messages yield SYS_ERR_ILLEGAL_SYSCALL. */
+static struct sysret handle_debug_syscall(int msg)
+{
+ struct sysret retval = { .error = SYS_ERR_OK };
+ switch (msg) {
+ case DEBUG_FLUSH_CACHE:
+ /* XXX - implement me */
+ break;
+
+ case DEBUG_CONTEXT_COUNTER_RESET:
+ dispatch_csc_reset();
+ break;
+
+ case DEBUG_CONTEXT_COUNTER_READ:
+ retval.value = dispatch_get_csc();
+ break;
+
+ case DEBUG_TIMESLICE_COUNTER_READ:
+ retval.value = kernel_now;
+ break;
+
+ case DEBUG_HARDWARE_TIMER_READ:
+ retval.value = tsc_read();
+ break;
+
+ case DEBUG_HARDWARE_TIMER_HERTZ_READ:
+ retval.value = tsc_get_hz();
+ break;
+
+ default:
+ printk(LOG_ERR, "invalid sys_debug msg type %d\n", msg);
+ retval.error = err_push(retval.error, SYS_ERR_ILLEGAL_SYSCALL);
+ }
+ return retval;
+}
+
+/* XXX - function documentation is inconsistent. */
+/**
+ * System call dispatch routine.
+ *
+ * Decodes the syscall number (arg0 bits [3:0]) and argument count
+ * (arg0 bits [7:4]) from the saved register context, dispatches to the
+ * appropriate handler, then writes the result into x0 (error) and
+ * x1 (value) of the saved context before resuming the caller.
+ *
+ * @return struct sysret for all calls except yield / invoke.
+ */
+/* XXX - why is this commented out? */
+//__attribute__((noreturn))
+void sys_syscall(arch_registers_state_t* context)
+{
+ STATIC_ASSERT_OFFSETOF(struct sysret, error, 0);
+
+ struct registers_aarch64_syscall_args* sa = &context->syscall_args;
+
+ /* Low byte of arg0: syscall number in [3:0], argc in [7:4]. */
+ uintptr_t syscall = sa->arg0 & 0xf;
+ uintptr_t argc = (sa->arg0 >> 4) & 0xf;
+
+ /* Default to an argument error; each case overwrites on success. */
+ struct sysret r = { .error = SYS_ERR_INVARGS_SYSCALL, .value = 0 };
+
+ switch (syscall)
+ {
+ case SYSCALL_INVOKE:
+ r = handle_invoke(context, argc);
+ break;
+
+ case SYSCALL_YIELD:
+ if (argc == 2)
+ {
+ r = sys_yield((capaddr_t)sa->arg1);
+ }
+ break;
+
+ case SYSCALL_NOP:
+ break;
+
+ case SYSCALL_PRINT:
+ if (argc == 3)
+ {
+ r.error = sys_print((const char*)sa->arg1, (size_t)sa->arg2);
+ }
+ break;
+
+ case SYSCALL_DEBUG:
+ if (argc == 2) {
+ r = handle_debug_syscall(sa->arg1);
+ }
+ break;
+
+ default:
+ panic("Illegal syscall");
+ /* defensive: only reached if panic() ever returns */
+ r.error = SYS_ERR_ILLEGAL_SYSCALL;
+ break;
+ }
+
+ if (r.error) {
+ /* XXX - not 64-bit clean, not AArch64-compatible. */
+ debug(SUBSYS_SYSCALL, "syscall failed %08"PRIx32" => %08"PRIxERRV"\n",
+ sa->arg0, r.error);
+ }
+
+ /* Return convention: x0 = error code, x1 = result value. */
+ context->named.x0 = r.error;
+ context->named.x1 = r.value;
+
+ resume(context);
+}
--- /dev/null
+/**
+ * \file
+ * \brief Cache control routines for ARMv8.
+ */
+
+/*
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef __ASSEMBLER__
+#define __ASSEMBLER__ 1
+#endif
+
+ .text
+ /* Export the cache/TLB/MMU maintenance entry points.
+ * (review) sysreg_invalidate_d_cache_fast is defined below but was
+ * missing from this list, unlike its _fast sibling; added here. */
+ .globl sysreg_invalidate_d_cache,\
+ sysreg_invalidate_i_and_d_caches,\
+ sysreg_invalidate_i_and_d_caches_fast, \
+ sysreg_invalidate_d_cache_fast, \
+ sysreg_invalidate_tlb_fn, \
+ sysreg_enable_mmu
+
+/* Based on algorithm from ARM Architecture Reference Manual */
+/* Walk every data/unified cache level reported by CLIDR_EL1 and issue a
+ * set/way operation on every line of every way.
+ * NOTE(review): "dc csw" is a *clean* by set/way, not an invalidate,
+ * despite this routine's name -- confirm the intended semantics.
+ * Preserves x0-x11 in a 96-byte stack frame; clobbers w16/w17 (the
+ * caller-saved intra-procedure-call registers). */
+sysreg_invalidate_d_cache:
+
+ sub sp, sp, #96
+
+ stp x0, x1, [sp, #16 * 0]
+ stp x2, x3, [sp, #16 * 1]
+ stp x4, x5, [sp, #16 * 2]
+ stp x6, x7, [sp, #16 * 3]
+ stp x8, x9, [sp, #16 * 4]
+ stp x10, x11, [sp, #16 * 5]
+
+ dmb sy // ensure ordering with previous memory accesses
+
+ mrs x0, clidr_el1
+ and w3, w0, #0x07000000 // get 2 x level of coherency
+ lsr w3, w3, #23
+ cbz w3, finished // no cache levels to clean
+ mov w10, #0 // w10 = 2 x cache level
+ mov w8, #1 // w8 = constant 0b1
+
+loop1:
+ add w2, w10, w10, lsr #1 // calculate 3 x cache level
+ lsr w1, w0, w2 // extract 3-bit cache type for this level
+ and w1, w1, #0x7
+ cmp w1, #2
+ b.lt skip // no data or unified cache at this level
+ msr csselr_el1, x10 // select this cache level
+ isb // sync change of csselr
+ mrs x1, ccsidr_el1 // read ccsidr
+ and w2, w1, #7 // w2 = log2(linelen)-4
+ add w2, w2, #4 // w2 = log2(linelen)
+ ubfx w4, w1, #3, #10 // w4 = max way number, right aligned
+ clz w5, w4 // w5 = 32-log2(ways), bit position of way in dc operand
+ lsl w9, w4, w5 // w9 = max way number, aligned to position in dc operand
+ lsl w16, w8, w5 // w16 = amount to decrement way number per iteration
+loop2:
+ ubfx w7, w1, #13, #15 // w7 = max set number, right aligned
+ lsl w7, w7, w2 // w7 = max set number, aligned to position in dc operand
+ lsl w17, w8, w2 // w17 = amount to decrement set number per iteration
+loop3:
+ orr w11, w10, w9 // w11 = combine way number and cache number ...
+ orr w11, w11, w7 // ... and set number for dc operand
+ dc csw, x11 // do data cache clean by set and way
+ subs w7, w7, w17 // decrement set number
+ b.ge loop3
+ subs x9, x9, x16 // decrement way number
+ b.ge loop2
+skip:
+ add w10, w10, #2 // increment 2 x cache level
+ cmp w3, w10
+ b.gt loop1
+finished:
+ mov x10, #0
+ msr csselr_el1, x10 // reset cache-size selection to level 0
+ dsb sy
+ isb
+
+ // restore the saved registers and unwind the 96-byte frame
+ ldp x0, x1, [sp], #16
+ ldp x2, x3, [sp], #16
+ ldp x4, x5, [sp], #16
+ ldp x6, x7, [sp], #16
+ ldp x8, x9, [sp], #16
+ ldp x10, x11, [sp], #16
+ ret
+
+// Clean the D-cache (via sysreg_invalidate_d_cache), then invalidate
+// the entire I-cache and branch predictor, inner shareable.
+// Return address is parked in x12 because "bl" clobbers x30.
+sysreg_invalidate_i_and_d_caches:
+ mov x12, x30 //lr
+ bl sysreg_invalidate_d_cache
+ mov x0, #0 // NOTE(review): "ic ialluis" takes no operand; x0 looks vestigial
+ ic ialluis // I+BTB cache invalidate
+ ret x12
+
+// D-cache-only variant ("fast": skips the I-cache/BTB invalidate).
+// NOTE(review): currently just a pass-through to
+// sysreg_invalidate_d_cache -- confirm the wrapper is still needed.
+sysreg_invalidate_d_cache_fast:
+ mov x12, x30 //lr
+ bl sysreg_invalidate_d_cache
+ ret x12
+
+// NOTE(review): body is identical to sysreg_invalidate_i_and_d_caches
+// above -- the "fast" variant does no less work; confirm intent.
+sysreg_invalidate_i_and_d_caches_fast:
+ mov x12, x30 //lr
+ bl sysreg_invalidate_d_cache
+ mov x0, #0
+ ic ialluis // I+BTB cache invalidate
+ ret x12
+
+// Invalidate all stage-1 EL0/EL1 TLB entries for the current VMID
+// ("tlbi vmalle1"), then synchronise with dsb/isb.
+// NOTE(review): x0-x11 are saved/restored although nothing here
+// modifies them -- confirm whether the frame is required.
+sysreg_invalidate_tlb:
+
+ sub sp, sp, #96
+
+ stp x0, x1, [sp, #16 * 0]
+ stp x2, x3, [sp, #16 * 1]
+ stp x4, x5, [sp, #16 * 2]
+ stp x6, x7, [sp, #16 * 3]
+ stp x8, x9, [sp, #16 * 4]
+ stp x10, x11, [sp, #16 * 5]
+
+ tlbi vmalle1
+ dsb sy
+ isb
+
+ ldp x0, x1, [sp], #16
+ ldp x2, x3, [sp], #16
+ ldp x4, x5, [sp], #16
+ ldp x6, x7, [sp], #16
+ ldp x8, x9, [sp], #16
+ ldp x10, x11, [sp], #16
+ ret
+
+// Exported wrapper for sysreg_invalidate_tlb (parks lr in x12 across
+// the "bl", then returns through it).
+sysreg_invalidate_tlb_fn:
+ mov x12, x30 //lr
+ bl sysreg_invalidate_tlb
+ ret x12
+
+// Configure and enable the EL1 MMU: invalidate I-cache/BTB and TLBs,
+// then program MAIR_EL1, TCR_EL1 and SCTLR_EL1 with fixed values.
+// NOTE(review): the three literals below are magic numbers; presumed
+// to encode the setup described in the comments -- verify against the
+// ARMv8 register encodings before changing.
+sysreg_enable_mmu:
+ mov x12, x30 //lr
+
+ ic iallu // I+BTB cache invalidate
+ tlbi vmalle1 // invalidate I + D TLBs
+ dsb sy
+
+ // MAIR_EL1: memory attribute indirection slots (device/normal)
+ ldr x0, =0xff440c0400
+ msr mair_el1, x0
+ isb
+
+ /* TCR - Translation Control Register
+ * 4K granularity, 32-bit addresses, two subranges:
+ *
+ * TTBR1_EL1 -> 0xffff_ffff_8000_0000 to 0xffff_ffff_ffff_ffff
+ * TTBR0_EL1 -> 0x0000_0000_0000_0000 to 0x0000_0000_7fff_ffff
+ */
+
+ ldr x0, =0x10b5203520
+ msr tcr_el1, x0
+ isb
+
+ // SCTLR_EL1: value sets the MMU-enable, D-cache and I-cache bits
+ // among others -- confirm remaining bits against the ARM ARM.
+ ldr x0, =0x30d0199d
+ msr sctlr_el1, x0
+ isb
+
+ ret x12
#include <exec.h>
#include <offsets.h>
#include <paging_kernel_arch.h>
-#include <phys_mmap.h>
#include <serial.h>
#include <spinlock.h>
#include <stdio.h>
struct multiboot_modinfo *mod = (struct multiboot_modinfo *)
local_phys_to_mem(glbl_core_data->mods_addr);
+ /* (review) removed leftover debug printf of glbl_core_data pointers */
+
for(size_t i = 0; i < glbl_core_data->mods_count; i++) {
const char *modname = MBADDR_ASSTRING(mod[i].string), *endstr;
+++ /dev/null
-/**\r
- * \file\r
- * \brief The world's simplest serial driver.\r
- *\r
- */\r
-\r
-/*\r
- * Copyright (c) 2010, ETH Zurich.\r
- * All rights reserved.\r
- *\r
- * This file is distributed under the terms in the attached LICENSE file.\r
- * If you do not find this file, copies can be found by writing to:\r
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.\r
- */\r
-\r
-#include <serial.h>\r
-#include <kputchar.h>\r
-\r
-#define KPBUFSZ 256\r
-static char kputbuf[KPBUFSZ];\r
-static int kcount = 0;\r
-\r
-static void kflush(void)\r
-{\r
- for(int i=0; i<kcount; i++) {\r
- if (kputbuf[i] == '\n') {\r
- serial_console_putchar('\r');\r
- }\r
- serial_console_putchar(kputbuf[i]);\r
- }\r
- kcount = 0;\r
-}\r
-\r
-void kprintf_begin(void)\r
-{\r
- kcount = 0;\r
-}\r
-\r
-int kputchar(int c)\r
-{\r
- kputbuf[kcount++] = c;\r
- if (kcount == KPBUFSZ || c == '\n')\r
- kflush();\r
- return c;\r
-}\r
-\r
-void kprintf_end(void)\r
-{\r
- kflush();\r
-}\r
-\r
-// End\r
*/
/*
- * Copyright (c) 2007, 2008, 2009, 2010, 2011, 2012, ETH Zurich.
+ * Copyright (c) 2007-2012,2015, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
- * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
#include <stdio.h>
#include <trace/trace.h>
#include <trace_definitions/trace_defs.h>
#include <wakeup.h>
-
-/// Sets the specified number of low-order bits to 1
-#define MASK(bits) ((1UL << bits) - 1)
+#include <bitmacros.h>
#ifdef TRACE_PMEM_CAPS
uintptr_t trace_types_enabled = TRACE_TYPES_ENABLED_INITIAL;
return snprintf(buf, len, "ARM L2 table at 0x%" PRIxGENPADDR,
cap->u.vnode_arm_l2.base);
+ case ObjType_VNode_AARCH64_l1:
+ return snprintf(buf, len, "AARCH64 L1 table at 0x%" PRIxGENPADDR,
+ cap->u.vnode_aarch64_l1.base);
+
+ case ObjType_VNode_AARCH64_l2:
+ return snprintf(buf, len, "AARCH64 L2 table at 0x%" PRIxGENPADDR,
+ cap->u.vnode_aarch64_l2.base);
+
+ case ObjType_VNode_AARCH64_l3:
+ return snprintf(buf, len, "AARCH64 L3 table at 0x%" PRIxGENPADDR,
+ cap->u.vnode_aarch64_l3.base);
+
case ObjType_VNode_x86_32_ptable:
return snprintf(buf, len, "x86_32 Page table at 0x%" PRIxGENPADDR,
cap->u.vnode_x86_32_ptable.base);
// If you create more capability types you need to deal with them
// in the table below.
-STATIC_ASSERT(27 == ObjType_Num, "Knowledge of all cap types");
+STATIC_ASSERT(30 == ObjType_Num, "Knowledge of all cap types");
static size_t caps_numobjs(enum objtype type, uint8_t bits, uint8_t objbits)
{
case ObjType_VNode_x86_32_ptable:
case ObjType_VNode_ARM_l1:
case ObjType_VNode_ARM_l2:
+ case ObjType_VNode_AARCH64_l1:
+ case ObjType_VNode_AARCH64_l2:
+ case ObjType_VNode_AARCH64_l3:
{
size_t objbits_vnode = vnode_objbits(type);
if (bits < objbits_vnode) {
*
* For the meaning of the parameters, see the 'caps_create' function.
*/
-STATIC_ASSERT(ObjType_Num == 27, "Knowledge of all cap types");
+STATIC_ASSERT(ObjType_Num == 30, "Knowledge of all cap types");
static errval_t caps_init_objects(enum objtype type, lpaddr_t lpaddr, uint8_t
bits, uint8_t objbits, size_t numobjs)
case ObjType_CNode:
case ObjType_VNode_ARM_l1:
case ObjType_VNode_ARM_l2:
+ case ObjType_VNode_AARCH64_l1:
+ case ObjType_VNode_AARCH64_l2:
+ case ObjType_VNode_AARCH64_l3:
case ObjType_VNode_x86_32_ptable:
case ObjType_VNode_x86_32_pdir:
case ObjType_VNode_x86_32_pdpt:
*/
// If you create more capability types you need to deal with them
// in the table below.
-STATIC_ASSERT(27 == ObjType_Num, "Knowledge of all cap types");
+STATIC_ASSERT(30 == ObjType_Num, "Knowledge of all cap types");
static errval_t caps_create(enum objtype type, lpaddr_t lpaddr, uint8_t bits,
uint8_t objbits, size_t numobjs, coreid_t owner,
break;
}
+ case ObjType_VNode_AARCH64_l1:
+ {
+ size_t objbits_vnode = vnode_objbits(type);
+
+ TRACE(KERNEL, BZERO, 1);
+ memset((void*)lvaddr, 0, 1UL << bits);
+ TRACE(KERNEL, BZERO, 0);
+
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
+ // Initialize type specific fields
+ src_cap.u.vnode_aarch64_l1.base =
+ genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
+
+#ifdef __aarch64__
+ // Insert kernel/mem mappings into new table.
+ lpaddr_t var = gen_phys_to_local_phys(src_cap.u.vnode_aarch64_l1.base);
+ paging_make_good(var);
+#endif
+
+ // Insert the capability
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
+ if (err_is_fail(err)) {
+ break;
+ }
+ }
+
+ break;
+ }
+
+ case ObjType_VNode_AARCH64_l2:
+ {
+ size_t objbits_vnode = vnode_objbits(type);
+
+ TRACE(KERNEL, BZERO, 1);
+ memset((void*)lvaddr, 0, 1UL << bits);
+ &n