basic armv7-m support
authorClaudio Föllmi <foellmic@student.ethz.ch>
Thu, 29 Aug 2013 07:49:46 +0000 (09:49 +0200)
committerClaudio Föllmi <foellmic@student.ethz.ch>
Thu, 29 Aug 2013 07:49:46 +0000 (09:49 +0200)
added new hake architecture "armv7-m"
added armv7-m port of kernel and libbarrelfish
fixed small arm_molly issues with parsing paths containing '-'

the port does not actually handle interrupts yet, but scheduling, spawning, and starting processes works
this update does not yet contain the armv7-a code to start the m3 core

65 files changed:
hake/ARMv7_M.hs [new file with mode: 0644]
hake/Args.hs
hake/RuleDefs.hs
hake/menu.lst.armv7-m [new file with mode: 0644]
hake/symbolic_targets.mk
if/arch/armv7-m.if [new file with mode: 0644]
if/platform/armv7-m.if [new file with mode: 0644]
include/arch/arm/barrelfish_kpi/asm_inlines_arch.h
include/arch/arm/barrelfish_kpi/paging_arch.h
include/arch/arm/barrelfish_kpi/registers_pushed_arm_v7m.h [new file with mode: 0644]
include/omap44xx_map.h
include/target/arm/barrelfish_kpi/paging_arm_v7m.h [new file with mode: 0644]
kernel/Hakefile
kernel/arch/armv7-m/boot.S [new file with mode: 0644]
kernel/arch/armv7-m/exceptions.S [new file with mode: 0644]
kernel/arch/armv7-m/exec.c [new file with mode: 0644]
kernel/arch/armv7-m/exn.c [new file with mode: 0644]
kernel/arch/armv7-m/init.c [new file with mode: 0644]
kernel/arch/armv7-m/linker.lds.in [new file with mode: 0644]
kernel/arch/armv7-m/omap.c [new file with mode: 0644]
kernel/arch/armv7-m/paging.c [new file with mode: 0644]
kernel/arch/armv7/kludges.c
kernel/arch/armv7/kputchar.c
kernel/arch/armv7/syscall.c
kernel/arch/omap44xx/startup_arch.c
kernel/include/arch/armv7-m/arch_gdb_stub.h [new file with mode: 0644]
kernel/include/arch/armv7-m/arm.h [new file with mode: 0644]
kernel/include/arch/armv7-m/arm_core_data.h [new file with mode: 0644]
kernel/include/arch/armv7-m/arm_hal.h [new file with mode: 0644]
kernel/include/arch/armv7-m/armv7_syscall.h [new file with mode: 0644]
kernel/include/arch/armv7-m/cp15.h [new file with mode: 0644]
kernel/include/arch/armv7-m/exceptions.h [new file with mode: 0644]
kernel/include/arch/armv7-m/global.h [new file with mode: 0644]
kernel/include/arch/armv7-m/init.h [new file with mode: 0644]
kernel/include/arch/armv7-m/io.h [new file with mode: 0644]
kernel/include/arch/armv7-m/irq.h [new file with mode: 0644]
kernel/include/arch/armv7-m/ixp2800_uart.h [new file with mode: 0644]
kernel/include/arch/armv7-m/kernel_multiboot.h [new file with mode: 0644]
kernel/include/arch/armv7-m/kputchar.h [new file with mode: 0644]
kernel/include/arch/armv7-m/misc.h [new file with mode: 0644]
kernel/include/arch/armv7-m/offsets.h [new file with mode: 0644]
kernel/include/arch/armv7-m/paging_kernel_arch.h [new file with mode: 0644]
kernel/include/arch/armv7-m/phys_mmap.h [new file with mode: 0644]
kernel/include/arch/armv7-m/pl011_uart.h [new file with mode: 0644]
kernel/include/arch/armv7-m/spinlock.h [new file with mode: 0644]
kernel/include/arch/armv7-m/start_aps.h [new file with mode: 0644]
kernel/include/arch/armv7-m/startup_arch.h [new file with mode: 0644]
kernel/include/arch/armv7-m/ti_i2c.h [new file with mode: 0644]
lib/barrelfish/Hakefile
lib/barrelfish/arch/arm/dispatch.c
lib/barrelfish/arch/arm/entry.S
lib/barrelfish/arch/arm/pmap_arch.c
lib/barrelfish/arch/arm/syscall.S
lib/barrelfish/include/arch/arm/arch/registers.h
lib/crt/arch/arm/crt0.S
lib/newlib/newlib/libc/Hakefile
lib/spawndomain/arch/arm/spawn_arch.c
tools/arm_molly/build_data_files.sh
tools/arm_molly/build_multiboot.c
tools/arm_molly/molly_boot.S
usr/drivers/omap44xx/fdif/Hakefile
usr/init/init.c
usr/monitor/Hakefile
usr/monitor/arch/armv7/boot.c
usr/skb/skb_simple/Hakefile

diff --git a/hake/ARMv7_M.hs b/hake/ARMv7_M.hs
new file mode 100644 (file)
index 0000000..7abc275
--- /dev/null
@@ -0,0 +1,201 @@
+--------------------------------------------------------------------------
+-- Copyright (c) 2007-2010, ETH Zurich.
+-- All rights reserved.
+--
+-- This file is distributed under the terms in the attached LICENSE file.
+-- If you do not find this file, copies can be found by writing to:
+-- ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+--
+-- Architectural definitions for Barrelfish on ARMv7-M ISA.
+--
+-- The build target is the cortex-m3 side-processor on the pandaboard
+--------------------------------------------------------------------------
+
+module ARMv7_M where
+--module names can not contain "-", so I went for an underscore instead
+import HakeTypes
+import Path
+import qualified Config
+import qualified ArchDefaults
+
+-------------------------------------------------------------------------
+--
+-- Architecture specific definitions for ARM
+--
+-------------------------------------------------------------------------
+
+arch = "armv7-m"
+archFamily = "arm"
+
+compiler = "arm-linux-gnueabi-gcc"
+objcopy  = "arm-linux-gnueabi-objcopy"
+objdump  = "arm-linux-gnueabi-objdump"
+ar       = "arm-linux-gnueabi-ar"
+ranlib   = "arm-linux-gnueabi-ranlib"
+cxxcompiler = "arm-linux-gnueabi-g++"
+
+ourCommonFlags = [ Str "-fno-unwind-tables",
+                   Str "-Wno-packed-bitfield-compat",
+                   Str "-mthumb",
+                   Str "-mcpu=cortex-m3",
+                   Str "-march=armv7-m",
+                   Str "-mapcs",
+                   Str "-mabi=aapcs-linux",
+                   Str "-msingle-pic-base",
+                   Str "-mpic-register=r10",
+                   Str "-DPIC_REGISTER=R10",
+                   Str "-fPIE",
+                   Str "-ffixed-r9",
+                   Str "-DTHREAD_REGISTER=R9",
+                   Str "-D__ARM_CORTEX__",
+                   Str "-D__ARM_ARCH_7M__",
+                   Str "-Wno-unused-but-set-variable",
+                   Str "-Wno-format",
+                   Str ("-D__" ++ Config.armv7_platform ++ "__")
+ ]
+
+cFlags = ArchDefaults.commonCFlags 
+         ++ ArchDefaults.commonFlags
+         ++ ourCommonFlags
+
+cxxFlags = ArchDefaults.commonCxxFlags
+           ++ ArchDefaults.commonFlags
+           ++ ourCommonFlags
+
+cDefines = ArchDefaults.cDefines options
+
+
+
+ourLdFlags = [ Str "-mcpu=cortex-m3",
+               Str "-mthumb",             --necessary to get correct division library
+               Str "-Wl,-section-start,.text=0x400000",
+               Str "-Wl,-section-start,.data=0x600000" ]
+
+
+
+ldFlags = ArchDefaults.ldFlags arch ++ ourLdFlags
+ldCxxFlags = ArchDefaults.ldCxxFlags arch ++ ourLdFlags
+
+stdLibs = ArchDefaults.stdLibs arch ++ [ Str "-lgcc" ]
+
+options = (ArchDefaults.options arch archFamily) { 
+            optFlags = cFlags,
+            optCxxFlags = cxxFlags,
+            optDefines = cDefines,
+            optDependencies = 
+                [ PreDep InstallTree arch "/include/errors/errno.h",
+                  PreDep InstallTree arch "/include/barrelfish_kpi/capbits.h",
+                  PreDep InstallTree arch "/include/asmoffsets.h",
+                  PreDep InstallTree arch "/include/trace_definitions/trace_defs.h"],
+            optLdFlags = ldFlags,
+            optLdCxxFlags = ldCxxFlags,
+            optLibs = stdLibs,
+            optInterconnectDrivers = ["lmp", "ump"],
+            optFlounderBackends = ["lmp", "ump"]
+          }
+
+--
+-- Compilers
+--
+cCompiler = ArchDefaults.cCompiler arch compiler
+cxxCompiler = ArchDefaults.cxxCompiler arch cxxcompiler
+makeDepend = ArchDefaults.makeDepend arch compiler
+makeCxxDepend  = ArchDefaults.makeCxxDepend arch cxxcompiler
+cToAssembler = ArchDefaults.cToAssembler arch compiler
+assembler = ArchDefaults.assembler arch compiler
+archive = ArchDefaults.archive arch
+linker = ArchDefaults.linker arch compiler
+cxxlinker = ArchDefaults.cxxlinker arch cxxcompiler
+
+
+--
+-- The kernel is "different"
+--
+
+kernelCFlags = [ Str s | s <- [ "-fno-builtin",
+                                "-fno-unwind-tables",
+                                "-nostdinc",
+                                "-std=c99",
+                                "-mthumb",
+                                "-mcpu=cortex-m3",
+                                "-march=armv7-m",
+                                "-mapcs",
+                                "-mabi=aapcs-linux",
+                                "-fPIE",
+                                "-U__linux__",
+                                "-Wall",
+                                "-Wshadow",
+                                "-Wstrict-prototypes",
+                                "-Wold-style-definition",
+                                "-Wmissing-prototypes",
+                                "-Wmissing-declarations",
+                                "-Wmissing-field-initializers",
+                                "-Wredundant-decls",
+                                "-Werror",
+                                "-imacros deputy/nodeputy.h",
+                                "-fno-stack-check",
+                                "-ffreestanding",
+                                "-fomit-frame-pointer",
+                                "-mno-long-calls",
+                                "-Wmissing-noreturn",
+                                "-mno-apcs-stack-check",
+                                "-mno-apcs-reentrant",
+                                "-msingle-pic-base",
+                                "-mpic-register=r10",
+                                "-DPIC_REGISTER=R10",
+                                "-ffixed-r9",
+                                "-DTHREAD_REGISTER=R9",
+                                "-D__ARM_CORTEX__",
+                                "-D__ARM_ARCH_7M__",
+                                "-Wno-unused-but-set-variable",
+                                "-Wno-format",
+                                "-D__" ++ Config.armv7_platform ++ "__" ]]
+
+kernelLdFlags = [ Str "-Wl,-N",
+                  Str "-fno-builtin",
+                  Str "-nostdlib",
+                  Str "-pie",
+                  Str "-Wl,--fatal-warnings",
+                  Str "-mthumb",
+                  Str "-mcpu=cortex-m3"
+                ]---mthumb is necessary to get the correct lgcc (for division)
+
+
+--
+-- Link the kernel (CPU Driver)
+--
+linkKernel :: Options -> [String] -> [String] -> String -> HRule
+linkKernel opts objs libs name =
+    let linkscript = "/kernel/" ++ name ++ ".lds"
+        kernelmap  = "/kernel/" ++ name ++ ".map"
+        kasmdump   = "/kernel/" ++ name ++ ".asm"
+        kbinary    = "/sbin/" ++ name
+        kbootable  = kbinary ++ ".bin"
+    in
+        Rules [ Rule ([ Str compiler, Str Config.cOptFlags,
+                      NStr "-T", In BuildTree arch linkscript,
+                      Str "-o", Out arch kbinary,
+                      NStr "-Wl,-Map,", Out arch kernelmap
+                    ]
+                    ++ (optLdFlags opts)
+                    ++
+                    [ In BuildTree arch o | o <- objs ]
+                    ++
+                    [ In BuildTree arch l | l <- libs ]
+                    ++
+                    [ Str "-lgcc" ]
+                   ),
+              -- Generate kernel assembly dump
+              Rule [ Str objdump, 
+                     Str "-d", 
+                     Str "-M reg-names-raw",
+                     In BuildTree arch kbinary, 
+                     Str ">", Out arch kasmdump ],
+              Rule [ Str "cpp",
+                     NStr "-I", NoDep SrcTree "src" "/kernel/include/arch/armv7-m",
+                     Str "-D__ASSEMBLER__",
+                     Str ("-D__" ++ Config.armv7_platform ++ "__"),
+                     Str "-P", In SrcTree "src" "/kernel/arch/armv7-m/linker.lds.in",
+                     Out arch linkscript
+                   ]
+            ]
index 605a7fd..72a1655 100644 (file)
@@ -68,7 +68,7 @@ defaultArgs = Args {
       architectures = allArchitectures
 }
 
-allArchitectures = [ "x86_64", "x86_32", "armv5", "arm11mp", "scc", "xscale", "armv7" ]
+allArchitectures = [ "x86_64", "x86_32", "armv5", "arm11mp", "scc", "xscale", "armv7", "armv7-m" ]
 allArchitectureFamilies = [ "x86_64", "x86_32", "arm", "scc" ]
 -- architectures that currently support THC
 thcArchitectures = ["x86_64", "x86_32", "scc"]
index 53b4cc6..ef9d0f1 100644 (file)
@@ -21,6 +21,7 @@ import qualified ARMv5
 import qualified ARM11MP
 import qualified XScale
 import qualified ARMv7
+import qualified ARMv7_M
 import HakeTypes
 import qualified Args
 import qualified Config
@@ -83,6 +84,7 @@ options "armv5" = ARMv5.options
 options "arm11mp" = ARM11MP.options
 options "xscale" = XScale.options
 options "armv7" = ARMv7.options
+options "armv7-m" = ARMv7_M.options
 
 kernelCFlags "x86_64" = X86_64.kernelCFlags
 kernelCFlags "x86_32" = X86_32.kernelCFlags
@@ -91,6 +93,7 @@ kernelCFlags "armv5" = ARMv5.kernelCFlags
 kernelCFlags "arm11mp" = ARM11MP.kernelCFlags
 kernelCFlags "xscale" = XScale.kernelCFlags
 kernelCFlags "armv7" = ARMv7.kernelCFlags
+kernelCFlags "armv7-m" = ARMv7_M.kernelCFlags
 
 kernelLdFlags "x86_64" = X86_64.kernelLdFlags
 kernelLdFlags "x86_32" = X86_32.kernelLdFlags
@@ -99,6 +102,7 @@ kernelLdFlags "armv5" = ARMv5.kernelLdFlags
 kernelLdFlags "arm11mp" = ARM11MP.kernelLdFlags
 kernelLdFlags "xscale" = XScale.kernelLdFlags
 kernelLdFlags "armv7" = ARMv7.kernelLdFlags
+kernelLdFlags "armv7-m" = ARMv7_M.kernelLdFlags
 
 archFamily :: String -> String
 archFamily arch = optArchFamily (options arch)
@@ -172,6 +176,7 @@ cCompiler opts phase src obj
     | optArch opts == "arm11mp" = ARM11MP.cCompiler opts phase src obj
     | optArch opts == "xscale" = XScale.cCompiler opts phase src obj
     | optArch opts == "armv7" = ARMv7.cCompiler opts phase src obj
+    | optArch opts == "armv7-m" = ARMv7_M.cCompiler opts phase src obj
     | otherwise = [ ErrorMsg ("no C compiler for " ++ (optArch opts)) ]
 
 cPreprocessor :: Options -> String -> String -> String -> [ RuleToken ]
@@ -206,6 +211,8 @@ makeDepend opts phase src obj depfile
         XScale.makeDepend opts phase src obj depfile
     | optArch opts == "armv7" = 
         ARMv7.makeDepend opts phase src obj depfile
+    | optArch opts == "armv7-m" = 
+        ARMv7_M.makeDepend opts phase src obj depfile
     | otherwise = [ ErrorMsg ("no dependency generator for " ++ (optArch opts)) ]
 
 makeCxxDepend :: Options -> String -> String -> String -> String -> [ RuleToken ]
@@ -223,6 +230,7 @@ cToAssembler opts phase src afile objdepfile
     | optArch opts == "arm11mp" = ARM11MP.cToAssembler opts phase src afile objdepfile
     | optArch opts == "xscale" = XScale.cToAssembler opts phase src afile objdepfile
     | optArch opts == "armv7" = ARMv7.cToAssembler opts phase src afile objdepfile
+    | optArch opts == "armv7-m" = ARMv7_M.cToAssembler opts phase src afile objdepfile
     | otherwise = [ ErrorMsg ("no C compiler for " ++ (optArch opts)) ]
 
 --
@@ -237,6 +245,7 @@ assembler opts src obj
     | optArch opts == "arm11mp" = ARM11MP.assembler opts src obj
     | optArch opts == "xscale" = XScale.assembler opts src obj
     | optArch opts == "armv7" = ARMv7.assembler opts src obj
+    | optArch opts == "armv7-m" = ARMv7_M.assembler opts src obj
     | otherwise = [ ErrorMsg ("no assembler for " ++ (optArch opts)) ]
 
 archive :: Options -> [String] -> [String] -> String -> String -> [ RuleToken ]
@@ -248,6 +257,7 @@ archive opts objs libs name libname
     | optArch opts == "arm11mp" = ARM11MP.archive opts objs libs name libname
     | optArch opts == "xscale" = XScale.archive opts objs libs name libname
     | optArch opts == "armv7" = ARMv7.archive opts objs libs name libname
+    | optArch opts == "armv7-m" = ARMv7_M.archive opts objs libs name libname
     | otherwise = [ ErrorMsg ("Can't build a library for " ++ (optArch opts)) ]
 
 linker :: Options -> [String] -> [String] -> String -> [RuleToken]
@@ -259,6 +269,7 @@ linker opts objs libs bin
     | optArch opts == "arm11mp" = ARM11MP.linker opts objs libs bin
     | optArch opts == "xscale" = XScale.linker opts objs libs bin
     | optArch opts == "armv7" = ARMv7.linker opts objs libs bin
+    | optArch opts == "armv7-m" = ARMv7_M.linker opts objs libs bin
     | otherwise = [ ErrorMsg ("Can't link executables for " ++ (optArch opts)) ]
 
 cxxlinker :: Options -> [String] -> [String] -> String -> [RuleToken]
@@ -739,6 +750,7 @@ linkKernel opts name objs libs
     | optArch opts == "arm11mp" = ARM11MP.linkKernel opts objs [libraryPath l | l <- libs ] ("/sbin" ./. name)
     | optArch opts == "xscale" = XScale.linkKernel opts objs [libraryPath l | l <- libs ] ("/sbin" ./. name)
     | optArch opts == "armv7" = ARMv7.linkKernel opts objs [libraryPath l | l <- libs ] name
+    | optArch opts == "armv7-m" = ARMv7_M.linkKernel opts objs [libraryPath l | l <- libs ] name
     | otherwise = Rule [ Str ("Error: Can't link kernel for '" ++ (optArch opts) ++ "'") ]
 
 --
diff --git a/hake/menu.lst.armv7-m b/hake/menu.lst.armv7-m
new file mode 100644 (file)
index 0000000..6bbe2b0
--- /dev/null
@@ -0,0 +1,32 @@
+timeout 0
+
+#
+# This script is used to describe the commands to start at
+# boot-time and the arguments they should receive.
+#
+
+title  Barrelfish
+#root  (nd)
+kernel /armv7-m/sbin/cpu_omap44xx loglevel=4
+module /armv7-m/sbin/cpu_omap44xx
+module /armv7-m/sbin/init
+
+# Domains spawned by init
+module /armv7-m/sbin/mem_serv
+module /armv7-m/sbin/monitor
+
+# Special boot time domains spawned by monitor
+#module /armv7-m/sbin/chips boot
+module /armv7-m/sbin/ramfsd boot
+module /armv7-m/sbin/skb boot
+module /armv7-m/sbin/spawnd boot bootarm=0
+module /armv7-m/sbin/startd boot
+
+# General user domains
+#module        /armv7-m/sbin/serial
+#module        /armv7-m/sbin/fish
+
+module /armv7-m/sbin/memtest
+
+# For pandaboard, use following values.
+mmap map 0x80000000 0x40000000 1
index 01880af..b6dea55 100644 (file)
@@ -560,3 +560,55 @@ arm_gem5_detailed: arm_gem5_image $(SRCDIR)/tools/arm_gem5/gem5script.py
        gem5.fast $(ARM_FLAGS) --cpu-type=arm_detailed
 
 .PHONY: arm_gem5_mc arm_gem5 arm_gem5_detailed arm_gem5_detailed
+
+#######################################################################
+#
+# Pandaboard build for the armv7-M slave image (to be used in conjunction with a master image)
+# (basically a normal pandaboard_image, but compiled for the cortex-m3)
+#
+#######################################################################
+
+HETEROPANDA_MODULES=\
+       armv7-m/sbin/cpu_omap44xx \
+       armv7-m/sbin/init \
+       armv7-m/sbin/mem_serv \
+       armv7-m/sbin/monitor \
+       armv7-m/sbin/ramfsd \
+       armv7-m/sbin/spawnd \
+       armv7-m/sbin/startd \
+       armv7-m/sbin/skb \
+       armv7-m/sbin/memtest
+
+menu.lst.armv7-m: $(SRCDIR)/hake/menu.lst.armv7-m
+       cp $< $@
+
+heteropanda_slave: $(HETEROPANDA_MODULES) \
+               tools/bin/arm_molly \
+               menu.lst.armv7-m
+       # Translate each of the binary files we need
+       $(SRCDIR)/tools/arm_molly/build_data_files.sh menu.lst.armv7-m molly_panda
+       # Generate appropriate linker script
+       cpp -P -DBASE_ADDR=0x0 $(SRCDIR)/tools/arm_molly/molly_ld_script.in \
+               molly_panda/molly_ld_script
+       # Build a C file to link into a single image for the 2nd-stage
+       # bootloader
+       tools/bin/arm_molly menu.lst.armv7-m panda_mbi.c
+       # Compile the complete boot image into a single executable
+       $(ARM_GCC) -std=c99 -g -fPIC -pie -Wl,-N -fno-builtin \
+               -nostdlib -march=armv7-m -mcpu=cortex-m3 -mthumb -mapcs -fno-unwind-tables \
+               -Tmolly_panda/molly_ld_script \
+               -I$(SRCDIR)/include \
+               -I$(SRCDIR)/include/arch/arm \
+               -I./armv7-m/include \
+               -I$(SRCDIR)/include/oldc \
+               -I$(SRCDIR)/include/c \
+               -imacros $(SRCDIR)/include/deputy/nodeputy.h \
+               $(SRCDIR)/tools/arm_molly/molly_boot.S \
+               $(SRCDIR)/tools/arm_molly/molly_init.c \
+               $(SRCDIR)/tools/arm_molly/lib.c \
+               ./panda_mbi.c \
+               $(SRCDIR)/lib/elf/elf32.c \
+               ./molly_panda/* \
+               -o heteropanda_slave
+       @echo "OK - heteropanda slave image is built."
+       @echo "you can now use this image to link into a regular pandaboard image"
diff --git a/if/arch/armv7-m.if b/if/arch/armv7-m.if
new file mode 100644 (file)
index 0000000..280e09e
--- /dev/null
@@ -0,0 +1,11 @@
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+alias errval uint32;
+alias cycles uint32;
diff --git a/if/platform/armv7-m.if b/if/platform/armv7-m.if
new file mode 100644 (file)
index 0000000..459214c
--- /dev/null
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+alias genpaddr uint64;
+alias genvaddr uint64;
+alias rsrcid   uint32;
index 5e86f23..1341904 100644 (file)
@@ -17,7 +17,7 @@
 
 #ifndef __ASSEMBLER__
 
-#ifdef __ARM_ARCH_7A__
+#if defined (__ARM_ARCH_7A__) || defined(__ARM_ARCH_7M__)
 
 static inline void dmb(void)
 {
index b562d3a..f54daed 100644 (file)
@@ -21,6 +21,8 @@
 #include <target/arm/barrelfish_kpi/paging_arm_v5.h>
 #elif defined(__ARM_ARCH_7A__)
 #include <target/arm/barrelfish_kpi/paging_arm_v7.h>
+#elif defined(__ARM_ARCH_7M__)
+#include <target/arm/barrelfish_kpi/paging_arm_v7m.h>
 #else
 #error "Missing ARM Paging header file"
 #endif
diff --git a/include/arch/arm/barrelfish_kpi/registers_pushed_arm_v7m.h b/include/arch/arm/barrelfish_kpi/registers_pushed_arm_v7m.h
new file mode 100644 (file)
index 0000000..3471e78
--- /dev/null
@@ -0,0 +1,57 @@
+/**
+ * \file
+ * \brief architecture-specific registers code
+ * Armv7-M automatically pushes some registers instead of banking them.
+ */
+
+/*
+ * Copyright (c) 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_ARM_BARRELFISH_KPI_REGISTERS_PUSHED_ARMV7_M_H
+#define ARCH_ARM_BARRELFISH_KPI_REGISTERS_PUSHED_ARMV7_M_H
+
+#ifndef __ASSEMBLER__
+#include <barrelfish_kpi/types.h> // for lvaddr_t
+#endif
+
+/*
+ * Offsets of automatically pushed registers on context switch.
+ * These registers get pushed on exception entry and are restored from the stack on 
+ * exception return.
+ */
+#define R0_REG_PUSHED      0
+#define R1_REG_PUSHED      1
+#define R2_REG_PUSHED      2
+#define R3_REG_PUSHED      3
+#define R12_REG_PUSHED     4
+#define LR_REG_PUSHED      5
+#define PC_REG_PUSHED      6
+#define XPSR_REG_PUSHED    7     //this overlaps with the cpsr of armv7-a, but only partially
+
+#define NUM_REGS_PUSHED    8
+
+#ifndef __ASSEMBLER__
+
+//analogous to registers_arm in registers_arm.h
+union registers_arm_pushed {
+    struct registers_arm_pushed_named {
+        uint32_t r0, r1, r2, r3;
+        uint32_t r12;
+        uint32_t link;
+        uint32_t pc;
+        uint32_t xpsr;
+    } named;
+    uint32_t regs[sizeof(struct registers_arm_pushed_named) / sizeof(uint32_t)];
+};
+
+STATIC_ASSERT_SIZEOF(union registers_arm_pushed, NUM_REGS_PUSHED * 4);
+
+#endif // __ASSEMBLER__
+
+#endif // ARCH_ARM_BARRELFISH_KPI_REGISTERS_PUSHED_ARMV7_M_H
index b49b321..006384e 100644 (file)
 /*
  * 2.4 Dual Cortex-M3 Subsystem Memory Space Mapping
  */
-
+#define OMAP44XX_MAP_M3_L2MMU                           0x55082000
+#define OMAP44XX_MAP_M3_L2MMU_SIZE                      0x1000
 /*
  * 2.5 DSP Subsystem Memory Space Mapping
  */
diff --git a/include/target/arm/barrelfish_kpi/paging_arm_v7m.h b/include/target/arm/barrelfish_kpi/paging_arm_v7m.h
new file mode 100644 (file)
index 0000000..d465c6e
--- /dev/null
@@ -0,0 +1,164 @@
+/**
+ * \file
+ * \brief Paging definitions for arm_v7m.
+ * this reflects the page table entry format on the cortex-M3 on the pandaboard, which
+ * may not be the same as that of other armv7-m implementations (as the pure armv7-m
+ * specification does not define any kind of address translation mechanism)
+ * Alignment and size of tables is the same as for other armv7 architectures,
+ * but it lacks all permission bits, providing endianness control instead
+ */
+
+/*
+ * Copyright (c) 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+//TODO: heteropanda: maybe use the ignored bits in each entry to store metadata
+//  at least put a bit for cacheability in there
+#ifndef TARGET_ARM_BARRELFISH_KPI_PAGING_ARM_V7M_H
+#define TARGET_ARM_BARRELFISH_KPI_PAGING_ARM_V7M_H
+
+
+/* Default page size is 4K */
+#define BASE_PAGE_BITS          12
+#define BASE_PAGE_SIZE          (1u << BASE_PAGE_BITS)
+#define BASE_PAGE_MASK          (BASE_PAGE_SIZE - 1)
+#define BASE_PAGE_OFFSET(a)     ((a) & BASE_PAGE_MASK)
+
+#define LARGE_PAGE_MASK                 0x0000FFFF  //64KB large page
+
+#define ARM_L1_OFFSET(addr)       (((uintptr_t)addr) >> 20)
+#define ARM_L2_OFFSET(addr)       ((((uintptr_t)addr) >> 12) & 0xff)
+#define ARM_PAGE_OFFSET(addr)     ((uintptr_t)addr & 0xfff)
+
+#define ARM_L1_ALIGN                    16384u
+
+#define ARM_L1_MAX_ENTRIES              4096u
+#define ARM_L1_BYTES_PER_ENTRY          4u
+#define ARM_L1_SECTION_BYTES            (1024u * 1024u)
+#define ARM_L1_SECTION_MASK             0x000FFFFF
+
+#define ARM_L1_SUPERSECTION_MASK        0x00FFFFFF
+
+#define ARM_L2_ALIGN                    1024u
+#define ARM_L2_MAX_ENTRIES              256u
+#define ARM_L2_BYTES_PER_ENTRY          4u
+#define ARM_L2_TABLE_BYTES              ARM_L2_ALIGN
+
+//#define ARM_L2_SMALL_CACHEABLE          0x008
+//#define ARM_L2_SMALL_BUFFERABLE         0x004
+//#define ARM_L2_SMALL_USR_RO             0x20
+//#define ARM_L2_SMALL_USR_RW             0x30
+//#define ARM_L2_SMALL_USR_NONE            0x10
+
+/* Page type independent page options */
+#define KPI_PAGING_FLAGS_READ    0x01
+#define KPI_PAGING_FLAGS_WRITE   0x02
+#define KPI_PAGING_FLAGS_EXECUTE 0x04
+#define KPI_PAGING_FLAGS_NOCACHE 0x08
+#define KPI_PAGING_FLAGS_MASK    0x0f
+
+union arm_l1_entry {
+    uint32_t raw;
+
+    /// Invalid L1 entry
+    struct {
+        uint32_t        type            :2;     // == 0
+    } invalid;
+
+    /// L1 entry for 256 4K L2 entries
+    struct {
+        uint32_t        type            :2;     // == 1
+        uint32_t        ignored3        :8;     //XXX: we currently use this byte to keep version numbers for the table
+        uint32_t        base_address    :22;
+    } page_table;
+
+    /// L1 entry for 1MB mapped section
+    struct {
+        uint32_t        type            :2;     // == 2
+        uint32_t        ignored3        :8;
+        uint32_t        element_size    :2;        // for endianness conversion (should be irrelevant) - use "3" if in doubt
+        uint32_t        ignored2        :3;        // type extension
+        uint32_t        endianness      :1;        // locked on "0", little endian
+        uint32_t        ignored1        :1;
+        uint32_t        mixed_region    :1;
+        uint32_t        mbz0            :1;        //must be zero
+        uint32_t        ignored0        :1;
+        uint32_t        base_address    :12;
+    } section;
+
+    /// L1 entry for 16MB mapped super section
+    struct {
+        uint32_t        type            :2;     // == 3
+        uint32_t        ignored3        :8;
+        uint32_t        element_size    :2;        // for endianness conversion (should be irrelevant)
+        uint32_t        ignored2        :3;        // type extension
+        uint32_t        endianness      :1;        // locked on "0", little endian
+        uint32_t        ignored1        :1;
+        uint32_t        mixed_region    :1;
+        uint32_t        mbo0            :1;        //must be one
+        uint32_t        ignored0        :5;
+        uint32_t        base_address    :8;
+    } super_section;
+
+};
+
+#define L1_TYPE_INVALID_ENTRY           0
+#define L1_TYPE_PAGE_TABLE_ENTRY        1
+#define L1_TYPE_SECTION_ENTRY           2
+#define L1_TYPE_SUPER_SECTION_ENTRY     3
+#define L1_TYPE(x)              ((x) & 3)
+
+union arm_l2_entry {
+    uint32_t raw;
+
+    /// Invalid L2 entry
+    struct {
+        uint32_t        type            :2;     // == 0
+    } invalid;
+
+    /// Descriptor for a 64K page
+    struct {
+        uint32_t        type            :2;     // == 1
+        uint32_t        ignored3        :2;
+        uint32_t        element_size    :2;        // for endianness conversion (should be irrelevant)
+        uint32_t        ignored2        :3;        // type extension
+        uint32_t        endianness      :1;        // locked on "0", little endian
+        uint32_t        ignored1        :1;
+        uint32_t        mixed_region    :1;
+        uint32_t        ignored0        :4;
+        uint32_t        base_address    :16;
+    } large_page;
+
+    /// Descriptor for a 4K page
+    struct {
+        uint32_t        type            :2;        // == 2 or 3
+        uint32_t        ignored3        :2;
+        uint32_t        element_size    :2;        // for endianness conversion (should be irrelevant)
+        uint32_t        ignored2        :3;        // type extension
+        uint32_t        endianness      :1;        // locked on "0", little endian
+        uint32_t        ignored1        :1;
+        uint32_t        mixed_region    :1;
+        uint32_t        base_address    :20;
+    } small_page;
+
+};
+
+
+#define L2_TYPE_INVALID_PAGE    0
+#define L2_TYPE_LARGE_PAGE      1
+#define L2_TYPE_SMALL_PAGE      2
+#define L2_TYPE_SMALL_PAGE_XN   3
+#define L2_TYPE(x)              ((x) & 3)
+
+#define BYTES_PER_SECTION       0x100000
+#define BYTES_PER_LARGE_PAGE    0x10000
+#define BYTES_PER_PAGE          0x1000
+#define BYTES_PER_SMALL_PAGE    0x400
+
+
+
+#endif // TARGET_ARM_BARRELFISH_KPI_PAGING_ARM_V7M_H
index e1e375f..b00a151 100644 (file)
@@ -370,6 +370,42 @@ let
                          "omap/omap44xx_spinlock",
                          "omap/omap44xx_gpio"],
      addLibraries = [ "elf", "cpio" ]
-     }                            
-  
+     },                            
+  --
+  -- Texas Instruments OMAP44xx-series dual-core Cortex-M3 Coprocessor,
+  -- used to run a heterogeneous system on the pandaboard 
+  --
+
+  cpuDriver {
+     target = "omap44xx",
+     architectures = [ "armv7-m" ],
+     assemblyFiles = [ "arch/armv7-m/boot.S",
+                       "arch/armv7-m/exceptions.S" ],
+     cFiles = [ 
+                "arch/armv7-m/exec.c", 
+                "arch/arm/misc.c", 
+                "arch/armv7-m/exn.c", 
+                "arch/arm/phys_mmap.c",
+                "arch/armv7/kludges.c", 
+                "arch/armv7/multiboot.c", 
+                "arch/armv7/syscall.c", 
+                "arch/armv7-m/init.c", 
+                "arch/armv7-m/omap.c", 
+                "arch/armv7-m/paging.c", 
+                "arch/omap44xx/startup_arch.c", 
+                "arch/omap44xx/omap_uart.c", 
+--                "arch/omap44xx/start_aps.c", 
+                "arch/armv7/kputchar.c", 
+                "arch/omap44xx/spinlock.c"
+                ],
+     mackerelDevices = [ "arm", 
+                         "omap/omap44xx_cortex_m3_nvic", 
+                         "omap/omap_uart", 
+                         "omap/omap44xx_id", 
+                         "omap/omap44xx_emif",
+                         "omap/omap44xx_gpio",
+                         "omap/omap44xx_mmu",
+                         "omap/omap44xx_spinlock"],
+     addLibraries = [ "elf", "cpio" ]
+     }
   ]
diff --git a/kernel/arch/armv7-m/boot.S b/kernel/arch/armv7-m/boot.S
new file mode 100644 (file)
index 0000000..02da339
--- /dev/null
@@ -0,0 +1,78 @@
+/**
+ * \file
+ * \brief Bootstrap the kernel for ARMv7-M processors, specifically the 
+ *      Cortex-M3 on the OMAP44XX. This code is
+ *      entered from the bootloader (typically arm_molly, RedBoot,
+ *      etc.).
+ */
+/*
+ * Copyright (c) 2009 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef __ASSEMBLER__
+#define __ASSEMBLER__   1
+#endif
+
+#include <barrelfish_kpi/flags_arch.h> // ARM_MODE_MASK
+#include <offsets.h> // BOOT_STACK_PHYS
+
+        .text
+//        .arm
+
+        .globl start, halt, got_base
+        .extern kernel_stack, glbl_core_data
+
+        // Used to track phys memory allocator limit globally.
+        alloc_top .req r11
+
+start:
+/*
+    //idea is that the cortex-A9 has already started the MMU, and included all pagemappings
+    //that we need to setup the rest  
+
+        // On entry:
+        //
+        // MMU already enabled, TLB filled with necessary mappings to bootstrap
+        // Cache disabled
+        // CPU is in privileged thread mode.
+        //
+
+*/
+        //init stack
+        ldr     r6, =kernel_stack
+        mov     sp, r6//cannot directly ldr to sp on v7-M, so use r6 in between
+        movw    r6, #KERNEL_STACK_SIZE//cannot directly add big immediates, so move it into a register first
+        add     sp, r6  //sp = &kernel_stack+KERNEL_STACK_SIZE
+
+        
+        ldr     r0, =got_base
+        mov     PIC_REGISTER, r0
+        // equivalent to: ldr     PIC_REGISTER, =got_base
+
+        //prepare argument
+        mov     r0, r2
+        //do a long call to compensate for shorter range
+        ldr     r6, =arch_init//shorthand, loads the value from memory
+        bx      r6
+        //equivalent to: b       arch_init        
+        b       halt
+
+
+/**
+ * extern "C" void halt(void) __attribute__((noreturn))
+ */
+halt:
+        b       .
+
+/**********************************************************************/
+.ltorg
+
+got_base:
+        .word   0                               // one-word slot; initialized by linker
+
+        .end
diff --git a/kernel/arch/armv7-m/exceptions.S b/kernel/arch/armv7-m/exceptions.S
new file mode 100644 (file)
index 0000000..5ffd01b
--- /dev/null
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2007, 2008, 2009, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef __ASSEMBLER__
+#define __ASSEMBLER__
+#endif // __ASSEMBLER__
+
+#include <asmoffsets.h> // OFFSETOF etc.
+#include <barrelfish_kpi/registers_arch.h> // CPSR_REG etc.
+#include <barrelfish_kpi/registers_pushed_arm_v7m.h> // PC_REG_PUSHED etc.
+#include <barrelfish_kpi/flags_arch.h> // CPSR_IF_MASK etc.
+#include <exceptions.h>
+
+//#define EXCEPTION_MODE_STACK_BYTES       256
+#define HANDLER_MODE_STACK_BYTES         8192
+
+#define VECTORTABLE_BYTES       320 //16 system interrupts + 64 external interrupts, 4 bytes each
+
+
+        .syntax unified
+        .text
+
+        .globl exceptions_init
+        .globl exceptions_early_init
+        .globl vectortable
+        .globl irq_save_pushed_area_top
+        //
+        .balign 4096 //so we can treat this as just a page when setting up the MMU  
+        //XXX: "balign" seems to be GNU-specific. 
+        //other arm assemblers use the corresponding power of two as their argument instead
+        
+        
+vectortable:    //where the interrupt handlers will be looked up
+        .space VECTORTABLE_BYTES, 0
+        //XXX: maybe we rather want the table in .data instead of .text, but then 
+        //we will have to make sure both the page with the table AND the page with the 
+        //handlers will be accessible (mapped in cache MMU) 
+
+        //
+        // Handler mode stack
+        //
+        // used whenever we transfer to handler mode (privileged) from an unprivileged mode,
+        // in which case we can assume that it is safe to overwrite all of it
+        // DO NOT USE THIS WHEN WE CAME FROM HANDLER MODE IN THE FIRST PLACE
+        //  all exception handlers that are allowed to interrupt others need to check first
+        //  if they interrupted another handler, in which case they are not allowed to set their
+        //  own stack!
+handler_stack:
+        .space HANDLER_MODE_STACK_BYTES, 0
+handler_stack_top:
+
+irq_save_area:
+        .space (NUM_REGS * 4), 0
+        
+irq_save_pushed_area:       //used when we switch to a process with an uninitialized stack
+        .space (NUM_REGS_PUSHED * 4), 0
+irq_save_pushed_area_top:
+
+got_for_handler_mode:
+        .word   0
+
+
+
+
+        // void exceptions_early_init(void)
+        //
+        // Assumes running in privileged thread mode.
+        // sets up the special handler used to cleanly transition into handler mode
+        // DOES NOT RETURN, instead triggers the special handler
+        //
+exceptions_early_init:
+        //use SVCall, because this is always enabled and can be triggered by a single instruction
+        //XXX: HACK using the fact that whatever vector table we are currently using, 
+        //it will be mapped to address 0
+        mov            r2, #0                              // base of vectortable, where the handler will be stored
+        mov     r0, # ARM_7M_EVECTOR_SVCALL
+        adr     r1, initialization_handler
+               add         r2, r2, r0 , LSL #2         // vectortable + 4*vector_index
+               orr     r1, #1                      //lsb of handler must be 1 to indicate thumb mode
+               str             r1, [r2]                    //write handler address into table
+        //change thread mode stack pointer (the only banked register)
+        mrs     r1, MSP
+        msr     PSP, r1     //copy main stack pointer to thread stack pointer
+        mov     r1, #2
+        msr     control, r1 //set thread mode stack pointer to PSP
+        //trigger the special exception -> will never return
+        svc     #0
+        
+        
+        //special handler that may only be triggered once -> used so we are in handler mode
+        //gives control over to initialization code in armv7-m/init.c        
+initialization_handler:
+        mov     r1, #1
+        msr     control, r1 //set thread mode unprivileged
+        ldr r4, =text_init_continued
+        bx  r4
+        
+        
+        //
+        // void exceptions_init(void)
+        //
+        // Assumes running in handler Mode.
+        // Installs the real handlers into the vector table, then saves the
+        // kernel GOT pointer for later handler-mode entries.
+exceptions_init:
+        push    {lr}
+        push    {r0-r4}
+        // NOTE(review): two dead loads removed here ("ldr r0, null_handler" /
+        // "ldr r1, =vectortable"): both registers were immediately overwritten
+        // below, and the first loaded the instruction *bits* at null_handler,
+        // not its address.
+$exceptions_install_handlers:
+        mov     r0, # ARM_7M_EVECTOR_MEM
+        ldr     r1, =generic_irq_handler                //needs work
+        bl      exceptions_install_handler
+        mov     r0, # ARM_7M_EVECTOR_BUS
+        ldr     r1, =generic_irq_handler               //needs work
+        bl      exceptions_install_handler
+        mov     r0, # ARM_7M_EVECTOR_SYSTICK
+        ldr     r1, =generic_irq_handler
+        bl      exceptions_install_handler
+        mov     r0, # ARM_7M_EVECTOR_USAGE
+        ldr     r1, =generic_irq_handler
+        bl      exceptions_install_handler
+        mov     r0, # ARM_7M_EVECTOR_SVCALL
+        ldr     r1, =swi_handler
+        bl      exceptions_install_handler
+        mov     r0, # ARM_7M_EVECTOR_HARDFAULT
+        ldr     r1, =generic_irq_handler
+        bl      exceptions_install_handler
+        mov     r0, # ARM_7M_EVECTOR_NMI
+        ldr     r1, =generic_irq_handler
+        bl      exceptions_install_handler
+$exceptions_save_got:
+        ldr     r0, = got_for_handler_mode
+        mov     r1, PIC_REGISTER
+        str     r1, [r0]
+$exceptions_init_done:
+        pop     {r0-r4}
+        pop     {pc}
+
+
+               //
+               //void exceptions_install_handler(int vector_index, void (*handler)(void)
+               // writes entry into vectortable (and nothing else)
+               //
+exceptions_install_handler:
+               ldr             r2, =vectortable                        // base of vectortable, where the handler will be stored
+               add         r2, r2, r0 , LSL #2         // vectortable + 4*vector_index
+               orr     r1, #1                      //lsb of handler must be 1 to indicate thumb mode
+               str             r1, [r2]                    //write handler address into table
+               bx              lr
+        //
+        // void null_handler(void)
+        //
+null_handler:
+        b       .
+
+
+        // Macro definition to get pointer to arch specific dispatcher
+        //
+        // Logical equivalent of C function with same name
+        //
+.macro get_dispatcher_shared_arm out
+        ldr \out, = dcb_current // out = address of pointer to dcb_current
+        ldr \out, [\out]        // out = dcb_current
+        ldr \out, [\out, #OFFSETOF_DCB_DISP] //now ptr to dispatcher_shared_arm
+.endm
+
+        //
+        // Macro to determine if dispatcher is disabled.
+        //
+        // pc and disp arguments are unmodified.
+        // out contains result
+        //
+.macro disp_is_disabled disp, pc, out
+        // disp->disabled || (disp->crit_pc_lo <= pc && pc < disp->crit_pc_hi)
+        ldrb    \out, [\disp, #OFFSETOF_DISP_DISABLED]
+        cmp     \out, #1
+        IT      hs
+        bhs     0f                      // disp->disabled >= 0      | disabled
+                                        // disp->disabled = false
+        ldr     \out, [\disp, #OFFSETOF_DISP_CRIT_PC_LOW]
+        cmp     \out, \pc
+        ITt     hi
+        movhi   \out, #0
+        bhi     0f                      // disp->crit_pc_low > pc   | enabled
+        ldr     \out, [\disp, #OFFSETOF_DISP_CRIT_PC_HIGH]
+        
+        cmp     \pc, \out
+        ITe     hs
+        movhs   \out, #0                // pc >= disp->crit_pc_high | enabled
+        movlo   \out, #1                // pc <  disp->crit_pc_high | disable
+0:
+.endm
+
+
+        //
+        // Macro to initialize handler mode stack
+        //
+.macro init_handler_stack
+        ldr sp, =handler_stack_top
+.endm
+
+        //
+        // Macro to initialize handler pic register
+        //
+.macro init_handler_pic_register
+        ldr PIC_REGISTER, = got_for_handler_mode
+        ldr PIC_REGISTER, [PIC_REGISTER]
+.endm
+
+
+        //
+        // Macro to save all registers (as they were at the moment of interrupt)
+        //  into context
+        // pops from threadmode stack (PSP -> PSP - 8*4)
+        // only temp_regs will be clobbered
+        //
+.macro save_context context, temp_reg1, temp_reg2
+        .if     CPSR_REG <> 0
+        .err    "Invariant failure: CPSR offset != 0"
+        .endif
+        .if     PC_REG <> 16
+        .err    "Invariant failure: PC offset != 16"
+        .endif
+        mrs     \temp_reg1, PSP     //thread mode stack
+        ldr     \temp_reg2, [\temp_reg1, #(R0_REG_PUSHED *4)]            //read pushed r0
+        str     \temp_reg2, [\context, #(R0_REG*4)]               //copy to context.r0    
+        ldr     \temp_reg2, [\temp_reg1, #(R1_REG_PUSHED *4)]            //read pushed r1
+        str     \temp_reg2, [\context, #(R1_REG *4)]             //copy to context.r1    
+        ldr     \temp_reg2, [\temp_reg1, #(R2_REG_PUSHED *4)]            //read pushed r2
+        str     \temp_reg2, [\context, #(R2_REG *4)]             //copy to context.r2
+        ldr     \temp_reg2, [\temp_reg1, #(R3_REG_PUSHED *4)]            //read pushed r3
+        str     \temp_reg2, [\context, #(R3_REG *4)]             //copy to context.r3
+        ldr     \temp_reg2, [\temp_reg1, #(R12_REG_PUSHED *4)]           //read pushed r12
+        str     \temp_reg2, [\context, #(R12_REG * 4)]           //copy to context.r12
+        ldr     \temp_reg2, [\temp_reg1, #(LR_REG_PUSHED *4)]            //read pushed lr
+        str     \temp_reg2, [\context, #(LR_REG * 4)]            //copy to context.lr
+        ldr     \temp_reg2, [\temp_reg1, #(PC_REG_PUSHED *4)]            //read pushed pc
+        str     \temp_reg2, [\context, #(PC_REG * 4)]            //copy to context.pc
+        ldr     \temp_reg2, [\temp_reg1, #(XPSR_REG_PUSHED *4)]          //read pushed xpsr
+        str     \temp_reg2, [\context, #(CPSR_REG * 4)]          //copy to context.xpsr
+        add     \temp_reg1, #(NUM_REGS_PUSHED*4)                 //we read the pushed registers -> restore stack
+        str     \temp_reg1, [\context, #(SP_REG * 4)]           //copy to context.sp
+        //store rest of registers in context
+        add     \temp_reg1, \context, #(R4_REG * 4)     //point to first unpushed register
+        stmia   \temp_reg1, {r4-r11}                   //store rest
+.endm
+
+
+
+/*
+ * void generic_irq_handler(void)
+ *
+ * handler for pretty much any IRQ we get, except system calls (as those take arguments)
+ * checks if we interrupted userspace or kernelspace,
+ * reads out IRQ number,
+ * calls a handler that does the case distinction in C
+ */
+generic_irq_handler:
+    mrs     r0, IPSR            //what IRQ are we currently handling
+    and     r1, lr, #8          // r1 is now 0 if we come from handler mode
+    cbnz    r1, generic_irq_handler_user //compares without changing condition flags
+    
+generic_irq_handler_kernel: //no "$" because that bloated the layout, reducing the branch range
+    mov     r2, sp          //place where pushed registers start
+    ldr     r1, [sp, #(PC_REG_PUSHED * 4)]   //this should be where the pushed pc ends up
+    ldr     r3, =fatal_kernel_fault
+    bx      r3
+generic_irq_handler_user:
+    init_handler_stack
+    get_dispatcher_shared_arm r2
+    mrs     r0, PSP
+    ldr     r0, [r0, #(PC_REG_PUSHED*4)]    // r0 = faulting pc
+    disp_is_disabled r2, r0, r1             // r1 = 1 if disabled, else 0
+    cmp     r1, #0
+    ITe     eq    
+    addeq   r1, r2, #OFFSETOF_DISP_ENABLED_AREA
+    addne   r1, r2, #OFFSETOF_DISP_TRAP_AREA
+    save_context r1, r3, r2                     // r1 = save area
+    init_handler_pic_register     //only set after saving context
+    ldr     r3, =handle_irq
+    bx      r3
+
+
+
+    
+    
+        //
+        // void swi_handler(void)
+        //
+        // handles all software interrupts (SVCALL)
+        //
+        // r0 = encoded syscall ordinal
+        // r1 = syscall arg0
+        // r2 = syscall arg1
+        // ...
+        // r7 = syscall arg6
+        //
+        // For now the system saves the caller's context here, because
+        // some fraction of system calls do not return directly.
+        //
+swi_handler:
+    .if SYSCALL_REG <> 0
+    .error "Syscall entry broken. Expected ordinal reg to be r0."
+    .endif
+    
+//registers r0-r3,r12 are available for scratch use, since they are already on PSP stack
+    and     r1, lr, #8          // r1 is now 0 if we come from handler mode
+    cbnz    r1, swi_user //compares without changing condition flags
+swi_kernel:
+    ldr     r3, =sys_syscall_kernel
+    bx      r3
+swi_user:   //much like generic_irq_handler, but the save area is in r0
+    init_handler_stack
+    get_dispatcher_shared_arm r2
+    mrs     r0, PSP
+    ldr     r0, [r0, #(PC_REG_PUSHED*4)]    // r0 = faulting pc
+    disp_is_disabled r2, r0, r1             // r1 = 1 if disabled, else 0
+    cmp     r1, #0
+    ITe     eq    
+    addeq   r1, r2, #OFFSETOF_DISP_ENABLED_AREA
+    addne   r1, r2, #OFFSETOF_DISP_TRAP_AREA
+    save_context r1, r3, r2                     // r1 = save area
+    mov     r0, r1                              //sys_syscall expects the context to be in r0
+    init_handler_pic_register     //only set this after saving context!
+    ldr     r3, =sys_syscall
+    bx      r3
+
diff --git a/kernel/arch/armv7-m/exec.c b/kernel/arch/armv7-m/exec.c
new file mode 100644 (file)
index 0000000..a87e9c8
--- /dev/null
@@ -0,0 +1,169 @@
+/**
+ * \file
+ * \brief ARMv7-M upcalls
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <kernel.h>
+#include <dispatch.h>
+#include <init.h>
+#include <arm.h>
+#include <arm_hal.h>
+#include <exec.h>
+#include <exceptions.h>
+#include <misc.h>
+#include <barrelfish_kpi/registers_pushed_arm_v7m.h>//for autopushed register access
+
+static arch_registers_state_t upcall_state;
+
+extern uint32_t ctr;
+
+/*
+ * On armv7-m, the registers r0-r3,r12,lr,pc,apsr are popped from the stack
+ * whenever an exception returns -> we copy those values onto the thread mode stack after
+ * restoring the others
+ */
+static inline __attribute__((noreturn))
+void do_resume(uint32_t *regs)
+{
+    STATIC_ASSERT(CPSR_REG ==  0, "wrong register layout");
+    STATIC_ASSERT(R0_REG   ==  1, "wrong register layout");
+    STATIC_ASSERT(PC_REG   ==  16, "wrong register layout");
+    STATIC_ASSERT(XPSR_REG_PUSHED ==  7, "wrong register layout");
+    STATIC_ASSERT(R0_REG_PUSHED   ==  0, "wrong register layout");
+    STATIC_ASSERT(PC_REG_PUSHED   ==  6, "wrong register layout");
+  
+
+    
+    if (((arch_registers_state_t*) regs)->named.stack == 0){
+        printf("uninitialized stack pointer -> point to irq_save_pushed_area_top\n");
+        ((arch_registers_state_t*) regs)->named.stack = (uint32_t) irq_save_pushed_area_top;
+        printf("new stack pointer: 0x%x\n", ((arch_registers_state_t*) regs)->named.stack);
+    }
+    
+    //this is probably the ONLY time we do not have to adjust a future pc - we have to 
+    //adjust the XPSR instead...
+    //the EPSR part of the XPSR is normally ignored (mrs reads it as 0, msr is ignored),
+    //but when restoring the XPSR on modeswitch, it is actually read.
+    //This means we have to always ensure the Thumb-bit is set
+    if ((((arch_registers_state_t*) regs)->named.cpsr & 0x01000000) == 0){
+        printf("EPSR thumb bit not set -> fixed\n");
+        ((arch_registers_state_t*) regs)->named.cpsr |= 0x01000000;
+        printf("new XPSR register: 0x%x\n", ((arch_registers_state_t*) regs)->named.cpsr);
+        //should only actually happen when we first execute a new process
+    }
+    
+    __asm volatile(
+        "mov    r0, %[regs]                         \n\t"  //address where the regs are
+        "ldr    r1, [r0, #56]                       \n\t"  //stored stack pointer
+        "sub    r1, #32                             \n\t"  //allocate stack space for 8 registers
+        //copy the 8 expected registers
+        //XXX: could probably be shortened by using ldm, but only for the first 4 registers
+        "ldr    r2, [r0,#(" XTR(R0_REG) "*4)]       \n\t"  //copy r0 entry, using r2 as temp
+        "str    r2, [r1,#(" XTR(R0_REG_PUSHED) "*4)]\n\t"
+        "ldr    r2, [r0,#(" XTR(R1_REG) "*4)]       \n\t"  //copy r1 entry, using r2 as temp
+        "str    r2, [r1,#(" XTR(R1_REG_PUSHED) "*4)]\n\t"
+        "ldr    r2, [r0,#(" XTR(R2_REG) "*4)]       \n\t"  //copy r2 entry, using r2 as temp
+        "str    r2, [r1,#(" XTR(R2_REG_PUSHED) "*4)]\n\t"
+        "ldr    r2, [r0,#(" XTR(R3_REG) "*4)]       \n\t"  //copy r3 entry, using r2 as temp
+        "str    r2, [r1,#(" XTR(R3_REG_PUSHED) "*4)]\n\t"
+        "ldr    r2, [r0,#(" XTR(R12_REG) "*4)]      \n\t"  //copy r12 entry, using r2 as temp
+        "str    r2, [r1,#(" XTR(R12_REG_PUSHED)"*4)]\n\t"
+        "ldr    r2, [r0,#(" XTR(LR_REG) "*4)]       \n\t"  //copy lr entry, using r2 as temp
+        "str    r2, [r1,#(" XTR(LR_REG_PUSHED) "*4)]\n\t"
+        "ldr    r2, [r0,#(" XTR(PC_REG) "*4)]       \n\t"  //copy pc entry, using r2 as temp
+        "str    r2, [r1,#(" XTR(PC_REG_PUSHED) "*4)]\n\t"
+        "ldr    r2, [r0]                            \n\t"  //copy xpsr entry, using r2 as temp
+        "str    r2, [r1,#28]                        \n\t"
+        //set thread stack pointer
+        "msr    PSP, r1                             \n\t"  //set thread stack pointer to saved context
+        //restore unpushed registers: r4-r11
+        "add    r0, #(" XTR(R4_REG) "*4)            \n\t"  //point to r4 entry
+        "ldmia  r0, {r4-r11}                        \n\t"  //restore r4 - r11
+        "ldr    lr, =#0xFFFFFFFD                    \n\t"  //special return address to change modes
+        "bx     lr                                  \n\t"  //actual context switch
+    :: [regs] "r" (regs) : "lr");
+
+    panic("do_resume returned.");
+}
+
+/// Ensure context is for user-mode with interrupts enabled.
+static inline void
+ensure_user_mode_policy(arch_registers_state_t *state)
+{
+    //this should be a no-op on armv7-m:
+    //user mode is ensured by the return address used in do_resume
+    //exceptions can be tail-chained instead of preempting each other, meaning we never
+    //have to completely disable them
+}
+
+/**
+ * \brief Go to user-space at entry point 'entry'.
+ *
+ * This function goes to user-space and starts executing the program at
+ * its entry point at virtual address 'entry'.
+ *
+ * \param entry Entry point address of program to execute.
+ */
+void __attribute__ ((noreturn))
+execute(lvaddr_t entry)
+{
+    dispatcher_handle_t handle = dcb_current->disp;
+    struct dispatcher_shared_arm *disp_arm = get_dispatcher_shared_arm(handle);
+
+    arch_registers_state_t *state = &upcall_state;
+    assert(0 != disp_arm->got_base);
+
+    state->named.r10 = disp_arm->got_base;
+
+    struct dispatcher_shared_generic *disp_gen
+        = get_dispatcher_shared_generic(handle);
+
+    state->named.rtls = disp_gen->udisp;
+
+    state->named.pc = entry;
+    ensure_user_mode_policy(state);
+    do_resume(state->regs);
+}
+
+/**
+ * \brief Resume the given user-space snapshot.
+ *
+ * This function resumes user-space execution by restoring the CPU
+ * registers with the ones given in the array, pointed to by 'state'.
+ */
+uint32_t ctr=0;
+void __attribute__ ((noreturn)) resume(arch_registers_state_t *state)
+{
+    ctr++;
+    state->named.rtls = arch_get_thread_register();
+    ensure_user_mode_policy(state);
+
+    /*
+      This function succeeds the first time executed, i.e.
+      when init is started for the first time.
+      If we hold the execution here after the first execption, we are still good
+    */
+    //    while(ctr>1);
+    do_resume(state->regs);
+}
+
+void wait_for_interrupt(void)
+{
+    // REVIEW: Timer interrupt could be masked here.
+
+    __asm volatile(
+        "0:                                             \n\t"
+        "wfi                                            \n\t"
+        "b      0b                                      \n\t" :::);
+
+    panic("wfi returned");
+}
diff --git a/kernel/arch/armv7-m/exn.c b/kernel/arch/armv7-m/exn.c
new file mode 100644 (file)
index 0000000..d15a87f
--- /dev/null
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2009-2013 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <kernel.h>
+#include <dispatch.h>
+#include <arm.h>
+#include <arm_hal.h>    //needed for nvic device
+#include <exceptions.h>
+#include <exec.h>
+#include <misc.h>
+#include <stdio.h>
+#include <wakeup.h>
+#include <paging_kernel_arch.h>     //for mmu device
+#include <dev/omap/omap44xx_cortex_m3_nvic_dev.h>
+
+
+
+//TODO: heteropanda: actually handle the interrupts, instead of aborting
+
+void handle_user_page_fault(lvaddr_t                fault_address,
+                            arch_registers_state_t* save_area)
+{
+    lvaddr_t handler;
+    struct dispatcher_shared_arm *disp = get_dispatcher_shared_arm(dcb_current->disp);
+    uintptr_t saved_pc = save_area->named.pc;
+
+    disp->d.disabled = dispatcher_is_disabled_ip(dcb_current->disp, saved_pc);
+    bool disabled = (disp->d.disabled != 0);
+
+    assert(dcb_current->disp_cte.cap.type == ObjType_Frame);
+
+    printk(LOG_WARN, "user page fault%s in '%.*s': addr %"PRIxLVADDR
+                      " IP %"PRIxPTR"\n",
+           disabled ? " WHILE DISABLED" : "", DISP_NAME_LEN,
+           disp->d.name, fault_address, saved_pc);
+
+    if (disabled) {
+        assert(save_area == &disp->trap_save_area);
+        handler = disp->d.dispatcher_pagefault_disabled;
+        dcb_current->faults_taken++;
+    }
+    else {
+        assert(save_area == &disp->enabled_save_area);
+        handler = disp->d.dispatcher_pagefault;
+    }
+
+    if (dcb_current->faults_taken > 2) {
+        printk(LOG_WARN, "handle_user_page_fault: too many faults, "
+               "making domain unrunnable\n");
+        dcb_current->faults_taken = 0; // just in case it gets restarted
+        scheduler_remove(dcb_current);
+        dispatch(schedule());
+    }
+    else {
+        //
+        // Upcall to dispatcher
+        //
+        // NB System might be cleaner with a prototype
+        // dispatch context that has R0-R3 to be overwritten
+        // plus initial stack, thread, and gic registers. Could do
+        // a faster resume_for_upcall().
+        //
+
+        struct dispatcher_shared_generic *disp_gen =
+            get_dispatcher_shared_generic(dcb_current->disp);
+
+        union registers_arm resume_area;
+
+        //resume_area.named.xpsr = ?? //since the mode is not encoded in here, we can probably just ignore this
+        resume_area.named.pc   = handler;
+        resume_area.named.r0   = disp_gen->udisp;
+        resume_area.named.r1   = fault_address;
+        resume_area.named.r2   = 0;
+        resume_area.named.r3   = saved_pc;
+        resume_area.named.rtls = disp_gen->udisp;
+        resume_area.named.r10  = disp->got_base;
+
+        // SP is set by handler routine.
+
+        // Upcall user to save area
+        disp->d.disabled = true;
+        resume(&resume_area);
+    }
+}
+
+void handle_user_undef(lvaddr_t fault_address,
+                       arch_registers_state_t* save_area)
+{
+    union registers_arm resume_area;
+
+    struct dispatcher_shared_arm *disp = get_dispatcher_shared_arm(dcb_current->disp);
+
+    bool disabled = dispatcher_is_disabled_ip(dcb_current->disp, save_area->named.pc);
+    disp->d.disabled = disabled;
+
+    assert(dcb_current->disp_cte.cap.type == ObjType_Frame);
+    if (disabled) {
+        //        assert(save_area == &disp->trap_save_area);
+    }
+    else {
+        assert(save_area == &disp->enabled_save_area);
+    }
+
+    printk(LOG_WARN, "user undef fault%s in '%.*s': IP %" PRIxPTR "\n", // hex IP, as in page-fault path
+           disabled ? " WHILE DISABLED" : "", DISP_NAME_LEN,
+           disp->d.name, fault_address);
+
+    struct dispatcher_shared_generic *disp_gen =
+        get_dispatcher_shared_generic(dcb_current->disp);
+
+    //resume_area.named.xpsr = ?? //since the mode is not encoded in here, we can probably just ignore this
+    resume_area.named.pc   = disp->d.dispatcher_trap;
+    resume_area.named.r0   = disp_gen->udisp;
+    resume_area.named.r1   = ARM_7M_EVECTOR_USAGE;
+    resume_area.named.r2   = 0;
+    resume_area.named.r3   = fault_address;
+    resume_area.named.rtls = disp_gen->udisp;
+    resume_area.named.r10  = disp->got_base;
+
+    // Upcall user to save area
+    disp->d.disabled = true;
+    resume(&resume_area);
+}
+
+/*
+//XXX: probably broken since I had to change bkpt for thumb
+static int32_t bkpt_decode(lvaddr_t fault_address)
+{
+    int32_t bkpt_id = -1;
+    if ((fault_address & 3) == 0 && fault_address >= KERNEL_OFFSET) {
+        const uint32_t bkpt_mask = 0xfff000f0;
+        const uint32_t bkpt_isn  = 0xe1200070;
+
+        uintptr_t isn = *((uintptr_t*)fault_address);
+        if ((isn & bkpt_mask) == bkpt_isn) {
+            bkpt_id = (int32_t)((isn & 0xf) | ((isn & 0xfff00) >> 4));
+        }
+    }
+    return bkpt_id;
+}
+*/
+
+void fatal_kernel_fault(uint32_t evector, lvaddr_t address, arch_registers_state_t* save_area
+    )
+{
+    int i;
+    printk(LOG_PANIC, "Kernel fault at %08"PRIxLVADDR
+                      " vector %08"PRIx32"\n\n", address, evector);
+    printk(LOG_PANIC, "Processor save_area at: %p\n", save_area);
+
+    for (i = 0; i < 16; i++) {
+        const char *extrainfo = "";
+
+        switch(i) {
+        case 13:
+            extrainfo = "\t(sp)";
+            break;
+
+        case 14:
+            extrainfo = "\t(lr)";
+            break;
+
+        case 15:
+            {
+                char str[128];
+                snprintf(str, 128, "\t(pc)\t%08lx",
+                         save_area->regs[R0_REG + i] -
+                         local_phys_to_mem((uint32_t)&kernel_first_byte) +
+                         0x100000);
+                extrainfo = str;
+            }
+            break;
+        }
+
+        printk(LOG_PANIC, "r%d\t%08"PRIx32"%s\n", i, save_area->regs[R0_REG + i], extrainfo);
+    }
+    printk(LOG_PANIC, "xpsr\t%08"PRIx32"\n", save_area->regs[CPSR_REG]);
+    printk(LOG_PANIC, "called from: %p\n", __builtin_return_address(0) -
+           local_phys_to_mem((uint32_t)&kernel_first_byte) + 0x100000);
+
+
+    printf("Error registers:\n");
+    printf("M3 MMU address: 0x%x\n", *((uint32_t*) &mmu));
+    printf("M3 MMU_FAULT_AD register: 0x%x\n", omap44xx_mmu_fault_ad_rd(&mmu));
+    printf("M3 MMU_FAULT_STATUS register: 0x%x\n", omap44xx_mmu_fault_status_rd(&mmu));
+    printf("M3 MMU_IRQSTATUS register: 0x%x\n", omap44xx_mmu_irqstatus_rd(&mmu));
+    
+    printf("ICTR: 0x%x\n", omap44xx_cortex_m3_nvic_ICTR_rd(&nvic));
+    printf("CPUID_BASE: 0x%x\n", omap44xx_cortex_m3_nvic_CPUID_BASE_rd(&nvic));
+    printf("ICSR: 0x%x\n", omap44xx_cortex_m3_nvic_ICSR_rd(&nvic));
+    printf("VTOR: 0x%x\n", omap44xx_cortex_m3_nvic_VTOR_rd(&nvic));
+    printf("AIRCR: 0x%x\n", omap44xx_cortex_m3_nvic_AIRCR_rd(&nvic));
+    printf("CCR: 0x%x\n", omap44xx_cortex_m3_nvic_CCR_rd(&nvic));
+    printf("SHCSR: 0x%x\n", omap44xx_cortex_m3_nvic_SHCSR_rd(&nvic));
+    printf("CFSR: 0x%x\n", omap44xx_cortex_m3_nvic_CFSR_rd(&nvic));
+    printf("BFAR: 0x%x\n", omap44xx_cortex_m3_nvic_BFAR_rd(&nvic));
+    printf("SYSTICK_CTRL: 0x%x\n", omap44xx_cortex_m3_nvic_SYSTICK_CTRL_rd(&nvic));
+    printf("SYSTICK_CALV: 0x%x\n", omap44xx_cortex_m3_nvic_SYSTICK_CALV_rd(&nvic));
+
+    switch (evector) {
+        case ARM_7M_EVECTOR_USAGE:
+        //TODO: heteropanda: distinguish further
+            panic("Usage fault.\n");
+            break;
+
+        case ARM_7M_EVECTOR_BUS:
+          //TODO: heteropanda: distinguish further
+            panic("Bus fault\n");
+            break;
+      //TODO: heteropanda: distinguish further
+      default:
+        panic("Caused by evector: %02"PRIx32, evector);
+        break;
+    }
+
+}
+
+
+
+/*
+ * \brief Handles pretty much any user-mode IRQ except system calls
+ */
+void handle_irq(uint32_t irq, arch_registers_state_t* save_area)
+{
+    printf("handle_irq: registers:\n");//dump content for debugging reasons
+    for(uint32_t i = 0; i<NUM_REGS; i++){
+        printf("0x%x\n", save_area->regs[i]);
+    }
+    uintptr_t fault_pc = save_area->named.pc;//read faulting pc from pushed context
+    
+    uint32_t regval;
+    __asm volatile ("mrs %[regval], xpsr" : [regval] "=r"(regval));
+    printf("current XPSR register: 0x%x\n", regval);
+    
+    printf("M3 MMU address: 0x%x\n", *((uint32_t*) &mmu));
+    printf("M3 MMU_FAULT_AD register: 0x%x\n", omap44xx_mmu_fault_ad_rd(&mmu));
+    printf("M3 MMU_FAULT_STATUS register: 0x%x\n", omap44xx_mmu_fault_status_rd(&mmu));
+    printf("M3 MMU_FAULT_PC register: 0x%x\n", omap44xx_mmu_fault_pc_rd(&mmu));
+    printf("M3 MMU_IRQSTATUS register: 0x%x\n", omap44xx_mmu_irqstatus_rd(&mmu));
+    
+    printf("ICTR: 0x%x\n", omap44xx_cortex_m3_nvic_ICTR_rd(&nvic));
+    printf("CPUID_BASE: 0x%x\n", omap44xx_cortex_m3_nvic_CPUID_BASE_rd(&nvic));
+    printf("ICSR: 0x%x\n", omap44xx_cortex_m3_nvic_ICSR_rd(&nvic));
+    printf("VTOR: 0x%x\n", omap44xx_cortex_m3_nvic_VTOR_rd(&nvic));
+    printf("AIRCR: 0x%x\n", omap44xx_cortex_m3_nvic_AIRCR_rd(&nvic));
+    printf("CCR: 0x%x\n", omap44xx_cortex_m3_nvic_CCR_rd(&nvic));
+    printf("SHCSR: 0x%x\n", omap44xx_cortex_m3_nvic_SHCSR_rd(&nvic));
+    printf("CFSR: 0x%x\n", omap44xx_cortex_m3_nvic_CFSR_rd(&nvic));
+    printf("BFAR: 0x%x\n", omap44xx_cortex_m3_nvic_BFAR_rd(&nvic));
+    printf("SYSTICK_CTRL: 0x%x\n", omap44xx_cortex_m3_nvic_SYSTICK_CTRL_rd(&nvic));
+    printf("SYSTICK_CALV: 0x%x\n", omap44xx_cortex_m3_nvic_SYSTICK_CALV_rd(&nvic));
+    
+    debug(SUBSYS_DISPATCH, "IRQ %"PRIu32" while %s\n", irq,
+          dcb_current ? (dcb_current->disabled ? "disabled": "enabled") : "in kernel");
+
+
+    if (dcb_current != NULL) {
+        dispatcher_handle_t handle = dcb_current->disp;
+        if (save_area == dispatcher_get_disabled_save_area(handle)) {
+            assert(dispatcher_is_disabled_ip(handle, fault_pc));
+            dcb_current->disabled = true;
+        } else {
+/*            debug(SUBSYS_DISPATCH,
+                  "save_area=%p, dispatcher_get_enabled_save_are(handle)=%p\n",
+                   save_area, dispatcher_get_enabled_save_area(handle));
+*/
+
+            assert(save_area == dispatcher_get_enabled_save_area(handle));
+            assert(!dispatcher_is_disabled_ip(handle, fault_pc));
+            dcb_current->disabled = false;
+        }
+    }
+    //TODO: heteropanda: make a case distinction on the type of interrupt, and  
+    //actually handle it
+
+    if (0) {//TODO: heteropanda: should be "if timer interrupt"
+        // Timer interrupt, pit_handle_irq acks it at the timer.
+        assert(kernel_ticks_enabled);
+        kernel_now += kernel_timeslice;
+        wakeup_check(kernel_now);
+        dispatch(schedule());
+    }
+    else {
+        // send_user_interrupt(irq);
+        panic("Unhandled IRQ %"PRIu32"\n", irq);
+    }
+
+}
diff --git a/kernel/arch/armv7-m/init.c b/kernel/arch/armv7-m/init.c
new file mode 100644 (file)
index 0000000..b1f3be4
--- /dev/null
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2009-2013, ETH Zurich. All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, CAB F.78, Universitaetstr. 6, CH-8092 Zurich,
+ * Attn: Systems Group.
+ */
+
+/**
+ * \file
+ * \brief cortex-m3 CPU driver init code for the OMAP44xx series SoCs.
+ */
+
+#include <kernel.h>
+#include <string.h>
+#include <init.h>
+#include <exceptions.h>
+#include <exec.h>
+#include <offsets.h>
+#include <paging_kernel_arch.h>
+#include <phys_mmap.h>
+#include <serial.h>
+#include <spinlock.h>
+#include <stdio.h>
+#include <arm_hal.h>
+#include <getopt/getopt.h>
+#include <elf/elf.h>
+#include <arm_core_data.h>
+#include <startup_arch.h>
+#include <kernel_multiboot.h>
+#include <global.h>
+#include <start_aps.h> // AP_WAIT_*, AUX_CORE_BOOT_*  and friends
+
+#include <omap44xx_map.h>
+#include <dev/omap/omap44xx_id_dev.h>
+#include <dev/omap/omap44xx_gpio_dev.h>
+
+/// Round up n to the next multiple of size
+#define ROUND_UP(n, size)           ((((n) + (size) - 1)) & (~((size) - 1)))
+
+/**
+ * Used to store the address of global struct passed during boot across kernel
+ * relocations.
+ */
+//static uint32_t addr_global;
+
+/**
+ * \brief Kernel stack.
+ *
+ * This is the one and only kernel stack for a kernel instance.
+ */
+uintptr_t kernel_stack[KERNEL_STACK_SIZE/sizeof(uintptr_t)]
+__attribute__ ((aligned(8)));
+
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
+#define MAX(a,b) ((a) > (b) ? (a) : (b))
+#define CONSTRAIN(x, a, b) MIN(MAX(x, a), b)
+
+//
+// Kernel command line variables and binding options
+//
+//XXX: SysTick actually counts cycles, so time can at most be estimated
+static int timeslice  = 5; //interval in ms in which the scheduler gets called
+
+/// Recognized kernel command-line arguments and the variables their parsed
+/// values are stored into (consumed by parse_commandline()).
+static struct cmdarg cmdargs[] = {
+    { "consolePort",    ArgType_UInt, { .uinteger = &serial_console_port}},
+    { "debugPort",      ArgType_UInt, { .uinteger = &serial_debug_port}},
+    { "loglevel",       ArgType_Int, { .integer = &kernel_loglevel }},
+    { "logmask",        ArgType_Int, { .integer = &kernel_log_subsystem_mask }},
+    { "timeslice",      ArgType_Int, { .integer = &timeslice }},
+    {NULL, 0, {NULL}}
+};
+
+/// Adjust the stack pointer by 'offset' bytes so the stack stays valid
+/// after the kernel image is relocated. Must be inlined: a function call
+/// would push a return address onto the stack being moved.
+static inline void __attribute__ ((always_inline))
+relocate_stack(lvaddr_t offset)
+{
+    __asm volatile (
+                   "add        sp, sp, %[offset]\n\t" ::[offset] "r" (offset)
+                   );
+}
+
+/// Adjust r10 by 'offset' bytes after relocation.
+/// NOTE(review): r10 is presumably the PIC/GOT base register in this build
+/// (see got_base in the linker script) — confirm against the toolchain flags.
+static inline void __attribute__ ((always_inline))
+relocate_got_base(lvaddr_t offset)
+{
+    __asm volatile (
+                   "add        r10, r10, %[offset]\n\t" ::[offset] "r" (offset)
+                   );
+}
+
+
+/**
+ * \brief Parse the kernel command line.
+ *
+ * Reads the command line string from the boot core data structure, binds
+ * the arguments declared in 'cmdargs', and clamps the scheduler timeslice
+ * to the [1, 20] ms range.
+ */
+void kernel_startup_early(void)
+{
+    const char *cmdline;
+    assert(glbl_core_data != NULL);
+    cmdline = MBADDR_ASSTRING(glbl_core_data->cmdline);
+    parse_commandline(cmdline, cmdargs);
+    timeslice = CONSTRAIN(timeslice, 1, 20);
+}
+
+/**
+ * \brief Continue kernel initialization in kernel address space.
+ *
+ * This function resets paging to map out low memory and map in physical
+ * address space, relocating all remaining data structures. It sets up exception handling,
+ * initializes devices and transitions into handler mode.
+ */
+static void  __attribute__ ((noinline,noreturn)) text_init(void)
+{
+    errval_t errval;
+
+    if ((glbl_core_data->multiboot_flags & MULTIBOOT_INFO_FLAG_HAS_MMAP)) {
+        // BSP core: set final page tables covering the region described by
+        // the multiboot memory map
+        struct arm_coredata_mmap *mmap = (struct arm_coredata_mmap *)
+            local_phys_to_mem(glbl_core_data->mmap_addr);
+        paging_arm_reset(mmap->base_addr, mmap->length);
+    } else {
+        // AP core
+        //  FIXME: Not sure what to do, so map the whole memory for now
+        paging_arm_reset(PHYS_MEMORY_START, 0x40000000);
+    }
+
+    exceptions_init();
+
+    // parse the kernel command line (timeslice, serial ports, log settings)
+    kernel_startup_early();
+
+    // initialize console
+    serial_init(serial_console_port);
+    spinlock_init();
+
+    printf("Barrelfish CPU driver starting on ARMv7-M OMAP44xx"
+           " Board id 0x%08"PRIx32"\n", hal_get_board_id());
+    printf("The address of paging_map_kernel_section is %p\n",
+           paging_map_kernel_section);
+
+    errval = serial_debug_init();
+    if (err_is_fail(errval)) {
+        // BUGFIX: message was missing its terminating newline
+        printf("Failed to initialize debug port: %d\n", serial_debug_port);
+    }
+
+    if (my_core_id != hal_get_cpu_id()) {
+        // BUGFIX: the original format string lacked the '%' before
+        // PRIuCOREID and passed no arguments for its two conversions,
+        // which is undefined behaviour for printf
+        printf("** setting my_core_id (=%"PRIuCOREID") to match "
+               "hal_get_cpu_id() (=%u)\n", my_core_id, hal_get_cpu_id());
+        my_core_id = hal_get_cpu_id();
+    }
+
+    // Test MMU by remapping the device identifier and reading it using a
+    // virtual address
+    lpaddr_t id_code_section = OMAP44XX_MAP_L4_CFG_SYSCTRL_GENERAL_CORE & ~ARM_L1_SECTION_MASK;
+    lvaddr_t id_code_remapped = paging_map_device(id_code_section,
+                                                  ARM_L1_SECTION_BYTES);
+    omap44xx_id_t id;
+    omap44xx_id_initialize(&id, (mackerel_addr_t)(id_code_remapped +
+            (OMAP44XX_MAP_L4_CFG_SYSCTRL_GENERAL_CORE & ARM_L1_SECTION_MASK)));
+
+    char buf[200];
+    omap44xx_id_code_pr(buf,200,&id);
+    printf("Using MMU, %s", buf);
+
+    nvic_init();
+    printf("nvic_init done\n");
+    systick_init(0x10000);//TODO: heteropanda: find out what cycle count to use here
+    printf("systick_init done\n");
+
+    //transition into handler mode, will call text_init_continued
+    exceptions_early_init();
+    panic("exceptions_early_init has returned. this should never happen!");
+}
+
+/*
+ * \brief last bit of initialization before calling arm_kernel_startup()
+ *
+ * Called by a special exception handler, so we are now in handler mode and
+ * can properly set up exceptions here. Never returns.
+ */
+void  __attribute__ ((noinline,noreturn)) text_init_continued(void){
+    printf("entered text_init_continued - we are now in handler mode\n");
+    
+    //now all devices should have been mapped -> release the lock on their TLB entries
+    set_tlb_lock_basevalue(0);
+    
+    //now is probably the time to set up the vectortable?
+    printf("overwriting TLB mapping for page 0 (vectortable).\n");
+    printf("physical address of vectortable: 0x%x\n", (uint32_t) &vectortable);
+    // map the physical vectortable page to virtual address 0
+    add_tlb_mapping(0, (lpaddr_t) &vectortable, 1, 2);
+    printf("mapped vectortable page to address 0 in TLB.\n");
+    set_tlb_lock_basevalue(2);//XXX: hack, to make sure the already preserved entry in there stays preserved
+    //(entry 1 is a preserved mapping of the section containing the TLB flush code)
+    //XXX: cachemarker: as soon as caching is up, we can flush the TLB safely
+    // -> remove the preserved-bit for the entry with the flushing code on
+    
+    
+    exceptions_init();//set up proper exception handlers in the relocated vectortable
+    
+    
+    arm_kernel_startup();
+}
+
+
+/*
+ * \brief Blink LED on GPIO1 pin 8 as a boot sanity check.
+ *
+ * Doesn't work yet on the second LED for some reason...
+ * NOTE(review): the early 'return' below deliberately makes the GPIO4
+ * (second LED) code unreachable until that is debugged.
+ */
+static void set_leds(void)
+{
+    uint32_t r, nr;
+    omap44xx_gpio_t g;
+    //char buf[8001];
+
+    omap44xx_gpio_initialize(&g, (mackerel_addr_t)OMAP44XX_MAP_L4_WKUP_GPIO1);
+    // Output enable
+    r = omap44xx_gpio_oe_rd(&g) & (~(1<<8));
+    omap44xx_gpio_oe_wr(&g,r);
+    // Write data out
+    r = omap44xx_gpio_dataout_rd(&g);
+    nr = r  |(1<<8); 
+    // toggle the LED 5 times; the printf loops act as a crude delay
+    for(int i = 0; i < 5; i++) {
+       omap44xx_gpio_dataout_wr(&g,r);
+       for(int j = 0; j < 2000; j++) { 
+           printf(".");
+       }
+       omap44xx_gpio_dataout_wr(&g,nr);
+       for(int j = 0; j < 2000; j++) { 
+           printf(".");
+       }
+    }
+    return;
+
+    // unreachable: second LED on GPIO4 pin 14 (see note above)
+    omap44xx_gpio_initialize(&g, (mackerel_addr_t)OMAP44XX_MAP_L4_PER_GPIO4);
+
+    // Output enable
+    r = omap44xx_gpio_oe_rd(&g) & (~(1<<14));
+    omap44xx_gpio_oe_wr(&g,r);
+    // Write data out
+    r = omap44xx_gpio_dataout_rd(&g);
+    nr = r  |(1<<14); 
+    for(int i = 0; i < 100; i++) {
+       omap44xx_gpio_dataout_wr(&g,r);
+       for(int j = 0; j < 2000; j++) { 
+           printf(".");
+       }
+       omap44xx_gpio_dataout_wr(&g,nr);
+       for(int j = 0; j < 2000; j++) { 
+           printf(".");
+       }
+    }
+}
+
+/**
+ * Entry point called from boot.S for bootstrap processor.
+ * if is_bsp == true, then pointer points to multiboot_info
+ * else pointer points to a global struct
+ *
+ * Sets up early serial/spinlocks, fills in glbl_core_data from the
+ * multiboot info, then hands over to text_init() (which never returns).
+ */
+void arch_init(void *pointer)
+{
+
+    serial_early_init(serial_console_port);
+    spinlock_early_init();//from here on we can safely use printf
+
+#if 0 //XXX: HACK: we currently are separate from the other cores, so we can not use
+        // either of the "normal" cases
+    if (hal_cpu_is_bsp()) {
+        struct multiboot_info *mb = pointer;
+
+        memset(glbl_core_data, 0, sizeof(struct arm_core_data));
+
+        size_t max_addr = max(multiboot_end_addr(mb), (uintptr_t)&kernel_final_byte);
+        glbl_core_data->start_free_ram = ROUND_UP(max_addr, BASE_PAGE_SIZE);
+        glbl_core_data->mods_addr = mb->mods_addr;
+        glbl_core_data->mods_count = mb->mods_count;
+        glbl_core_data->cmdline = mb->cmdline;
+        glbl_core_data->mmap_length = mb->mmap_length;
+        glbl_core_data->mmap_addr = mb->mmap_addr;
+        glbl_core_data->multiboot_flags = mb->flags;
+
+        memset(&global->locks, 0, sizeof(global->locks));
+    } else {
+        global = (struct global *)GLOBAL_VBASE;
+        // zeroing locks for the app core seems bogus to me --AKK
+        //memset(&global->locks, 0, sizeof(global->locks));
+
+        // our core data (struct arm_core_data) is placed one page before the
+        // first byte of the kernel image
+        glbl_core_data = (struct arm_core_data *)
+                            ((lpaddr_t)&kernel_first_byte - BASE_PAGE_SIZE);
+        glbl_core_data->cmdline = (lpaddr_t)&glbl_core_data->kernel_cmdline;
+        my_core_id = glbl_core_data->dst_core_id;
+
+        // tell BSP that we are started up
+        // See Section 27.4.4 in the OMAP44xx manual for how this should work.
+        // we do this early, to avoid having to map the registers
+        lpaddr_t aux_core_boot_0 = AUX_CORE_BOOT_0;
+        lpaddr_t ap_wait = AP_WAIT_PHYS;
+
+        *((volatile lvaddr_t *)aux_core_boot_0) = 2<<2;
+        //__sync_synchronize();
+        *((volatile lvaddr_t *)ap_wait) = AP_STARTED;
+
+    }
+#else    
+    //XXX: HACK
+    //since the M3 currently thinks it is a bsp core, we do most of the bsp stuff
+    //except for truly global variables
+    
+    //not sure what address to use for our core_data
+    glbl_core_data = (struct arm_core_data*)((lvaddr_t)&kernel_first_byte - BASE_PAGE_SIZE);
+    
+    memset(glbl_core_data, 0, sizeof(struct arm_core_data));
+       
+    struct multiboot_info *mb = (struct multiboot_info *)pointer;
+    
+    // free RAM starts after the last multiboot module or the kernel image,
+    // whichever ends later, rounded up to a page boundary
+       glbl_core_data->start_free_ram =
+                       ROUND_UP(max(multiboot_end_addr(mb), (uintptr_t)&kernel_final_byte),
+                                BASE_PAGE_SIZE);
+
+    glbl_core_data->mods_addr = mb->mods_addr;
+    glbl_core_data->mods_count = mb->mods_count;
+    glbl_core_data->cmdline = mb->cmdline;
+    glbl_core_data->mmap_length = mb->mmap_length;
+    glbl_core_data->mmap_addr = mb->mmap_addr;
+    glbl_core_data->multiboot_flags = mb->flags;
+    
+//    global = (struct global *)GLOBAL_VBASE;//we currently do not need global
+#endif //0
+
+
+    // XXX: print kernel address for debugging with gdb
+    printf("Barrelfish OMAP44xx cortex-m3 CPU driver starting at addr 0x%"PRIxLVADDR"\n", 
+          local_phys_to_mem((uint32_t)&kernel_first_byte));
+
+
+    if (1) {
+        set_leds();
+    }
+
+    //we already are in a virtual address space, so we do not have to do MMU stuff already
+    text_init();
+}
diff --git a/kernel/arch/armv7-m/linker.lds.in b/kernel/arch/armv7-m/linker.lds.in
new file mode 100644 (file)
index 0000000..6ee810b
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2007, 2008, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <offsets.h>
+
+OUTPUT_FORMAT("elf32-littlearm")
+OUTPUT_ARCH("arm")
+
+ENTRY(start)
+
+/*
+PHDRS {
+               headers PT_PHDR PHDRS;
+               text PT_LOAD FILEHDR PHDRS;
+               data PT_LOAD;
+               dynamic PT_DYNAMIC;
+}
+*/
+
+SECTIONS {
+        /* kernel is linked to run at its physical load address (no MMU
+           relocation at this point) */
+        . = START_KERNEL_PHYS;
+        
+        /*kernel_elf_header = .;*/
+        kernel_first_byte = .;
+        
+        /*. += SIZEOF_HEADERS; */
+        
+        .text : { *(.text); }
+        kernel_text_final_byte = .;
+
+        . = ALIGN(4k);
+        .rodata . :
+        {
+                *(.rodata);
+        }
+
+        /* got_base marks the start of the GOT; the kernel keeps it in r10
+           (see relocate_got_base in init.c) */
+        .got . :
+        {
+                got_base = .;
+                *(.got);
+        }
+        
+        /*.rel.got . : { *(.rel.got); } */
+
+        .bss . :
+        {
+                *(.bss);
+        }
+        
+
+        kernel_final_byte = .;
+
+        /***** These sections get discarded *****/
+        /DISCARD/ :
+        {
+                /* Discard exception handler frames and headers -- we don't use em */
+                *(.eh_frame);
+                *(.eh_frame_hdr);
+               *(.note.gnu.build-id);
+                *(.interp);
+/*              *(.dynsym); */
+/*              *(.dynstr); */
+/*              *(.hash); */
+/*              *(.gnu.hash); */
+              *(.dynamic); 
+        }
+}
diff --git a/kernel/arch/armv7-m/omap.c b/kernel/arch/armv7-m/omap.c
new file mode 100644 (file)
index 0000000..954decd
--- /dev/null
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+/*
+ * Abstraction layer for cortex-m3
+ * mostly NVIC and SysTick
+ */
+
+#include <kernel.h>
+#include <paging_kernel_arch.h>
+
+#include <dev/omap/omap44xx_cortex_m3_nvic_dev.h>
+
+#include <arm_hal.h>
+
+
+
+//not sure where to put this (or what to call it - should this go into omap44xx_map.h?)
+#define OMAP_CORTEX_M3_NVIC_BASE 0xE000E000
+#define OMAP_CORTEX_M3_NVIC_SIZE 0x1000         //4KB
+
+
+//hardcoded bc gem5 doesn't set board id in ID_Register
+//XXX: change so it makes sense for the pandaboard -SG
+#define VEXPRESS_ELT_BOARD_ID          0x8e0
+/// Return the (currently hardcoded) board identifier.
+uint32_t hal_get_board_id(void)
+{
+    return VEXPRESS_ELT_BOARD_ID;
+}
+
+/// Return this CPU's id; always 0 for now (see XXX below).
+uint8_t hal_get_cpu_id(void)
+{
+    return 0;//XXX: for the moment, pretend to be the bsp
+}
+
+/// True iff this core is the bootstrap processor (cpu id 0).
+bool hal_cpu_is_bsp(void)
+{
+    return hal_get_cpu_id() == 0;
+}
+
+
+
+/*
+ * \brief initialize NVIC
+ * IMPORTANT: the NVIC is in a private section, which means it can NOT be mapped into
+ * memory where we want it. ALL accesses to virtual address 0xE000XXXX will ALWAYS
+ * go to this private region regardless of any translation rules 
+ */
+void nvic_init(void){
+    omap44xx_cortex_m3_nvic_initialize(&nvic, (mackerel_addr_t)OMAP_CORTEX_M3_NVIC_BASE);
+    printf("NVIC: %d interrupt lines detected\n", (omap44xx_cortex_m3_nvic_ICTR_rd(&nvic)+1)*32);
+    
+#if defined(GLOBAL_DEBUG)     //additional debug output
+    printf("ICTR: 0x%x\n", omap44xx_cortex_m3_nvic_ICTR_rd(&nvic));
+    printf("CPUID_BASE: 0x%x\n", omap44xx_cortex_m3_nvic_CPUID_BASE_rd(&nvic));
+    printf("ICSR: 0x%x\n", omap44xx_cortex_m3_nvic_ICSR_rd(&nvic));
+    printf("VTOR: 0x%x\n", omap44xx_cortex_m3_nvic_VTOR_rd(&nvic));
+    printf("AIRCR: 0x%x\n", omap44xx_cortex_m3_nvic_AIRCR_rd(&nvic));
+    printf("CCR: 0x%x\n", omap44xx_cortex_m3_nvic_CCR_rd(&nvic));
+    printf("SHCSR: 0x%x\n", omap44xx_cortex_m3_nvic_SHCSR_rd(&nvic));
+    printf("CFSR: 0x%x\n", omap44xx_cortex_m3_nvic_CFSR_rd(&nvic));
+    printf("SYSTICK_CTRL: 0x%x\n", omap44xx_cortex_m3_nvic_SYSTICK_CTRL_rd(&nvic));
+    printf("SYSTICK_CALV: 0x%x\n", omap44xx_cortex_m3_nvic_SYSTICK_CALV_rd(&nvic));
+#endif    //defined(GLOBAL_DEBUG)
+
+    //allow return to thread mode from any handler by using the right return address
+    omap44xx_cortex_m3_nvic_CCR_nonbasethrdena_wrf(&nvic, 1);
+    //allow stack to be aligned to "only" 4 bytes on entry
+    omap44xx_cortex_m3_nvic_CCR_stkalign_wrf(&nvic, 0);
+    
+//TODO: set some register values:
+//  - set priority grouping? (which priorities do not preempt each other)
+//  - set priorities
+    enable_all_system_interrupts();
+}
+
+
+/*
+ * \brief enable a specific interrupt on this core
+ * STUB: currently only prints a reminder and enables nothing.
+ */
+void nvic_enable_interrupt(uint32_t int_id, uint16_t prio){
+    printf("nvic_enable_interrupt called -> implement ----------------\n");
+//TODO: heteropanda: actually implement
+//decide on a enumeration (should "0" be vectortable offset 0 or external interrupt 0 (offset 16)? probably the latter)
+//find out which register to set (two sets of 32 each to choose from)
+//first write priority
+//write set enable
+}
+
+
+/// Return the exception number of the currently active interrupt
+/// (ICSR.VECTACTIVE; 0 means thread mode).
+uint32_t nvic_get_active_irq(void)
+{
+	return omap44xx_cortex_m3_nvic_ICSR_vectactive_rdf(&nvic);
+}
+
+
+/*
+ * \brief enable all system interrupts (IRQ 2 - 15) that we might need
+ */
+void enable_all_system_interrupts(void){
+    //TODO: maybe set priorities?
+    omap44xx_cortex_m3_nvic_SHCSR_usgfaultena_wrf(&nvic, 1);
+    omap44xx_cortex_m3_nvic_SHCSR_busfaultena_wrf(&nvic, 1);
+    omap44xx_cortex_m3_nvic_SHCSR_memfaultena_wrf(&nvic, 1);
+    //service calls, NMI, hardfaults are always enabled
+    //SysTick interrupts are enabled in systick_init
+    
+    //enable interrupts on division by 0 or unaligned memory access
+    omap44xx_cortex_m3_nvic_CCR_div_0_trp_wrf(&nvic, 1);
+    omap44xx_cortex_m3_nvic_CCR_unalign_trp_wrf(&nvic, 1);
+}
+
+/*
+ * \brief set up SysTick, assumes nvic_init has already been called
+ * as there is no clock reference on pandaboard, we count actual cycles
+ *
+ * \param tick_cycles cycles between interrupts; only the low 24 bits
+ *                    of the reload register are used
+ */
+void systick_init(uint32_t tick_cycles){
+    //we do not actually need to map any memory here - SysTick is a subdevice of NVIC
+    
+    //use core clock (so we count processor cycles)
+    omap44xx_cortex_m3_nvic_SYSTICK_CTRL_clksource_wrf(&nvic,1);
+    //enable interrupts when countdown reaches 0
+    omap44xx_cortex_m3_nvic_SYSTICK_CTRL_tickint_wrf(&nvic,1);
+    
+    //number of cycles between interrupts (only 24 bit value!)
+    omap44xx_cortex_m3_nvic_SYSTICK_RV_reload_wrf(&nvic, tick_cycles);
+}
+
+/*
+ * \brief start SysTick timer and set value to reload value
+ */
+void systick_start(void){
+    omap44xx_cortex_m3_nvic_SYSTICK_CRTV_wr(&nvic,0);//reset counter
+    omap44xx_cortex_m3_nvic_SYSTICK_CTRL_enable_wrf(&nvic,1);//run timer
+}
+
+/*
+ * \brief stop SysTick timer
+ */
+void systick_stop(void){
+    omap44xx_cortex_m3_nvic_SYSTICK_CTRL_enable_wrf(&nvic,0);//stop timer
+}
+
+
+//only here so I do not have to change pure debug code
+//both stubs always return 0: there is no TSC equivalent wired up yet
+inline uint32_t tsc_read(void){
+    printf("tsc_read called, ignored\n");
+    return 0;
+}
+
+inline uint32_t tsc_get_hz(void){
+    printf("tsc_get_hz called, ignored\n");
+    return 0;
+}
diff --git a/kernel/arch/armv7-m/paging.c b/kernel/arch/armv7-m/paging.c
new file mode 100644 (file)
index 0000000..5eae51a
--- /dev/null
@@ -0,0 +1,687 @@
+/*
+ * Copyright (c) 2009 - 2012 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <kernel.h>
+#include <dispatch.h>
+#include <paging_kernel_arch.h>
+#include <string.h>
+#include <exceptions.h>
+#include <arm_hal.h>
+#include <cap_predicates.h>
+#include <dispatch.h>
+#include <dev/omap/omap44xx_mmu_dev.h>
+#include <omap44xx_map.h>
+
+/**
+ * Kernel L1 page table
+ */
+//XXX: We reserve double the space needed to be able to align the pagetable
+//     to 16K after relocation
+static union arm_l1_entry kernel_l1_table[2*ARM_L1_MAX_ENTRIES]
+__attribute__((aligned(ARM_L1_ALIGN)));
+static union arm_l1_entry *aligned_kernel_l1_table;
+
+#if 0
+/**
+ * Kernel L2 page table for first MB
+ */
+//XXX: We reserve double the space needed to be able to align the pagetable
+//     to 1K after relocation
+static union arm_l2_entry low_l2_table[2*ARM_L2_MAX_ENTRIES]
+__attribute__((aligned(ARM_L2_ALIGN)));
+static union arm_l2_entry *aligned_low_l2_table;
+#endif // 0
+
+// ------------------------------------------------------------------------
+// Utility declarations
+
+/// Round 'address' down to the previous multiple of 'size'
+/// (size must be a power of two).
+inline static uintptr_t paging_round_down(uintptr_t address, uintptr_t size)
+{
+    return address & ~(size - 1);
+}
+
+/// Round 'address' up to the next multiple of 'size'
+/// (size must be a power of two).
+inline static uintptr_t paging_round_up(uintptr_t address, uintptr_t size)
+{
+    return (address + size - 1) & ~(size - 1);
+}
+
+/// Nonzero iff 'address' is a multiple of 'bytes' (power of two).
+inline static int aligned(uintptr_t address, uintptr_t bytes)
+{
+    return (address & (bytes - 1)) == 0;
+}
+
+/**
+ * \brief Return the cte (capability table entry) containing 'cap'.
+ *
+ * Every struct capability handled here is embedded as the 'cap' member of
+ * a struct cte, so the enclosing entry begins offsetof(struct cte, cap)
+ * bytes before it.
+ * BUGFIX: the offset must be subtracted from a byte pointer; subtracting
+ * it from a 'struct capability *' scales by sizeof(struct capability) and
+ * only produced the right address when the offset happens to be zero.
+ */
+static inline struct cte *cte_for_cap(struct capability *cap)
+{
+    return (struct cte *) ((char *) cap - offsetof(struct cte, cap));
+}
+
+/*
+ * \brief read the version number of the table (called ignored3), to see if the upper half 
+ * of the table has been modified and needs to be replicated. 
+ * These version numbers are necessary for paging_context_switch, because
+ * the upper half of the table is expected to be preserved
+ */
+inline uint8_t read_table_version(union arm_l1_entry* ttb){
+    return ttb->section.ignored3;
+} 
+
+/// Store 'version' in the table's first entry (ignored3 field).
+inline void write_table_version(union arm_l1_entry* ttb, uint8_t version){
+    ttb->section.ignored3 = version;
+}
+
+//call this whenever the upper half of a page table is modified
+//NOTE(review): the uint8_t version wraps after 255 increments; the
+//'>' comparison in paging_context_switch will then misjudge freshness
+inline void increase_table_version(union arm_l1_entry* ttb){
+    if (ttb == NULL){//read current TTB, so the caller does not have to
+    //assumes the virtual and physical address are the same (should always hold for kernel)
+        ttb = (union arm_l1_entry*) omap44xx_mmu_ttb_rd(&mmu);
+    } 
+    write_table_version(ttb, read_table_version(ttb)+1);
+}
+
+/// Write L1 entry 'l1' at the slot for virtual address 'va' in the
+/// translation table at 'ttbase'. A ttbase of 0 means "the currently
+/// active table" (read from the MMU; assumes the table is mapped 1:1).
+static void
+paging_write_l1_entry(uintptr_t ttbase, lvaddr_t va, union arm_l1_entry l1)
+{
+    union arm_l1_entry *l1_table;
+    if (ttbase == 0) {//read current TTB, so the caller does not have to
+        ttbase = omap44xx_mmu_ttb_rd(&mmu);
+    }
+    l1_table = (union arm_l1_entry *) ttbase;
+    l1_table[ARM_L1_OFFSET(va)] = l1;
+}
+// ------------------------------------------------------------------------
+// Exported functions
+
+
+/// Map the 1MB section containing 'va' to physical address 'pa' in the
+/// table at 'ttbase' (0 = currently active table).
+void paging_map_kernel_section(uintptr_t ttbase, lvaddr_t va, lpaddr_t pa)
+{
+
+    union arm_l1_entry l1;
+
+    l1.raw = 0;
+    l1.section.type = L1_TYPE_SECTION_ENTRY;
+    //XXX: we currently ignore all paging flags
+    /*
+    l1.section.bufferable   = 1;
+    l1.section.cacheable    = 1;
+    l1.section.ap10         = 1;    // RW/NA
+    l1.section.ap2          = 0;
+    */
+    l1.section.base_address = pa >> 20u;
+
+    paging_write_l1_entry(ttbase, va, l1);
+}
+
+/// Identity-map (va == pa) 'bytes' bytes of physical memory starting at
+/// 'paddr' using 1MB sections in the table at 'ttbase'.
+/// NOTE(review): the end address is rounded *down* to a section boundary,
+/// so a trailing partial section is left unmapped — confirm callers always
+/// pass section-aligned sizes.
+void paging_map_memory(uintptr_t ttbase, lpaddr_t paddr, size_t bytes)
+{
+    lpaddr_t pend  = paging_round_down(paddr + bytes, BYTES_PER_SECTION);
+    while (paddr < pend) {
+        paging_map_kernel_section(ttbase, paddr, paddr);
+        paddr += BYTES_PER_SECTION;
+    }
+}
+
+void
+paging_map_device_section(uintptr_t ttbase, lvaddr_t va, lpaddr_t pa);
+
+/// Map the 1MB section containing 'va' to device memory at physical
+/// address 'pa' in the table at 'ttbase' (0 = currently active table).
+void
+paging_map_device_section(uintptr_t ttbase, lvaddr_t va, lpaddr_t pa)
+{
+    union arm_l1_entry l1;
+
+    l1.raw = 0;
+    l1.section.type = L1_TYPE_SECTION_ENTRY;
+    //XXX: we currently ignore all paging flags
+    /*
+    l1.section.bufferable   = 0;
+    l1.section.cacheable    = 0;
+    l1.section.ap10         = 3; // prev value: 3 // RW/NA RW/RW
+    l1.section.ap2        = 0;
+    */
+    l1.section.base_address = pa >> 20u;
+
+    paging_write_l1_entry(ttbase, va, l1);
+}
+
+/// Map a device's physical region into kernel virtual address space and
+/// return the virtual base address. Allocates sections downwards from
+/// DEVICE_OFFSET; 'device_bytes' must fit in one 1MB section.
+lvaddr_t paging_map_device(lpaddr_t device_base, size_t device_bytes)
+{
+    // HACK to put device in high memory.
+    // Should likely track these allocations.
+    static lvaddr_t dev_alloc = DEVICE_OFFSET;
+    assert(device_bytes <= BYTES_PER_SECTION);
+    dev_alloc -= BYTES_PER_SECTION;
+
+    //printf("paging_map_device_section: 0x%"PRIxLVADDR", 0x%"PRIxLVADDR", "
+    //        "0x%"PRIxLPADDR".\n",
+    //        (uintptr_t)aligned_kernel_l1_table, dev_alloc, device_base);
+
+    paging_map_device_section((uintptr_t)aligned_kernel_l1_table, dev_alloc,
+            device_base);
+
+    // kernel (upper-half) mapping changed -> bump version so other tables
+    // replicate it on the next context switch
+    increase_table_version(NULL);
+    
+    //do_full_tlb_flush();//probably not needed: the newly mapped address should not have 
+    //ever been accessed before
+
+    return dev_alloc;
+}
+
+/**
+ * \brief Reset kernel paging.
+ *
+ * This function resets the page maps for kernel and memory-space. It clears out
+ * all other mappings. Use this only at system bootup!
+ *
+ * \param paddr start of the physical memory region to identity-map
+ * \param bytes size of that region
+ */
+void paging_arm_reset(lpaddr_t paddr, size_t bytes)
+{
+    omap44xx_mmu_initialize(&mmu, (mackerel_addr_t) OMAP44XX_MAP_M3_L2MMU);//base address for mmu
+    printf("MMU revision: 0x%x\n", omap44xx_mmu_revision_rd(&mmu));
+    // make sure kernel pagetable is aligned to 16K after relocation
+    aligned_kernel_l1_table = (union arm_l1_entry *)ROUND_UP(
+            (uintptr_t)kernel_l1_table, ARM_L1_ALIGN);
+    // Re-map physical memory
+    //
+    paging_map_memory((uintptr_t)aligned_kernel_l1_table , paddr, bytes);
+
+    //because the kernel is mapped 1:1 (virtual = physical address),
+    //we can write the TTB easily
+    omap44xx_mmu_ttb_wr(&mmu, (uint32_t) aligned_kernel_l1_table);
+
+    omap44xx_mmu_cntl_tlw_wrf(&mmu, 1);
+    printf("Table walking enabled.\n");
+    
+    increase_table_version(NULL);//mark current table as changed
+    
+    //make all MMU errors to bus faults
+    omap44xx_mmu_gp_reg_bus_error_back_en_wrf(&mmu, 1);
+    
+    //now that we can use the page table, we can map devices into high memory
+    //sadly, the MMU can not be remapped, as it is accessed by its virtual(!) address
+    
+    //XXX: DO NOT INVALIDATE TLB YET, because devices like serial are not remapped yet
+   //do not even set the lock basevalue down yet
+}
+
+
+/*
+ * \brief copy the kernel half of the current page table into a new table.
+ *
+ * Intentionally a no-op here: with table versioning (see
+ * increase_table_version) replication happens lazily in
+ * paging_context_switch instead. Only the argument sanity checks remain.
+ */
+//If we use table versioning, then this seems superfluous -> ignore for the moment
+void paging_make_good(lvaddr_t new_table_base, size_t new_table_bytes)
+{
+printf("paging_make_good called, ignored.\n");
+    assert(new_table_base >= MEMORY_OFFSET);
+    assert(new_table_bytes == ARM_L1_ALIGN);
+    assert(aligned(new_table_base, ARM_L1_ALIGN));
+/*
+    lvaddr_t ttbr = local_phys_to_mem(omap44xx_mmu_ttb_rd(&mmu));
+    size_t st = (MEMORY_OFFSET / ARM_L1_SECTION_BYTES) * ARM_L1_BYTES_PER_ENTRY;
+
+    // Copy kernel pages (everything from MEMORY_OFFSET upwards)
+    memcpy((void*)new_table_base + st, (void*)ttbr + st,
+           ARM_L1_MAX_ENTRIES * ARM_L1_BYTES_PER_ENTRY - st);
+*/
+}
+
+/// Install an L1 page-table entry at the slot for 'va' in the user L1
+/// table at 'table_base', pointing to the L2 table at physical address
+/// 'pa' (must be 1K-aligned, hence the >> 10).
+void paging_map_user_pages_l1(lvaddr_t table_base, lvaddr_t va, lpaddr_t pa)
+{
+    assert(aligned(table_base, ARM_L1_ALIGN));
+    assert(aligned(pa, BYTES_PER_SMALL_PAGE));
+
+    union arm_l1_entry e;
+
+    e.raw                 = 0;
+    e.page_table.type         = L1_TYPE_PAGE_TABLE_ENTRY;
+//    e.page_table.domain       = 0;//there is no such thing in armv7-m L2 MMU
+    e.page_table.base_address = (pa >> 10);
+
+    paging_write_l1_entry(table_base, va, e);
+}
+
+/**
+ * \brief Fill in an L2 (small page) table entry.
+ *
+ * \param l2e   pointer to the entry to write
+ * \param addr  physical base address of the 4K page (must be page-aligned)
+ * \param flags KPI paging flags; currently ignored, the L2 MMU does not
+ *              enforce them (see paging_set_flags)
+ *
+ * BUGFIX: 'e' was never initialized ('e.raw = flags;' is commented out),
+ * so every bit other than 'type' and 'base_address' was indeterminate
+ * stack garbage written into the page table. Zero the entry first.
+ */
+void paging_set_l2_entry(uintptr_t* l2e, lpaddr_t addr, uintptr_t flags)
+{
+//    assert(0 == (flags & 0xfffff000));
+//    assert(0 == (flags & 0x3));
+    assert(0 == (addr & 0xfff));
+
+    union arm_l2_entry e;
+    e.raw = 0;
+
+    e.small_page.type = L2_TYPE_SMALL_PAGE;
+    e.small_page.base_address = (addr >> 12);
+
+    *l2e = e.raw;
+}
+
+/// Switch the active translation table to 'ttbr'. Before switching, the
+/// kernel (upper) half of the new table is refreshed from the old table if
+/// its version number is stale (see read_table_version), then the TLB is
+/// flushed.
+void paging_context_switch(lpaddr_t ttbr)
+{
+//    printf("paging context switch to %"PRIxLPADDR"\n", ttbr);
+    lpaddr_t old_ttbr = omap44xx_mmu_ttb_rd(&mmu);
+    if (ttbr != old_ttbr) {
+        //we currently do table versioning, to make sure we always have the most recent
+        //kernel region mapping
+        uint8_t version_current = read_table_version((union arm_l1_entry*) old_ttbr);
+        uint8_t version_replacement = read_table_version((union arm_l1_entry*) ttbr);
+
+        if (version_current > version_replacement){
+        //the kernel part of the new table is not up to date -> replicate
+        printf("current TTB version: %hhd, replacement version %hhd \n", version_current, version_replacement);
+            printf("replicating upper half of page table\n");
+            //something in the upper half of the page table has changed -> copy changes
+            //copy full upper half of page table. this is very coarse grained, but should be enough for the moment
+            memcpy((void*)ttbr+(ARM_L1_BYTES_PER_ENTRY*ARM_L1_MAX_ENTRIES/2), 
+                (void*)old_ttbr+(ARM_L1_BYTES_PER_ENTRY*ARM_L1_MAX_ENTRIES/2),
+                (ARM_L1_BYTES_PER_ENTRY*ARM_L1_MAX_ENTRIES/2));
+            write_table_version((union arm_l1_entry*) ttbr, version_current);
+        }
+        omap44xx_mmu_ttb_wr(&mmu, ttbr);
+        //XXX: cachemarker: flush cache here (including cache MMU)
+        do_full_tlb_flush();
+    }
+}
+
+/// Apply KPI paging flags to an L2 entry. Intentionally a no-op:
+/// the L2 MMU ignores permission/cache bits, and the cache MMU that
+/// could enforce them is not set up yet (see XXX below).
+static void
+paging_set_flags(union arm_l2_entry *entry, uintptr_t kpi_paging_flags)
+{
+//XXX: we currently ignore all flags - we would need the cache MMU to enforce them,
+// and the L2 MMU completely ignores them
+/*
+        entry->small_page.bufferable = 1;
+        entry->small_page.cacheable =
+            (kpi_paging_flags & KPI_PAGING_FLAGS_NOCACHE) ? 0 : 1;
+        entry->small_page.ap10  =
+            (kpi_paging_flags & KPI_PAGING_FLAGS_READ)  ? 2 : 0;
+        entry->small_page.ap10 |=
+            (kpi_paging_flags & KPI_PAGING_FLAGS_WRITE) ? 3 : 0;
+        entry->small_page.ap2 = 0;
+*/
+}
+
+/**
+ * \brief Map an L2 table capability 'src' into slot 'slot' of the L1
+ * table capability 'dest'.
+ *
+ * Because a 1K L2 table is managed as a 4K page (see note below), one
+ * logical slot expands to 4 consecutive L1 entries.
+ *
+ * \return SYS_ERR_OK on success, SYS_ERR_* on invalid slot/type/count
+ *         (though every failure currently panics first).
+ */
+static errval_t
+caps_map_l1(struct capability* dest,
+            cslot_t            slot,
+            struct capability* src,
+            uintptr_t          kpi_paging_flags,
+            uintptr_t          offset,
+            uintptr_t          pte_count)
+{
+    //
+    // Note:
+    //
+    // We have chicken-and-egg problem in initializing resources so
+    // instead of treating an L2 table it's actual 1K size, we treat
+    // it as being 4K. As a result when we map an "L2" table we actually
+    // map a page of memory as if it is 4 consecutive L2 tables.
+    //
+    // See lib/barrelfish/arch/arm/pmap_arch.c for more discussion.
+    //
+    const int ARM_L1_SCALE = 4;
+
+    if (slot >= 1024) {
+        printf("slot = %"PRIuCSLOT"\n",slot);
+        panic("oops: slot id >= 1024");
+        return SYS_ERR_VNODE_SLOT_INVALID;
+    }
+
+    if (pte_count != 1) {
+        printf("pte_count = %zu\n",(size_t)pte_count);
+        panic("oops: pte_count");
+        return SYS_ERR_VM_MAP_SIZE;
+    }
+
+    if (src->type != ObjType_VNode_ARM_l2) {
+        panic("oops: wrong src type");
+        return SYS_ERR_WRONG_MAPPING;
+    }
+
+    // user mappings may not touch the kernel (upper) half of the table
+    if (slot >= ARM_L1_OFFSET(MEMORY_OFFSET) / ARM_L1_SCALE) {
+        printf("slot = %"PRIuCSLOT"\n",slot);
+        panic("oops: slot id");
+        return SYS_ERR_VNODE_SLOT_RESERVED;
+    }
+
+    // Destination
+    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
+    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);
+
+    union arm_l1_entry* entry = (union arm_l1_entry*)dest_lvaddr + (slot * ARM_L1_SCALE);
+
+    // Source
+    genpaddr_t src_gpaddr = get_address(src);
+    lpaddr_t   src_lpaddr = gen_phys_to_local_phys(src_gpaddr);
+
+    assert(offset == 0);
+    assert(aligned(src_lpaddr, 1u << 10));
+    assert((src_lpaddr < dest_lpaddr) || (src_lpaddr >= dest_lpaddr + 16384));
+
+    // record the mapping in the source cte for later lookup/unmap
+    struct cte *src_cte = cte_for_cap(src);
+    src_cte->mapping_info.pte_count = pte_count;
+    src_cte->mapping_info.pte = dest_lpaddr + (slot * ARM_L1_SCALE);
+    src_cte->mapping_info.offset = 0;
+
+    // write the 4 consecutive L1 entries, each pointing at one 1K quarter
+    // of the 4K page backing the L2 table
+    for (int i = 0; i < 4; i++, entry++)
+    {
+        entry->raw = 0;
+        entry->page_table.type   = L1_TYPE_PAGE_TABLE_ENTRY;
+//        entry->page_table.domain = 0;//no such thing on armv7-m
+        entry->page_table.base_address =
+            (src_lpaddr + i * BASE_PAGE_SIZE / ARM_L1_SCALE) >> 10;
+        debug(SUBSYS_PAGING, "L1 mapping %"PRIuCSLOT". @%p = %08"PRIx32"\n",
+              slot * ARM_L1_SCALE + i, entry, entry->raw);
+    }
+
+    do_full_tlb_flush();
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief Map a frame (or device frame) into an L2 page table.
+ *
+ * \param dest              L2 vnode capability to map into.
+ * \param slot              First entry to write (0..1023, as four 1K
+ *                          tables are treated as one 4K unit).
+ * \param src               Frame/DevFrame capability to map.
+ * \param kpi_paging_flags  KPI paging flags for the new entries.
+ * \param offset            Byte offset into the source frame.
+ * \param pte_count         Number of consecutive entries to write.
+ */
+static errval_t
+caps_map_l2(struct capability* dest,
+            cslot_t            slot,
+            struct capability* src,
+            uintptr_t          kpi_paging_flags,
+            uintptr_t          offset,
+            uintptr_t          pte_count)
+{
+    assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));
+
+    // ARM L2 has 256 entries, but we treat a 4K page as a consecutive
+    // region of L2 with a single index. 4K == 4 * 1K
+    if (slot >= (256 * 4)) {
+        panic("oops: slot >= (256 * 4)");
+        return SYS_ERR_VNODE_SLOT_INVALID;
+    }
+
+    if (src->type != ObjType_Frame && src->type != ObjType_DevFrame) {
+        panic("oops: src->type != ObjType_Frame && src->type != ObjType_DevFrame");
+        return SYS_ERR_WRONG_MAPPING;
+    }
+
+    // check offset within frame
+    if ((offset + BYTES_PER_PAGE > get_size(src)) ||
+        ((offset % BYTES_PER_PAGE) != 0)) {
+        panic("oops: frame offset invalid");
+        return SYS_ERR_FRAME_OFFSET_INVALID;
+    }
+
+    // check mapping does not overlap leaf page table
+    if (slot + pte_count > (256 * 4)) {
+        return SYS_ERR_VM_MAP_SIZE;
+    }
+
+    // Destination
+    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
+    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);
+
+    union arm_l2_entry* entry = (union arm_l2_entry*)dest_lvaddr + slot;
+    if (entry->small_page.type != L2_TYPE_INVALID_PAGE) {
+        panic("Remapping valid page.");
+    }
+
+    lpaddr_t src_lpaddr = gen_phys_to_local_phys(get_address(src) + offset);
+    if ((src_lpaddr & (BASE_PAGE_SIZE - 1))) {
+        panic("Invalid target");
+    }
+
+    // Record the mapping in the source cap so it can be unmapped later.
+    struct cte *src_cte = cte_for_cap(src);
+    src_cte->mapping_info.pte_count = pte_count;
+    src_cte->mapping_info.pte = dest_lpaddr;
+    src_cte->mapping_info.offset = offset;
+
+    // Unsigned counter: pte_count is a uintptr_t.
+    for (uintptr_t i = 0; i < pte_count; i++, entry++) {
+        entry->raw = 0;
+
+        entry->small_page.type = L2_TYPE_SMALL_PAGE;
+        paging_set_flags(entry, kpi_paging_flags);
+        entry->small_page.base_address = (src_lpaddr + i * BYTES_PER_PAGE) >> 12;
+
+        // Log the entry just written (previously this ran after the
+        // pointer increment and printed the *next*, unwritten entry).
+        debug(SUBSYS_PAGING, "L2 mapping %08"PRIxLVADDR"[%"PRIuCSLOT"] @%p = %08"PRIx32"\n",
+               dest_lvaddr, slot, entry, entry->raw);
+    }
+
+    // Flush TLB if remapping.
+    do_full_tlb_flush();
+
+    return SYS_ERR_OK;
+}
+
+/// Create page mappings: dispatch to the mapper that matches the
+/// destination vnode type (L1 or L2 page table).
+errval_t caps_copy_to_vnode(struct cte *dest_vnode_cte, cslot_t dest_slot,
+                            struct cte *src_cte, uintptr_t flags,
+                            uintptr_t offset, uintptr_t pte_count)
+{
+    struct capability *src_cap  = &src_cte->cap;
+    struct capability *dest_cap = &dest_vnode_cte->cap;
+
+    // A capability that is already mapped may not be mapped again.
+    if (src_cte->mapping_info.pte) {
+        return SYS_ERR_VM_ALREADY_MAPPED;
+    }
+
+    switch (dest_cap->type) {
+    case ObjType_VNode_ARM_l1:
+        return caps_map_l1(dest_cap, dest_slot, src_cap, flags, offset,
+                           pte_count);
+    case ObjType_VNode_ARM_l2:
+        return caps_map_l2(dest_cap, dest_slot, src_cap, flags, offset,
+                           pte_count);
+    default:
+        panic("ObjType not VNode");
+    }
+}
+
+/**
+ * \brief Zero \p num_pages consecutive L2 entries starting at \p slot.
+ *
+ * \param pt        Kernel-virtual address of the leaf page table.
+ * \param slot      Index of the first entry to clear.
+ * \param num_pages Number of entries to clear.
+ *
+ * \return Number of entries actually cleared.
+ */
+size_t do_unmap(lvaddr_t pt, cslot_t slot, size_t num_pages)
+{
+    size_t unmapped_pages = 0;
+    union arm_l2_entry *ptentry = (union arm_l2_entry *)pt + slot;
+    // size_t counter: num_pages is a size_t; a signed int would make
+    // this a signed/unsigned comparison.
+    for (size_t i = 0; i < num_pages; i++) {
+        ptentry++->raw = 0;
+        unmapped_pages++;
+    }
+    return unmapped_pages;
+}
+
+/**
+ * \brief Read the physical base address encoded in entry \p slot of the
+ * page table capability \p pgtable.
+ *
+ * \param pgtable  L1 or L2 vnode capability.
+ * \param slot     Index of the entry to read.
+ * \param paddr    Out: physical address stored in that entry.
+ */
+static inline void read_pt_entry(struct capability *pgtable, size_t slot, genpaddr_t *paddr)
+{
+    assert(type_is_vnode(pgtable->type));
+    assert(paddr);
+
+    genpaddr_t gp = get_address(pgtable);
+    lpaddr_t lp = gen_phys_to_local_phys(gp);
+    lvaddr_t lv = local_phys_to_mem(lp);
+
+    switch (pgtable->type) {
+        case ObjType_VNode_ARM_l1:
+        {
+            // Index by slot: previously the slot argument was ignored
+            // and entry 0 was always read.
+            union arm_l1_entry *e = (union arm_l1_entry*)lv + slot;
+            *paddr = (genpaddr_t)(e->page_table.base_address) << 10;
+            return;
+        }
+        case ObjType_VNode_ARM_l2:
+        {
+            union arm_l2_entry *e = (union arm_l2_entry*)lv + slot;
+            *paddr = (genpaddr_t)(e->small_page.base_address) << 12;
+            return;
+        }
+        default:
+            assert(!"Should not get here");
+    }
+}
+
+/**
+ * \brief Remove \p num_pages page mappings starting at entry \p slot of
+ * \p pgtable.
+ *
+ * The page count must exactly match the count recorded in \p mapping
+ * when the mapping was created; partial unmaps are rejected with
+ * SYS_ERR_VM_MAP_SIZE. On success the whole TLB is flushed and the
+ * mapping_info of \p mapping is cleared.
+ */
+errval_t page_mappings_unmap(struct capability *pgtable, struct cte *mapping, size_t slot, size_t num_pages)
+{
+    assert(type_is_vnode(pgtable->type));
+    //printf("page_mappings_unmap(%zd pages, slot = %zd)\n", num_pages, slot);
+
+    // get page table entry data
+    genpaddr_t paddr;
+    //lpaddr_t pte;
+    read_pt_entry(pgtable, slot, &paddr);
+    lvaddr_t pt = local_phys_to_mem(gen_phys_to_local_phys(get_address(pgtable)));
+
+    // get virtual address of first page
+    // TODO: error checking
+    genvaddr_t vaddr;
+    struct cte *leaf_pt = cte_for_cap(pgtable);
+    compile_vaddr(leaf_pt, slot, &vaddr);
+    //genvaddr_t vend = vaddr + num_pages * BASE_PAGE_SIZE;
+    // printf("vaddr = 0x%"PRIxGENVADDR"\n", vaddr);
+    // printf("num_pages = %zu\n", num_pages);
+
+    // get cap for mapping
+    /*
+    struct cte *mem;
+    errval_t err = lookup_cap_for_mapping(paddr, pte, &mem);
+    if (err_is_fail(err)) {
+        printf("page_mappings_unmap: %ld\n", err);
+        return err;
+    }
+    */
+    //printf("state before unmap: mapped_pages = %zd\n", mem->mapping_info.mapped_pages);
+    //printf("state before unmap: num_pages    = %zd\n", num_pages);
+
+    if (num_pages != mapping->mapping_info.pte_count) {
+        printf("num_pages = %zu, mapping = %zu\n", num_pages, mapping->mapping_info.pte_count);
+        // want to unmap a different amount of pages than was mapped
+        return SYS_ERR_VM_MAP_SIZE;
+    }
+
+    // Zero the affected page table entries.
+    do_unmap(pt, slot, num_pages);
+
+    // flush TLB for unmapped pages
+    // TODO: selective TLB flush
+    do_full_tlb_flush();
+
+    // update mapping info: the source cap is no longer mapped anywhere
+    memset(&mapping->mapping_info, 0, sizeof(struct mapping_info));
+
+    return SYS_ERR_OK;
+}
+
+/**
+ * \brief Modify the paging flags of \p pages consecutive page table
+ * entries belonging to the mapping of \p frame.
+ *
+ * \param frame             Mapped frame capability.
+ * \param offset            Byte offset of the first affected entry,
+ *                          relative to the start of the mapping.
+ * \param pages             Number of entries to modify.
+ * \param kpi_paging_flags  New KPI paging flags.
+ */
+errval_t paging_modify_flags(struct capability *frame, uintptr_t offset,
+                             uintptr_t pages, uintptr_t kpi_paging_flags)
+{
+    // we currently ignore all permission flags
+//    assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));
+
+    struct cte *mapping = cte_for_cap(frame);
+    struct mapping_info *info = &mapping->mapping_info;
+
+    /* Calculate location of page table entries we need to modify.
+     * info->pte holds a local *physical* address (see caps_map_l2), so
+     * translate it into the kernel window before dereferencing. */
+    lvaddr_t base = local_phys_to_mem(info->pte) + offset;
+
+    for (uintptr_t i = 0; i < pages; i++) {
+        union arm_l2_entry *entry =
+            (union arm_l2_entry *)base + i;
+        paging_set_flags(entry, kpi_paging_flags);
+    }
+
+    // The old translations may still be cached; flush the TLB.
+    do_full_tlb_flush();
+
+    return SYS_ERR_OK;
+}
+
+/// Print every valid page mapping of a dispatcher's hardware page
+/// tables to the console.
+void paging_dump_tables(struct dcb *dispatcher)
+{
+    printf("dump_hw_page_tables\n");
+    lvaddr_t l1_base = local_phys_to_mem(dispatcher->vspace);
+
+    for (int i = 0; i < ARM_L1_MAX_ENTRIES; i++) {
+        union arm_l1_entry *l1_entry = (union arm_l1_entry *)l1_base + i;
+        if (l1_entry->raw == 0) {
+            continue;
+        }
+        // Locate the level-2 table this L1 entry points to.
+        genpaddr_t l2_gp = (genpaddr_t)(l1_entry->page_table.base_address) << 10;
+        lvaddr_t l2_base = local_phys_to_mem(gen_phys_to_local_phys(l2_gp));
+
+        for (int j = 0; j < ARM_L2_MAX_ENTRIES; j++) {
+            union arm_l2_entry *l2_entry = (union arm_l2_entry *)l2_base + j;
+            genpaddr_t paddr = (genpaddr_t)(l2_entry->small_page.base_address) << BASE_PAGE_BITS;
+            if (paddr != 0) {
+                printf("%d.%d: 0x%"PRIxGENPADDR"\n", i, j, paddr);
+            }
+        }
+    }
+}
+
+/*
+ * Cortex-M3 on pandaboard specific stuff
+ * since the M3 memory model on the pandaboard is a bit weird, we need a few additional functions
+ * some of these would be provided by a cp15, but we don't have one
+ */
+
+/**
+ * \brief Flush all TLB entries matching the 4K page of \p vaddr.
+ *
+ * Writes the virtual tag into the MMU CAM register, then triggers a
+ * flush of every entry matching that tag.
+ */
+void do_one_tlb_flush(genvaddr_t vaddr){
+    omap44xx_mmu_cam_virtual_tag_wrf(&mmu, vaddr>>12);
+    omap44xx_mmu_flush_entry_flush_entry_wrf(&mmu,1);//flush all entries corresponding to CAM
+}
+
+/// Flush all non-protected entries from the M3 MMU's TLB.
+void do_full_tlb_flush(void){
+    omap44xx_mmu_gflush_global_flush_wrf(&mmu,1);//flushes all non-protected entries
+}
+
+
+/**
+ * \brief Set the TLB lock base value (number of protected entries).
+ *
+ * Provided here so we can use it in init.c, after we mapped all devices.
+ *
+ * NOTE(review): plain `inline` (C99/gnu89) emits no external definition,
+ * so callers in other translation units (e.g. init.c) may fail to link
+ * unless a definition exists elsewhere -- confirm.
+ */
+inline void set_tlb_lock_basevalue(uint8_t basevalue){
+    omap44xx_mmu_lock_basevalue_wrf(&mmu, basevalue);
+    printf("set TLB lock base to %hhd\n", basevalue);
+}
+
+/*
+ * \brief add another (protected) entry into L2 TLB
+ *
+ * Programs the CAM/RAM register pair with the new translation, flushes
+ * any existing entries that would overlap it, then loads and locks the
+ * new entry by bumping the lock base value.
+ *
+ * size: 0 -> section (1MB)
+ *       1 -> large page (64KB)
+ *       2 -> small page (4KB)
+ *       3 -> supersection (16MB)
+ *
+ */
+void add_tlb_mapping(lvaddr_t vaddr, lpaddr_t paddr, bool preserved, uint8_t size){
+    uint8_t lockbase = omap44xx_mmu_lock_basevalue_rdf(&mmu);
+//    printf("add_tlb_mapping: lockbase: %hhd\n", lockbase);
+    
+    // Describe the new entry: virtual tag, protection bit, page size.
+    omap44xx_mmu_cam_virtual_tag_wrf(&mmu, vaddr>>12);
+    omap44xx_mmu_cam_preserved_wrf(&mmu, preserved);
+    omap44xx_mmu_cam_page_size_wrf(&mmu, (omap44xx_mmu_page_size_t) size);
+    omap44xx_mmu_cam_valid_wrf(&mmu, 1);
+    
+    // Physical side of the translation.
+    omap44xx_mmu_ram_physical_address_wrf(&mmu, paddr>>12);
+    omap44xx_mmu_ram_endianness_wrf(&mmu, (omap44xx_mmu_page_endianness_t) 0);
+    omap44xx_mmu_ram_element_size_wrf(&mmu, (omap44xx_mmu_page_element_size_t) 3);
+    
+    printf("flushing all previous mappings for virtual address 0x%x\n", (uint32_t) vaddr);
+    omap44xx_mmu_flush_entry_flush_entry_wrf(&mmu,1);//flush all entries corresponding to CAM
+    
+    uint32_t cam_temp = omap44xx_mmu_cam_rd(&mmu);//temporarily store the CAM we later want to write
+    //flush all entries corresponding to larger regions containing ours
+    //(large-page, section and supersection sized tags in turn)
+    omap44xx_mmu_cam_virtual_tag_wrf(&mmu, (vaddr & (~LARGE_PAGE_MASK))>>12);
+    omap44xx_mmu_flush_entry_flush_entry_wrf(&mmu,1);//flush all entries corresponding to CAM
+    omap44xx_mmu_cam_virtual_tag_wrf(&mmu, (vaddr & (~ARM_L1_SECTION_MASK))>>12);
+    omap44xx_mmu_flush_entry_flush_entry_wrf(&mmu,1);//flush all entries corresponding to CAM
+    omap44xx_mmu_cam_virtual_tag_wrf(&mmu, (vaddr & (~ARM_L1_SUPERSECTION_MASK))>>12);
+    omap44xx_mmu_flush_entry_flush_entry_wrf(&mmu,1);//flush all entries corresponding to CAM
+    
+
+    // Restore the CAM contents describing our new entry.
+    omap44xx_mmu_cam_wr(&mmu, cam_temp);
+    
+    //make sure we overwrite the next contiguous entry
+    omap44xx_mmu_lock_current_victim_wrf(&mmu, lockbase);
+
+    omap44xx_mmu_ld_tlb_wr(&mmu, (omap44xx_mmu_ld_tlb_t) 1);//actually load entry into TLB
+    printf("TLB entry overwritten\n");
+    omap44xx_mmu_lock_basevalue_wrf(&mmu, lockbase + 1);//lock our new entry
+}
index f7e6797..64fd7ca 100644 (file)
 extern void dbg_break(void);
 void dbg_break(void)
 {
+#ifndef __thumb__ 
     __asm("bkpt #0xffff");
+#else
+    //heteropanda: the Thumb encoding of bkpt only carries an 8-bit
+    //immediate, so use the largest value that fits in pure thumb2
+    __asm("bkpt #0xff");
+#endif
 }
 
 extern void arch_benchmarks(void);
index 7d2ed5f..bac768b 100644 (file)
@@ -15,7 +15,7 @@
 \r
 #include <serial.h>\r
 #include <kputchar.h>\r
-#include <arch/armv7/global.h>\r
+#include <global.h>\r
 \r
 #ifdef __pandaboard__   //use the spinlock module\r
 #include <spinlock.h>\r
index ac8afd2..4fa5034 100644 (file)
@@ -40,6 +40,9 @@ void sys_syscall_kernel(void)
 struct sysret sys_monitor_spawn_core(coreid_t core_id, enum cpu_type cpu_type,
                                      genvaddr_t entry)
 {
+#ifdef __ARM_ARCH_7M__
+printf("armv7-m can not spawn new cores yet");
+#else
        int r;
        switch(cpu_type) {
        case CPU_ARM:
@@ -54,7 +57,7 @@ struct sysret sys_monitor_spawn_core(coreid_t core_id, enum cpu_type cpu_type,
         return SYSRET(SYS_ERR_CORE_NOT_FOUND);
         break;
        }
-
+#endif //defined(__ARM_ARCH_7M__)
     return SYSRET(SYS_ERR_OK);
 }
 
@@ -467,19 +470,30 @@ static struct sysret handle_irq_table_set( struct capability* to,
         int argc
         )
 {
+#ifdef __ARM_ARCH_7M__
+    printf("armv7-m can not handle userspace IRQs yet\n");
+    return SYSRET(SYS_ERR_IRQ_INVALID);
+#else
     struct registers_arm_syscall_args* sa = &context->syscall_args;
 
     return SYSRET(irq_table_set(sa->arg2, sa->arg3));
+#endif
 }
 
+
 static struct sysret handle_irq_table_delete( struct capability* to,
         arch_registers_state_t* context,
         int argc
         )
 {
+#ifdef __ARM_ARCH_7M__
+    printf("armv7-m can not handle userspace IRQs yet\n");
+    return SYSRET(SYS_ERR_IRQ_INVALID);
+#else
     struct registers_arm_syscall_args* sa = &context->syscall_args;
 
     return SYSRET(irq_table_delete(sa->arg2));
+#endif
 }
 
 
index cec0ef3..5e921f0 100644 (file)
 #include <arm_hal.h>
 #include <paging_kernel_arch.h>
 #include <exceptions.h>
-#include <cp15.h>
 #include <cpiobin.h>
 #include <init.h>
-#include <barrelfish_kpi/paging_arm_v7.h>
+#include <barrelfish_kpi/paging_arch.h>
 #include <arm_core_data.h>
 #include <kernel_multiboot.h>
 #include <offsets.h>
 #define STARTUP_PROGRESS()      debug(SUBSYS_STARTUP, "%s:%d\n",          \
                                       __FUNCTION__, __LINE__);
 
+#ifdef __ARM_ARCH_7M__//armv7-M : cortex-M3 processor on pandaboard
+#define BSP_INIT_MODULE_NAME    "armv7-m/sbin/init"
+#define APP_INIT_MODULE_NAME   "armv7-m/sbin/monitor"
+#else//"normal" armv7-A
 #define BSP_INIT_MODULE_NAME    "armv7/sbin/init"
 #define APP_INIT_MODULE_NAME   "armv7/sbin/monitor"
-
+#endif
 
 
 //static phys_mmap_t* g_phys_mmap;        // Physical memory map
@@ -166,6 +169,10 @@ spawn_init_map(union arm_l2_entry* l2_table,
 
 static uint32_t elf_to_l2_flags(uint32_t eflags)
 {
+#ifdef __ARM_ARCH_7M__//the cortex-m3 does not actually understand these flags yet
+//XXX: if we ever allow all these flags, then remove the ifdef again
+    return 0; 
+#else   //normal case, __ARM_ARCH_7A__
     switch (eflags & (PF_W|PF_R))
     {
       case PF_W|PF_R:
@@ -179,6 +186,7 @@ static uint32_t elf_to_l2_flags(uint32_t eflags)
       default:
         panic("Unknown ELF flags combination.");
     }
+#endif
 }
 
 struct startup_l2_info
@@ -558,7 +566,9 @@ static struct dcb *spawn_init_common(const char *name,
     disp->udisp = INIT_DISPATCHER_VBASE;
 
     disp_arm->enabled_save_area.named.r0   = paramaddr;
+#ifndef __ARM_ARCH_7M__ //the armv7-m profile does not have such a mode field
     disp_arm->enabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
+#endif
     disp_arm->enabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;
     disp_arm->disabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;
 
@@ -602,7 +612,9 @@ struct dcb *spawn_bsp_init(const char *name, alloc_phys_func alloc_phys)
     disp_arm->got_base = got_base;
 
     disp_arm->disabled_save_area.named.pc   = init_ep;
+#ifndef __ARM_ARCH_7M__ //the armv7-m profile does not have such a mode field
     disp_arm->disabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
+#endif
     disp_arm->disabled_save_area.named.r10  = got_base;
 
     /* Create caps for init to use */
@@ -689,7 +701,9 @@ struct dcb *spawn_app_init(struct arm_core_data *core_data,
     disp_arm->got_base = got_base;
 
     disp_arm->disabled_save_area.named.pc   = entry_point;
+#ifndef __ARM_ARCH_7M__ //the armv7-m profile does not have such a mode field
     disp_arm->disabled_save_area.named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
+#endif
     disp_arm->disabled_save_area.named.r10  = got_base;
     //disp_arm->disabled_save_area.named.rtls = INIT_DISPATCHER_VBASE;
 
@@ -724,8 +738,10 @@ void arm_kernel_startup(void)
 
         init_dcb = spawn_app_init(glbl_core_data, APP_INIT_MODULE_NAME, app_alloc_phys);
 
+#ifndef __ARM_ARCH_7M__ //armv7-m does not use a gic and can not acknowledge interrupts
        uint32_t irq = gic_get_active_irq();
        gic_ack_irq(irq);
+#endif
     }
 
     /* printf("Trying to enable interrupts\n"); */
diff --git a/kernel/include/arch/armv7-m/arch_gdb_stub.h b/kernel/include/arch/armv7-m/arch_gdb_stub.h
new file mode 100644 (file)
index 0000000..7045d50
--- /dev/null
@@ -0,0 +1,23 @@
+/**
+ * \file
+ * \brief Header for ARMv7-M-specific GDB stub code.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <arm.h>
+
+/** Saved register area used by the GDB stub. */
+extern uintptr_t *gdb_arch_registers;
+
+/** Address of saved registers as void * */
+#define GDB_ARCH_REGADDR    ((void*)gdb_arch_registers)
+
+/** Number of bytes saved in GDB frame */
+#define GDB_ARCH_REGBYTES   (sizeof(uintptr_t) * ARCH_NUMREGS)
diff --git a/kernel/include/arch/armv7-m/arm.h b/kernel/include/arch/armv7-m/arm.h
new file mode 100644 (file)
index 0000000..313e2c7
--- /dev/null
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2007, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+/**
+ * \file
+ * \brief Relay header pulling in the basic KPI type and CPU definitions.
+ */
+
+#ifndef __ARM_H
+#define __ARM_H
+
+#include <barrelfish_kpi/types.h>
+#include <barrelfish_kpi/cpu.h>
+
+#endif //__ARM_H
diff --git a/kernel/include/arch/armv7-m/arm_core_data.h b/kernel/include/arch/armv7-m/arm_core_data.h
new file mode 100644 (file)
index 0000000..9738bad
--- /dev/null
@@ -0,0 +1,76 @@
+/**
+ * \file
+ * \brief Data sent to a newly booted ARM kernel
+ */
+
+/*
+ * Copyright (c) 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef COREDATA_H
+#define COREDATA_H
+
+/// Multiboot-style module descriptor: physical start/end of the module
+/// and the address of its command-line string.
+struct arm_coredata_modinfo {
+    uint32_t    mod_start;
+    uint32_t    mod_end;
+    uint32_t    string;
+    uint32_t    reserved;
+};
+
+/// Multiboot-style memory-map entry.
+struct arm_coredata_mmap {
+    uint32_t    size;
+    uint64_t    base_addr;
+    uint64_t    length;
+    uint32_t    type;
+} __attribute__ ((packed));
+
+/// Multiboot-style ELF section-header-table descriptor.
+struct arm_coredata_elf {
+    uint32_t    num;
+    uint32_t    size;
+    uint32_t    addr;
+    uint32_t    shndx;
+};
+
+/**
+ * \brief Data sent to a newly booted kernel
+ *
+ */
+struct arm_core_data {
+    uint32_t multiboot_flags; ///< The multiboot flags of the cpu module
+    struct arm_coredata_elf elf; ///< elf structure for the cpu module
+    uint32_t module_start;  ///< The start of the cpu module
+    uint32_t module_end;    ///< The end of the cpu module
+    uint32_t urpc_frame_base;
+    uint8_t urpc_frame_bits;
+    uint32_t monitor_binary;
+    uint32_t monitor_binary_size;
+    uint32_t memory_base_start;
+    uint8_t memory_bits;
+    coreid_t src_core_id;
+    uint8_t src_arch_id;
+    coreid_t dst_core_id;
+    char kernel_cmdline[128];
+
+    uint32_t    initrd_start;
+    uint32_t   initrd_size;
+
+    uint32_t    cmdline;
+    uint32_t    mods_count;
+    uint32_t    mods_addr;
+
+    uint32_t    mmap_length;
+    uint32_t    mmap_addr;
+
+    uint32_t    start_free_ram;
+
+    uint32_t    chan_id;
+
+}; //__attribute__ ((packed));
+
+
+#endif
diff --git a/kernel/include/arch/armv7-m/arm_hal.h b/kernel/include/arch/armv7-m/arm_hal.h
new file mode 100644 (file)
index 0000000..aa951f4
--- /dev/null
@@ -0,0 +1,76 @@
+/**
+ * \file
+ * \brief Hardware Abstraction Layer interface for ARMv7-M boards.
+ *
+ * This file defines the hardware abstraction layer for ARMv7-M targets. Each
+ * board is expected to have an implementation that corresponds to this
+ * interface.
+ *
+ * This interface is expected to change as new boards are added.
+ */
+
+/*
+ * Copyright (c) 2007, 2009, 2012, 2013 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef __ARM_HAL_H__
+#define __ARM_HAL_H__
+
+#include <barrelfish_kpi/types.h>
+#include <dev/omap/omap44xx_cortex_m3_nvic_dev.h>
+
+/**
+ * @return Unique 32-bit identifier associated with current board.
+ */
+uint32_t hal_get_board_id(void);
+
+/**
+ * @return Current processor ordinal. Value has range 0 to n_cpus - 1.
+ */
+uint8_t  hal_get_cpu_id(void);
+
+/**
+ * @return true if current processor is bootstrap processor.
+ */
+bool     hal_cpu_is_bsp(void);
+
+/*
+ * nested vectored interrupt controller functionality 
+ * (note that in this model, all interrupts are to local cpu and do not need to be acknowledged)
+ */
+
+// NOTE(review): a non-extern object in a header becomes a tentative
+// definition in every translation unit that includes it, relying on
+// common-symbol linkage; consider `extern` here plus a single definition
+// in a .c file -- confirm against the board implementation.
+omap44xx_cortex_m3_nvic_t nvic;
+
+void     nvic_init(void);
+void     nvic_enable_interrupt(uint32_t int_id, uint16_t prio);
+void     nvic_disable_all_irqs(void);
+uint32_t nvic_get_active_irq(void);
+void     enable_all_system_interrupts(void);
+//void     nvic_raise_softirq(uint8_t irq);//not used in code yet -> since we can not interrupt other cores yet this is a bit pointless
+
+//TODO: heteropanda: not sure what the systick functions should be exactly
+//the pandaboard does not provide a reference to calculate hz, so we count actual cycles
+void     systick_init(uint32_t tick_cycles);
+void     systick_start(void);
+void     systick_stop(void);
+
+
+//XXX: does this even exist for the cortex-m3 ??
+void write_sysflags_reg(uint32_t regval);
+
+extern lpaddr_t sysflagset_base;
+
+
+//pure dummy functions - just so I do not have to do case distinctions in pure debug code
+uint32_t tsc_read(void);
+uint32_t tsc_get_hz(void);
+
+
+/* [2009-11-17 orion] TODO: device enumeration */
+
+#endif // __ARM_HAL_H__
diff --git a/kernel/include/arch/armv7-m/armv7_syscall.h b/kernel/include/arch/armv7-m/armv7_syscall.h
new file mode 100644 (file)
index 0000000..ea432ce
--- /dev/null
@@ -0,0 +1,23 @@
+/**
+ * \file
+ * \brief armv7-specific system calls implementation.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARMV7_SYSCALL_H
+#define ARMV7_SYSCALL_H
+
+#include <capabilities.h>
+
+/// Monitor syscall: boot core \p core_id with a kernel of type
+/// \p cpu_type, entering at \p entry. (Not yet supported on armv7-m.)
+struct sysret sys_monitor_spawn_core(coreid_t core_id, enum cpu_type cpu_type,
+                                     genvaddr_t entry);
+
+#endif // ARMV7_SYSCALL_H
diff --git a/kernel/include/arch/armv7-m/cp15.h b/kernel/include/arch/armv7-m/cp15.h
new file mode 100644 (file)
index 0000000..2b9a531
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2009 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef __CP15_H__
+#define __CP15_H__
+/* The cortex-M3 does not support cp15.
+ * Its functionality is instead implemented using devices
+ * This header file is only for compatibility with code that includes cp15.h
+ *      (and apparently never uses it...)
+ */
+
+#endif // __CP15_H__
diff --git a/kernel/include/arch/armv7-m/exceptions.h b/kernel/include/arch/armv7-m/exceptions.h
new file mode 100644 (file)
index 0000000..dd28f50
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2007, 2008, 2009, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef __EXCEPTIONS_H__
+#define __EXCEPTIONS_H__
+
+//these are the indexes for the vector table, multiply by 4 to get the offsets
+#define ARM_7M_EVECTOR_RESET        1
+#define ARM_7M_EVECTOR_NMI          2
+#define ARM_7M_EVECTOR_HARDFAULT    3
+#define ARM_7M_EVECTOR_MEM          4
+#define ARM_7M_EVECTOR_BUS          5
+#define ARM_7M_EVECTOR_USAGE        6
+#define ARM_7M_EVECTOR_SVCALL       11
+#define ARM_7M_EVECTOR_DEBUGMON     12
+#define ARM_7M_EVECTOR_PENDSV       14
+#define ARM_7M_EVECTOR_SYSTICK      15
+#define ARM_7M_EVECTOR_EXTERNAL     16
+
+
+#define CACHE_LINE_BYTES 32
+
+#if !defined(__ASSEMBLER__)
+
+// NOTE(review): non-extern objects in a header become tentative
+// definitions in every including translation unit (relies on
+// common-symbol linkage); consider `extern` plus a single definition
+// in a .c file -- confirm.
+void *vectortable;//address of vector table. (will also be mapped to virtual address 0)
+void *irq_save_pushed_area_top;//small area for threads that do not have a stack yet
+
+/**
+ * \brief Install and trigger special exception handler, that continues startup
+ */
+void exceptions_early_init(void);
+
+
+/**
+ * Install and enable exception vectors.
+ *
+ * This routine properly sets up the vector table, installing
+ * the basic exception handlers
+ */
+void exceptions_init(void);
+
+/**
+ * Handle page fault in user-mode process.
+ */
+void handle_user_page_fault(lvaddr_t                fault_address,
+                            arch_registers_state_t* saved_context)
+    __attribute__((noreturn));
+
+/**
+ * Handle undefined instruction fault in user-mode process.
+ */
+void handle_user_undef(lvaddr_t                fault_address,
+                       arch_registers_state_t* saved_context)
+    __attribute__((noreturn));
+
+/**
+ * Handle faults occurring in a privileged mode.
+ */
+void fatal_kernel_fault(uint32_t   evector,
+                        lvaddr_t   fault_address,
+                        arch_registers_state_t* saved_context)
+    __attribute__((noreturn));
+
+/**
+ * Handle any IRQ except system calls.
+ *
+ */
+void handle_irq(uint32_t irq, arch_registers_state_t* saved_context)
+    __attribute__((noreturn));
+
+#endif // !defined(__ASSEMBLER__)
+
+#endif // __EXCEPTIONS_H__
+
diff --git a/kernel/include/arch/armv7-m/global.h b/kernel/include/arch/armv7-m/global.h
new file mode 100644 (file)
index 0000000..2f6c918
--- /dev/null
@@ -0,0 +1,47 @@
+/**
+ * \file
+ * \brief A struct for all shared data between the kernels
+ */
+
+/*
+ * Copyright (c) 2008, 2010 ETH Zurich
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+
+#ifndef KERNEL_ARCH_ARM_GLOBAL_H
+#define KERNEL_ARCH_ARM_GLOBAL_H
+
+#include <barrelfish_kpi/spinlocks_arch.h>
+#include <barrelfish_kpi/types.h>
+
+/**
+ * \brief Struct passed to app_cores during boot.
+ * Contains information that the bsp_kernel wants to pass to the app_kernels.
+ */
+struct global {
+    /// Shared locks between the kernels
+    struct {
+        spinlock_t print;       ///< Lock for printing
+    } locks;
+
+    uint32_t tickspersec;   ///< timer ticks per second (from name -- TODO confirm)
+
+    genpaddr_t notify[MAX_COREID];  ///< presumably per-core notification addresses -- verify against users
+};
+
+extern struct global *global;
+
+// Fixed virtual address at which the shared global struct is mapped.
+#if defined(__gem5__)
+#define GLOBAL_VBASE	0x21000
+#elif defined(__pandaboard__)
+#define GLOBAL_VBASE	(GEN_ADDR(31) + 0x21000)
+#else
+#error "unknown armv7 platform"
+#endif
+
+#endif
diff --git a/kernel/include/arch/armv7-m/init.h b/kernel/include/arch/armv7-m/init.h
new file mode 100644 (file)
index 0000000..a6beabb
--- /dev/null
@@ -0,0 +1,37 @@
+/**
+ * \file
+ * \brief ARM architecture initialization
+ */
+
+/*
+ * Copyright (c) 2007, 2008, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef INIT_H
+#define INIT_H
+
+#ifndef __ASSEMBLER__
+
+struct atag;
+/// Architecture-specific kernel entry point; never returns.
+void arch_init(void *pointer)
+//void arch_init(uint32_t board_id, struct atag *atag_paddr,
+//                        lvaddr_t ttbase,
+//               lvaddr_t phys_alloc_top)
+    __attribute__((noreturn));
+
+//struct phys_mmap;
+/// Hand control to the scheduler after initialization; never returns.
+void arm_kernel_startup(void)
+    __attribute__((noreturn));
+
+//continues text_init after the transition to handler mode
+void text_init_continued(void)
+    __attribute__((noreturn));
+
+#endif // __ASSEMBLER__
+
+#endif // INIT_H
diff --git a/kernel/include/arch/armv7-m/io.h b/kernel/include/arch/armv7-m/io.h
new file mode 100644 (file)
index 0000000..90eefc2
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * io.h
+ *
+ * Memory-mapped I/O write accessors.
+ *
+ * Copyright (c) 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef IO_H_
+#define IO_H_
+
+/// Write an 8-bit value to the given device address.
+static inline void writeb(unsigned char b, volatile void *addr)
+{
+	*(volatile unsigned char *) addr = b;
+}
+/// Write a 16-bit value to the given device address.
+static inline void writew(unsigned short b, volatile void *addr)
+{
+	*(volatile unsigned short *) addr = b;
+}
+/// Write a 32-bit value to the given device address.
+/// (Address parameter is volatile void * like the other accessors;
+/// it was plain char *, dropping volatile.)
+static inline void writel(uint32_t b, volatile void *addr)
+{
+	*(volatile uint32_t *) addr = b;
+}
+/// Write a 64-bit value to the given device address.
+/// (Value parameter widened from unsigned int, which silently
+/// truncated the upper 32 bits of the stored value.)
+static inline void writeq(unsigned long long b, volatile void *addr)
+{
+	*(volatile unsigned long long *) addr = b;
+}
+
+#endif /* IO_H_ */
diff --git a/kernel/include/arch/armv7-m/irq.h b/kernel/include/arch/armv7-m/irq.h
new file mode 100644 (file)
index 0000000..74a6340
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef KERNEL_ARCH_ARM_IRQ_H
+#define KERNEL_ARCH_ARM_IRQ_H
+
+struct capability;
+struct idc_recv_msg;
+//struct sysret irq_table_set(struct capability *to, struct idc_recv_msg *msg);
+//struct sysret irq_table_delete(struct capability *to, struct idc_recv_msg *msg);
+/// Register endpoint \p endpoint as handler for interrupt \p nidt.
+errval_t irq_table_set(unsigned int nidt, capaddr_t endpoint);
+/// Remove the handler registered for interrupt \p nidt.
+errval_t irq_table_delete(unsigned int nidt);
+/// Forward interrupt \p irq to its registered user-level handler.
+void send_user_interrupt(int irq);
+
+#endif // KERNEL_ARCH_ARM_IRQ_H
diff --git a/kernel/include/arch/armv7-m/ixp2800_uart.h b/kernel/include/arch/armv7-m/ixp2800_uart.h
new file mode 100644 (file)
index 0000000..6575e5b
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2007, 2008, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef __IXP2800_UART_H__
+#define __IXP2800_UART_H__
+
+// NOTE(review): ixp2800_uart_t is neither defined nor included here;
+// this header appears to be a leftover from the IXP2800 port -- confirm
+// whether it is needed for the armv7-m (OMAP44xx Cortex-M3) target.
+void ixp2800_uart_init(ixp2800_uart_t *uart, lvaddr_t base);
+void ixp2800_putchar(ixp2800_uart_t *uart, char c);
+char ixp2800_getchar(ixp2800_uart_t *uart);
+
+#endif // __IXP2800_UART_H__
diff --git a/kernel/include/arch/armv7-m/kernel_multiboot.h b/kernel/include/arch/armv7-m/kernel_multiboot.h
new file mode 100644 (file)
index 0000000..e82d4a7
--- /dev/null
@@ -0,0 +1,31 @@
+/**
+ * \file
+ * \brief Relay header for multiboot structures and kernel-specific
+ * function definitions.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef KERNEL_MULTIBOOT_H
+#define KERNEL_MULTIBOOT_H
+
+#include <multiboot.h>
+
+/**
+ * Convert a 32bit address from the Multiboot header to a native virtual
+ * address as a char pointer.
+ */
+#define MBADDR_ASSTRING(vaddr)  (char * NTS)TC((uintptr_t)(local_phys_to_mem(vaddr)))
+
+void multiboot_info_print(struct multiboot_info *mb);
+struct multiboot_modinfo *multiboot_find_module(const char *basename);
+uintptr_t multiboot_end_addr(struct multiboot_info *mi);
+
+#endif
diff --git a/kernel/include/arch/armv7-m/kputchar.h b/kernel/include/arch/armv7-m/kputchar.h
new file mode 100644 (file)
index 0000000..f912ed8
--- /dev/null
@@ -0,0 +1,24 @@
+/**
+ * \file
+ * \brief A struct for all shared data between the kernels
+ */
+
+/*
+ * Copyright (c) 2008, 2010 ETH Zurich
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef KERNEL_ARCH_ARM_KPUTCHAR_H
+#define KERNEL_ARCH_ARM_KPUTCHAR_H
+
+#include <serial.h>
+
+void kprintf_begin(void);
+int kputchar(int c);
+void kprintf_end(void);
+
+#endif // KERNEL_ARCH_ARM_KPUTCHAR_H
diff --git a/kernel/include/arch/armv7-m/misc.h b/kernel/include/arch/armv7-m/misc.h
new file mode 100644 (file)
index 0000000..9a62c6f
--- /dev/null
@@ -0,0 +1,45 @@
+/* [2009-07-30 ohodson] TODO: implement! */
+
+/**
+ * \file
+ * \brief Miscellaneous architecture-specific functions
+ */
+
+/*
+ * Copyright (c) 2008, 2009, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARCH_MISC_H
+#define ARCH_MISC_H
+
+//
+// Helpers for pasting #defined values into inline assembler.
+//
+#define STR(x) #x
+#define XTR(x) STR(x)
+
+/**
+ * \brief Set thread-local-storage register.
+ */
+static inline void arch_set_thread_register(uintptr_t value)
+{
+    __asm (
+        "mov "XTR(THREAD_REGISTER)", %[value]" :: [value] "r" (value)
+          );
+}
+
+static inline uintptr_t arch_get_thread_register(void)
+{
+    uintptr_t result;
+    __asm (
+        "mov %[result]," XTR(THREAD_REGISTER) :  [result] "=r" (result)
+          );
+    return result;
+}
+
+#endif /* ARCH_MISC_H */
diff --git a/kernel/include/arch/armv7-m/offsets.h b/kernel/include/arch/armv7-m/offsets.h
new file mode 100644 (file)
index 0000000..c6efeb7
--- /dev/null
@@ -0,0 +1,224 @@
+/**
+ * \file
+ * \brief ARM address space sizes and offsets
+ *
+ * The layout of the ARM virtual address space can be summarized as
+ * follows:
+ *
+ *
+ * User-space maps user-space programs. Physical memory maps all
+ * available physical memory (up to PADDR_SPACE_LIMIT). Kernel-space
+ * maps only the kernel text and data.
+ *
+ * This partition is static and can only be changed at compile-time.
+ *
+ */
+
+/* [2009-07-30 ohodson]TODO: This is a first-cut, layout likely
+ * does not make sense.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef OFFSETS_H
+#define OFFSETS_H
+
+#define GEN_ADDR(bits)          (((genpaddr_t)1) << bits)
+
+/**
+ * Absolute size of virtual address space. This is 32-bit on ARM.
+ */
+#define VADDR_SPACE_SIZE        GEN_ADDR(32)
+
+/**
+ * Absolute size of physical address space.
+ */
+#define PADDR_SPACE_SIZE        GEN_ADDR(32)
+
+/**
+ * Start address of kernel image in physical memory. This is passed to
+ * the linker also. This address is chosen to be the same as Linux on ARM
+ * for GEM5 and/or bootloader compatibility.
+ *
+ * Entry point is 0x11000.
+ *
+ */
+//#define START_KERNEL_PHYS       (0x10000 + 0x1000)
+#define START_KERNEL_PHYS              0x100000
+
+/**
+ * Physical address of the kernel stack at boot time.
+ */
+#define BOOT_STACK_PHYS         0x10000
+
+/**
+ * Kernel offset - virtual base of kernel.
+ */
+#define KERNEL_OFFSET           0xc0000000
+
+/**
+ * Maximum physical address space mappable by the kernel.  Adjust this
+ * for a bigger physical address space.  
+ */
+#define PADDR_SPACE_LIMIT       0xFFFFFFFF
+
+/**
+ * Kernel address space limit is 1 MB currently.
+ */
+#define KERNEL_SPACE_LIMIT      (1L << 20)
+
+/**
+ * Static address space limit for the init user-space domain. The
+ * static space is used to map in code and static data of the init
+ * module, as well as all loaded multiboot modules. init can freely
+ * allocate dynamic memory as soon as it is running. This is 32 MBytes
+ * right now.
+ *
+ * You should make this constant a multiple of #BASE_PAGE_SIZE *
+ * #PTABLE_SIZE or you'll restrict init's static address space
+ * unnecessarily. init's lowest segment should also be based at these
+ * multiples or it restricts itself.
+ *
+ *
+ * NB 32MB is size of the fast context switch extension
+ * per-process address space.
+ */
+#define INIT_SPACE_LIMIT        (32 * 1024 * 1024)
+
+/**
+ * Base address of init address space in virtual memory. init should
+ * start at 4 MByte. The kernel maps in important structures at 2
+ * MByte. This address should be page-table size aligned (i.e. with 4
+ * KByte pages, a page table maps 2 MBytes. Thus, align it to
+ * multiples of 2 MBytes).
+ */
+#define INIT_VBASE              (2 * 1024 * 1024)
+
+/**
+ * Initial amount of physical memory to map during bootup. The low
+ * 1MByte of memory is always expected to be there and has to be
+ * specified here at minimum. If you need more during bootup, increase
+ * this value. This value is also the amount of memory you _expect_ to
+ * be in the system during bootup, or the kernel will crash!
+ */
+#define KERNEL_INIT_MEMORY      (1 * 1024 * 1024)
+
+/**
+ * Absolute offset of mapped physical memory within virtual address
+ * space.  
+ *
+ * 2GB.
+ */
+#define MEMORY_OFFSET           GEN_ADDR(31)
+// 2G (2 ** 31)
+
+/**
+ * Absolute start of RAM in physical memory.
+ */
+#if defined(__gem5__)
+#define PHYS_MEMORY_START       0x0
+#elif defined(__pandaboard__)
+// 2G (2 ** 31)
+#define PHYS_MEMORY_START       GEN_ADDR(31)
+#else
+#error "unknown armv7 platform"
+#endif
+
+/*
+ * Device offset to map devices in high memory.
+ */
+#define DEVICE_OFFSET                  0xff000000
+
+/**
+ * Kernel stack size -- 16KB
+ */
+#define KERNEL_STACK_SIZE       0x4000
+
+/**
+ * The size of the whole kernel image.
+ */
+#define KERNEL_IMAGE_SIZE       (size_t)(&kernel_final_byte - &kernel_first_byte)
+
+/*
+ * Bytes per kernel copy for each core (1 Section)
+ */
+#define KERNEL_SECTION_SIZE            0x100000
+// 1MB, (2 ** 20)
+
+#define KERNEL_STACK_ADDR              (lpaddr_t)kernel_stack
+
+#ifndef __ASSEMBLER__
+
+static inline lvaddr_t local_phys_to_mem(lpaddr_t addr)
+{
+    // On the PandaBoard, this is a nop, because the physical memory is mapped
+    // at the same address in virtual memory
+    // i.e., MEMORY_OFFSET == PHYS_MEMORY_START
+    if(PADDR_SPACE_LIMIT - PHYS_MEMORY_START > 0) {
+        assert(addr < PHYS_MEMORY_START + PADDR_SPACE_LIMIT);
+    }
+    return (lvaddr_t)(addr + ((lpaddr_t)MEMORY_OFFSET - (lpaddr_t)PHYS_MEMORY_START));
+}
+
+static inline lpaddr_t mem_to_local_phys(lvaddr_t addr)
+{
+    assert(addr >= MEMORY_OFFSET);
+    return (lpaddr_t)(addr - ((lvaddr_t)MEMORY_OFFSET - (lvaddr_t)PHYS_MEMORY_START));
+}
+
+static inline lpaddr_t gen_phys_to_local_phys(genpaddr_t addr)
+{
+    //assert(addr < PADDR_SPACE_SIZE);
+    return (lpaddr_t)addr;
+}
+
+static inline genpaddr_t local_phys_to_gen_phys(lpaddr_t addr)
+{
+    return (genpaddr_t)addr;
+}
+
+/**
+ * Symbol: Start of kernel image. This symbol points to the start
+ * address of the kernel image.
+ */
+extern uint8_t kernel_first_byte;
+
+/**
+ * Symbol: End of kernel image. This symbol points to the end address
+ * of the kernel image.
+ */
+extern uint8_t kernel_text_final_byte;
+
+/**
+ * Symbol: End of kernel image. This symbol points to the end address
+ * of the kernel image.
+ */
+extern uint8_t kernel_final_byte;
+
+extern uint8_t kernel_elf_header;
+
+/**
+ * \brief The kernel stack.
+ *
+ * Declared in boot.S.
+ */
+extern uintptr_t kernel_stack[KERNEL_STACK_SIZE/sizeof(uintptr_t)];
+
+#endif  // __ASSEMBLER__
+
+/**
+ * Kernel interrupt jump table
+ */
+#define INT_HANDLER_TABLE      0xFFFF0100
+
+
+
+
+#endif  // OFFSETS_H
diff --git a/kernel/include/arch/armv7-m/paging_kernel_arch.h b/kernel/include/arch/armv7-m/paging_kernel_arch.h
new file mode 100644 (file)
index 0000000..308b5d2
--- /dev/null
@@ -0,0 +1,120 @@
+/**
+ * \file
+ * \brief ARM kernel page-table structures.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef KERNEL_ARCH_ARM_PAGING_H
+#define KERNEL_ARCH_ARM_PAGING_H
+
+// XXX: Not sure if these includes are required
+#include <capabilities.h>
+#include <barrelfish_kpi/cpu.h>
+#include <barrelfish_kpi/paging_arch.h>
+#include <dev/omap/omap44xx_mmu_dev.h>
+
+omap44xx_mmu_t mmu;//use mackerel device for manipulating the MMU
+
+
+/**
+ * Setup bootstrap page table with direct and relocated mappings for kernel.
+ *
+ * This function does not enable paging.
+ *
+ * @param initial_base
+ * @param initial_size
+ */
+void paging_map_kernel(uintptr_t initial_base, size_t initial_size);
+
+lvaddr_t paging_map_device(lpaddr_t base, size_t size);
+
+
+/**
+ * Maps a device to a l2 page.
+ * Assumption: corresponding L1 entry already set
+ *
+ */
+
+void paging_map_device_page(uintptr_t l1_table,
+                                                   lvaddr_t device_vbase,
+                                                   lpaddr_t device_pbase,
+                                                   size_t device_bytes);
+
+/**
+ * Add kernel mappings to newly constructed page table.
+ *
+ * @param new_table_addr  address of newly constructed page table.
+ * @param new_table_bytes size of newly constructed page table.
+ */
+void paging_make_good(lvaddr_t new_table_addr, size_t new_table_bytes);
+
+void paging_map_user_pages_l1(lvaddr_t table_addr, lvaddr_t vaddr, lpaddr_t paddr);
+
+void paging_set_l2_entry(uintptr_t* l2entry, lpaddr_t paddr, uintptr_t flags);
+
+void paging_context_switch(lpaddr_t table_addr);
+
+void paging_arm_reset(lpaddr_t paddr, size_t bytes);
+
+
+// REVIEW: [2010-05-04 orion]
+// these were deprecated in churn, enabling now to get system running again.
+
+void paging_map_kernel_section(uintptr_t ttbase,lvaddr_t vbase, lpaddr_t pbase);
+void paging_map_memory(uintptr_t ttbase, lpaddr_t paddr, size_t bytes);
+
+static inline bool is_root_pt(enum objtype type) {
+    return type == ObjType_VNode_ARM_l1;
+}
+
+static inline size_t get_pte_size(void) {
+    // both l1_entry and l2_entry are 4 bytes
+    return 4;
+}
+#define PTABLE_ENTRY_SIZE get_pte_size()
+
+void do_one_tlb_flush(genvaddr_t vaddr);
+
+/*
+ * \brief flushes all non-preserved TLB entries
+ * XXX: if the currently executing code is not cached or protected, flushing will lead to a crash
+ */
+void do_full_tlb_flush(void);
+
+/*
+ * Cortex-M3 on pandaboard specific stuff
+ * since the M3 memory model on the pandaboard is a bit weird, we need a few additional functions
+ * some of these would be provided by a cp15, but we don't have one
+ */
+
+
+/*
+ * \brief add another (protected) entry into L2 TLB
+ * size: 0 -> section (1MB)
+ *       1 -> large page (64KB)
+ *       2 -> small page (4KB)
+ *       3 -> supersection (16MB)
+ */
+void add_tlb_mapping(lvaddr_t vaddr, lpaddr_t paddr, bool preserved, uint8_t size);
+
+void set_tlb_lock_basevalue(uint8_t basevalue);
+
+/*
+ * \brief read the version number of the table (called ignored3), to see if the upper half 
+ * of the table has been modified and needs to be replicated. 
+ * These version numbers are currently necessary for paging_context_switch, because
+ * the upper half of the table is expected to be preserved
+ */
+uint8_t read_table_version(union arm_l1_entry* ttb);
+void write_table_version(union arm_l1_entry* ttb, uint8_t version);
+void increase_table_version(union arm_l1_entry* ttb);
+
+#endif // KERNEL_ARCH_ARM_PAGING_H
diff --git a/kernel/include/arch/armv7-m/phys_mmap.h b/kernel/include/arch/armv7-m/phys_mmap.h
new file mode 100644 (file)
index 0000000..f325650
--- /dev/null
@@ -0,0 +1,71 @@
+/**
+ * \file
+ * \brief Rudimentary physical memory map.
+ */
+
+/*
+ * Copyright (c) 2008, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef ARM_PHYS_MAP_H
+#define ARM_PHYS_MAP_H
+
+typedef struct
+{
+    lpaddr_t    start;
+    lpaddr_t    limit;
+} phys_region_t;
+
+#define PHYS_MAX_REGIONS (32)
+
+typedef struct phys_mmap
+{
+    int           region_count;
+    phys_region_t regions[PHYS_MAX_REGIONS];
+} phys_mmap_t;
+
+/**
+ * Insert range into physical memory map. Range must not overlap
+ * with existing map entries. This function does not coalesce
+ * abutting regions.
+ *
+ * @param mmap  memory map.
+ * @param start start of physical region.
+ * @param limit limit of physical region.
+ *
+ * @return non-zero upon success.
+ */
+int phys_mmap_add(phys_mmap_t* mmap,
+                  lpaddr_t     start,
+                  lpaddr_t     limit);
+
+/**
+ * Allocate and remove from physical memory map.
+ *
+ * @param mmap      memory map
+ * @param bytes     number of bytes to remove.
+ * @param alignment alignment required.
+ *
+ * @return pointer to allocated region or null.
+ */
+lpaddr_t phys_mmap_alloc(phys_mmap_t* mmap,
+                         size_t       bytes,
+                         size_t       alignment);
+
+/**
+ * Remove region from physical address map.
+ *
+ * @param mmap
+ * @param start
+ * @param limit
+ */
+void phys_mmap_remove(phys_mmap_t* mmap,
+                      lpaddr_t     start,
+                      lpaddr_t     limit);
+
+#endif // ARM_PHYS_MAP_H
diff --git a/kernel/include/arch/armv7-m/pl011_uart.h b/kernel/include/arch/armv7-m/pl011_uart.h
new file mode 100644 (file)
index 0000000..085746c
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2007, 2008, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef __PL011_UART_H__
+#define __PL011_UART_H__
+
+void pl011_uart_init(pl011_uart_t *uart, lvaddr_t base);
+void pl011_putchar(pl011_uart_t *uart, char c);
+char pl011_getchar(pl011_uart_t *uart);
+
+#endif // __PL011_UART_H__
diff --git a/kernel/include/arch/armv7-m/spinlock.h b/kernel/include/arch/armv7-m/spinlock.h
new file mode 100644 (file)
index 0000000..17f7755
--- /dev/null
@@ -0,0 +1,38 @@
+/**
+ * \file
+ * \brief interface for OMAP44XX hardware spinlock module
+ */
+/*
+ * Copyright (c) 2007-2013 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, CAB F.78, Universitaestr. 6, CH-8092 Zurich. 
+ * Attn: Systems Group.
+ */
+
+#ifndef __SPINLOCK_H
+#define __SPINLOCK_H
+
+/* Need to include this for errval_t */
+#include <errors/errno.h>
+
+#define PRINTF_LOCK 0   //we currently only use one lock
+
+/*
+ * Initialize the module
+ */
+extern errval_t spinlock_init(void);
+extern errval_t spinlock_early_init(void);
+
+/*
+ * acquire and release specific locks
+ * the names were chosen as a contrast to "aquire_spinlock", because the arguments differ
+ * (here we want the index of the lock in the module, instead of a generic address)
+ */
+void spinlock_aquire(int locknumber);
+void spinlock_release(int locknumber);
+
+
+#endif //__SPINLOCK_H
diff --git a/kernel/include/arch/armv7-m/start_aps.h b/kernel/include/arch/armv7-m/start_aps.h
new file mode 100644 (file)
index 0000000..e9bf2b5
--- /dev/null
@@ -0,0 +1,52 @@
+/**
+ * \file
+ * \brief Definitions for the startup of application processors.
+ *
+ *  This file contains the prototypes for the functions which start
+ *  the application processors
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef START_APS_H_
+#define START_APS_H_
+
+#if defined(__gem5__)
+
+    #define AP_STARTING_UP 1
+    #define AP_STARTED     2
+
+    //#define AP_LOCK_PHYS     0x20000
+    #define AP_WAIT_PHYS       0x20000
+    #define AP_GLOBAL_PHYS     0x21000
+
+#elif defined(__pandaboard__)
+
+    #define AP_STARTING_UP  4422
+    #define AP_STARTED      6633
+    #define AP_WAIT_PHYS    ((lpaddr_t)0x80020000)
+    #define AP_GLOBAL_PHYS  ((lpaddr_t)0x80021000)
+    #define AUX_CORE_BOOT_0 ((lpaddr_t)0x48281800)
+    #define AUX_CORE_BOOT_1 ((lpaddr_t)0x48281804)
+
+    // address of the section needed to map AUX_CORE vars
+    #define AUX_CORE_BOOT_SECT       (AUX_CORE_BOOT_0 & ~ARM_L1_SECTION_MASK)
+    // offset of AUX_CORE_BOOT_0 in the section
+    #define AUX_CORE_BOOT_0_OFFSET   (AUX_CORE_BOOT_0 & ARM_L1_SECTION_MASK)
+    // offset of AUX_CORE_BOOT_1 in the section
+    #define AUX_CORE_BOOT_1_OFFSET   (AUX_CORE_BOOT_1 & ARM_L1_SECTION_MASK)
+
+#else
+    #error "Unknown ARM arch"
+#endif
+
+int start_aps_arm_start(uint8_t core_id, lvaddr_t entry);
+
+#endif // START_APS_H_
diff --git a/kernel/include/arch/armv7-m/startup_arch.h b/kernel/include/arch/armv7-m/startup_arch.h
new file mode 100644 (file)
index 0000000..a2c7e22
--- /dev/null
@@ -0,0 +1,63 @@
+/**
+ * \file
+ * \brief Startup prototypes.
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef _STARTUP_ARCH_H_
+#define _STARTUP_ARCH_H_
+
+#include <startup.h>
+#include <offsets.h>
+
+#define INIT_L1_BYTES           (ARM_L1_MAX_ENTRIES * ARM_L1_BYTES_PER_ENTRY)
+
+#define INIT_L2_PAGES           ((INIT_SPACE_LIMIT - INIT_VBASE) / BASE_PAGE_SIZE)
+#define INIT_L2_BYTES           INIT_L2_PAGES * ARM_L2_BYTES_PER_ENTRY
+
+#define INIT_BOOTINFO_VBASE     0x200000
+#define INIT_ARGS_VBASE         (INIT_BOOTINFO_VBASE + BOOTINFO_SIZE)
+#define INIT_DISPATCHER_VBASE   (INIT_ARGS_VBASE + ARGS_SIZE)
+#define MON_URPC_VBASE          (INIT_DISPATCHER_VBASE + DISPATCHER_SIZE)
+
+/*
+//XXX: we currently ignore all paging flags
+#define INIT_PERM_RO            (ARM_L2_SMALL_CACHEABLE  | \
+                                 ARM_L2_SMALL_BUFFERABLE | \
+                                 ARM_L2_SMALL_USR_RO)
+
+#define INIT_PERM_RW            (ARM_L2_SMALL_CACHEABLE  | \
+                                 ARM_L2_SMALL_BUFFERABLE | \
+                                 ARM_L2_SMALL_USR_RW)
+*/
+#define INIT_PERM_RO        0
+#define INIT_PERM_RW        0
+
+
+// Well known address for glbl_core_data @64Kb
+#if defined(__gem5__)
+#define GLBL_COREDATA_BASE_PHYS         (0x10000)
+#elif defined(__pandaboard__)
+#define GLBL_COREDATA_BASE_PHYS                (GEN_ADDR(31) + 0x10000)
+#else
+#error "unknown armv7 platform"
+#endif
+
+void create_module_caps(struct spawn_state *st);
+
+struct dcb *spawn_bsp_init(const char *name, alloc_phys_func alloc_phys);
+
+struct dcb *spawn_app_init(struct arm_core_data *core_data,
+                           const char *name, alloc_phys_func alloc_phys);
+
+extern struct arm_core_data *glbl_core_data;
+
+#endif
diff --git a/kernel/include/arch/armv7-m/ti_i2c.h b/kernel/include/arch/armv7-m/ti_i2c.h
new file mode 100644 (file)
index 0000000..6f3fddd
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2012, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, CAB F.78, Universitaetstr 6, CH-8092 Zurich.
+ */
+#ifndef __TI_I2C_H__
+#define __TI_I2C_H__
+
+enum i2c_flags {
+    I2C_RD     = 0x1,
+    I2C_WR     = 0x2,
+    I2C_NOSTOP = 0x4,
+};
+
+struct i2c_msg {
+    // should not exceed 10 bits
+    uint16_t slave;
+    enum i2c_flags flags;
+    uint16_t length;
+    uint8_t *buf;
+};
+
+void ti_i2c_init(int i);
+errval_t ti_i2c_transfer(int i, struct i2c_msg *msgs, size_t msgcount);
+
+#endif // __TI_I2C_H__
index f07f706..96d586c 100644 (file)
@@ -66,6 +66,7 @@
       arch_assembly "x86_32"  = [ "arch/x86_32/entry.S" ]
       arch_assembly "x86_64"  = [ "arch/x86_64/entry.S" ]
       arch_assembly "arm"     = [ "arch/arm/entry.S", "arch/arm/syscall.S" ]
+      arch_assembly "armv7-m" = [ "arch/arm/entry.S", "arch/arm/syscall.S" ]
       arch_assembly _         = []
 
   in
index 9fbaa45..849555c 100644 (file)
@@ -43,9 +43,18 @@ STATIC_ASSERT(CPSR_REG == 0,  "broken context assumption");
 STATIC_ASSERT(NUM_REGS == 17, "broken context assumption");
 STATIC_ASSERT(PC_REG   == 16, "broken context assumption");
 
+
+/*
+ * XXX: there is no guarantee that the context has been set up by
+ * disp_save_context, so we can not cut corners by not restoring registers
+ * clobbered in disp_save_context.
+ * e.g. when a new thread is created, it is started using this function, with r0 and r1
+ * being arguments.
+ */
 static void __attribute__((naked)) __attribute__((noinline))
 disp_resume_context(struct dispatcher_shared_generic *disp, uint32_t *regs)
 {
+#ifndef __thumb2__  //use normal ARM assembly
     __asm volatile(
         /* Re-enable dispatcher */
         "    mov     r2, #0                                             \n\t"
@@ -58,11 +67,58 @@ disp_resume_context(struct dispatcher_shared_generic *disp, uint32_t *regs)
         "disp_resume_context_epilog:                                    \n\t"
         "    mov     r0, r0          ; nop                              \n\t"
                   );
+#else       //use pure thumb2
+//we can not use ldm in quite the same way,
+//so we have to restore some registers one by one
+
+//to restore both a general-purpose register AND the pc, we need them to be adjacent in memory
+//since this is normally not the case and we can not 
+//change the data structure we are reading from,
+//we first push them on the restored stack
+    __asm volatile(
+        /* Re-enable dispatcher */
+        "    mov     r2, #0                                             \n\t"
+        "    str     r2, [r0, # " XTR(OFFSETOF_DISP_DISABLED) "]        \n\t"
+        //restore sp and lr first, because they can not be used with ldr and need a temp
+        "    ldr     r0,  [r1, #(" XTR(SP_REG) "*4)]                    \n\t"//read sp
+        "    mov     sp,  r0                                            \n\t"
+        "    ldr     r0,  [r1, #(" XTR(LR_REG) "*4)]                    \n\t"//read lr
+        "    mov     lr,  r0                                            \n\t"
+        /* Restore apsr condition bits  */
+        "    ldr     r0, [r1, #(" XTR(CPSR_REG) "*4)]                   \n\t"
+        "    msr     apsr, r0                                           \n\t"
+        //read pc and r1 values and push them on stack
+        "    ldr     r2,  [r1, #(" XTR(R1_REG) "*4)]                    \n\t"//read r1
+        "    ldr     r3,  [r1, #(" XTR(PC_REG) "*4)]                    \n\t"//read pc
+        //make sure lsb is one (force thumb mode)
+        "    orr     r3,  #1                                            \n\t"
+        "    push    {r2, r3}                                           \n\t"
+        /* Restore registers */
+        "    ldr     r0,  [r1, #(" XTR(R0_REG) "*4)]                    \n\t"
+        "    ldr     r2,  [r1, #(" XTR(R2_REG) "*4)]                    \n\t"
+        "    ldr     r3,  [r1, #(" XTR(R3_REG) "*4)]                    \n\t"
+        "    ldr     r4,  [r1, #(" XTR(R4_REG) "*4)]                    \n\t"
+        "    ldr     r5,  [r1, #(" XTR(R5_REG) "*4)]                    \n\t"
+        "    ldr     r6,  [r1, #(" XTR(R6_REG) "*4)]                    \n\t"
+        "    ldr     r7,  [r1, #(" XTR(R7_REG) "*4)]                    \n\t"
+        "    ldr     r8,  [r1, #(" XTR(R8_REG) "*4)]                    \n\t"
+        "    ldr     r9,  [r1, #(" XTR(R9_REG) "*4)]                    \n\t"
+        "    ldr     r10, [r1, #(" XTR(R10_REG) "*4)]                   \n\t"
+        "    ldr     r11, [r1, #(" XTR(R11_REG) "*4)]                   \n\t"
+        "    ldr     r12, [r1, #(" XTR(R12_REG) "*4)]                   \n\t"
+        //pop r1 and pc, leaving no register clobbered
+        "    pop     {r1, pc}                                           \n\t"
+        "disp_resume_context_epilog:                                    \n\t"
+        "    nop                                                        \n\t"
+        );
+#endif //defined(__thumb2__)
 }
 
 static void __attribute__((naked))
 disp_save_context(uint32_t *regs)
 {
+#ifndef __thumb2__
+//use normal arm assembly
     __asm volatile(
         "    mrs     r1, cpsr                                           \n\t"
         "    adr     r2, disp_save_context_resume                       \n\t"
@@ -72,6 +128,22 @@ disp_save_context(uint32_t *regs)
         "disp_save_context_resume:                                      \n\t"
         "    bx      lr                                                 \n\t"
                   );
+#else   //use pure thumb2
+//stm can not store some combinations of registers, so we will have to store some one-by-one
+//also, we do not really need to store the already clobbered registers
+    __asm volatile(
+        "    mrs     r1, apsr                                           \n\t"
+        "    adr     r2, disp_save_context_resume                       \n\t"
+        "    str     r1,  [r0]                                          \n\t"//save apsr
+        "    add     r0,  #8                                            \n\t"//point to r1
+        "    stmia   r0!, {r1-r12}                                      \n\t"//save most registers
+        "    str     sp,  [r0], #4                                      \n\t"//save sp
+        "    str     lr,  [r0], #4                                      \n\t"//save lr
+        "    str     r2,  [r0]                                          \n\t"//set saved pc to resume label
+        "disp_save_context_resume:                                      \n\t"
+        "    bx      lr                                                 \n\t"
+                  );
+#endif  //defined(__thumb2__)
 }
 
 ///////////////////////////////////////////////////////////////////////////////
index 66dfaaf..06578b8 100644 (file)
 #include <barrelfish/dispatch.h>
 #include <asmoffsets.h>
 
+    .syntax unified
        .text
+       .extern disp_run, disp_pagefault, disp_pagefault_disabled, disp_trap
        .globl run_entry, pagefault_entry, disabled_pagefault_entry, trap_entry
 
+/*
+heteropanda: since thumb2 has a shorter branch range, I had to do long calls.
+   for some reason, putting the branches in a macro did not compile, so 
+   I put in case distinctions every time we branch (which is admittedly ugly)
+*/
+
 .macro init_sp offset
+#ifndef __thumb2__
+//normal arm operations
         ldr     r12, =(OFFSETOF_DISP_GENERIC +\offset)
         add     sp, THREAD_REGISTER, r12
+#else
+//XXX: this assumes we are allowed to clobber r3!
+        ldr     r3, =(OFFSETOF_DISP_GENERIC +\offset)
+        add     r3, THREAD_REGISTER, r3
+        mov     sp, r3
+#endif        
 .endm
+
+//since thumb2 has a shorter branch range, we have to do long calls
+.macro branch target
+#ifndef __thumb2__
+//normal arm operations
+        b   \target
+#else
+        ldr     r3, =(\target)
+        bx      r3
+#endif  
+.endm
+        
         
 //
 // void run_entry(struct disp_priv* p)
 //        
 run_entry:
-        init_sp OFFSETOF_DISP_PRIV_STACK_LIMIT
-        mov     r0, r9
-        b       disp_run
+        init_sp      OFFSETOF_DISP_PRIV_STACK_LIMIT
+        mov          r0, r9
+        branch       disp_run
+
 
 //
 // void pagefault_entry(disp ptr, vaddr_t fault_addr, uintptr_t error, vaddr_t pc)
 //
 pagefault_entry:
-        init_sp OFFSETOF_DISP_PRIV_STACK_LIMIT
-        b       disp_pagefault
+        init_sp      OFFSETOF_DISP_PRIV_STACK_LIMIT
+        branch       disp_pagefault
+
 
 //
 // void disabled_pagefault_entry(disp ptr, vaddr_t fault_addr, uintptr_t error, vaddr_t pc)
 //
 disabled_pagefault_entry:
-        init_sp OFFSETOF_DISP_PRIV_TRAP_STACK_LIMIT
-        b       disp_pagefault_disabled
+        init_sp      OFFSETOF_DISP_PRIV_TRAP_STACK_LIMIT
+        branch       disp_pagefault_disabled
+
 
 //
 // void trap_entry(disp ptr, uintptr_t irq, uintptr_t error, vaddr_t pc)
 //
 trap_entry:
-        init_sp OFFSETOF_DISP_PRIV_TRAP_STACK_LIMIT
-        b       disp_trap
+        init_sp      OFFSETOF_DISP_PRIV_TRAP_STACK_LIMIT
+        branch       disp_trap
+
index add591b..05b04fa 100644 (file)
 #include <stdio.h>
 
 // Location of VSpace managed by this system.
-#define VSPACE_BEGIN   ((lvaddr_t)1UL*1024*1024*1024)
+#ifdef __ARM_ARCH_7M__
+//virtual section 0x40000000-0x40100000 can not be used as regular memory 
+//because of "bit-banding".
+//0x42000000-0x44000000 is also dangerous, so we start after that
+//XXX: there are more virtual regions we 
+//are not allowed to use -> find out where to reserve those
+#define VSPACE_BEGIN   ((lvaddr_t)(1UL*1024*1024*1024 + 64UL*1024*1024))    //0x44000000
+#else       //"normal" arm architectures
+#define VSPACE_BEGIN   ((lvaddr_t)1UL*1024*1024*1024)   //0x40000000
+#endif
+
 
 // Amount of virtual address space reserved for mapping frames
 // backing refill_slabs.
index 8038025..1a771dd 100644 (file)
@@ -7,6 +7,8 @@
  * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
  */
 
+#ifndef __thumb2__
+//normal arm instructions
         .arm
         .text
         .globl syscall
@@ -38,3 +40,34 @@ swi_done:
        nop
        nop
        nop
+       
+       
+#else   //use only the thumb2 instruction set
+        .syntax unified
+        .text
+        .globl syscall
+syscall:
+    // Save pointer to return structure (r0), callee-save
+       // registers (r4-r10,r12) that are clobbered.
+        mov     r12, sp
+        //pc can not be in the list, but it is also not restored -> ignore
+        push    {r0, r4-r10, r11, r12, lr}
+        ldr     r0,  [sp, #76]   
+        ldmia   r12, {r4-r10, r12}
+        ldr     lr, =swi_done
+        svc     #0         
+swi_done:
+       // This is where we end up executing after the system call?
+       // This accesses the stack, which was restored in do_resume
+        pop     {r3,r4-r10} // pop 7 registers
+        // r0,r1 contain return values, r3 points to return structure
+        str     r0, [r3, #0]
+        str     r1, [r3, #4]
+        pop {r11}
+
+        pop {r3} // Using r3 to temporarily hold the value of sp register
+                 // Warning: Assuming that clobbering r3 is OK!
+        pop {lr} // Return address where we have to return
+        mov sp, r3  // Restoring sp from r3 now.
+        bx  lr      // return by loading pc
+#endif
index 6d5d3f2..1be8f33 100644 (file)
@@ -34,7 +34,9 @@ registers_set_initial(arch_registers_state_t *regs, struct thread *thread,
                       lvaddr_t entry, lvaddr_t stack, uint32_t arg1,
                       uint32_t arg2, uint32_t arg3, uint32_t arg4)
 {
+#ifndef __ARM_ARCH_7M__ //the armv7-m profile does not have such a mode field
     regs->named.cpsr = ARM_MODE_USR | CPSR_F_MASK;
+#endif
     regs->named.r0 = arg1;
     regs->named.r1 = arg2;
     regs->named.r2 = arg3;
index 716d7dc..cdbfadc 100644 (file)
@@ -13,7 +13,7 @@
  */
 
 #include <asmoffsets.h>
-
+    .syntax unified
        .text
        .globl  _start, _start_init
 
@@ -25,8 +25,8 @@ _start:
 
 _start_generic:
         ldr     sp, =crt0_temp_stack
-       // Call barrelfish_init_disabled(struct dispatcher* d, bool init_dom_arg)
-       b       barrelfish_init_disabled
+           // Call barrelfish_init_disabled(struct dispatcher* d, bool init_dom_arg)
+           b   barrelfish_init_disabled
 
 _start_init:
         // Entry for the init process
index 69930fb..b6ed5cb 100644 (file)
@@ -7,6 +7,7 @@ let
     arch_srcs "arm11mp" = [ "machine/arm/setjmp.S" ]
     arch_srcs "xscale"  = [ "machine/arm/setjmp.S" ]
     arch_srcs "armv7"   = [ "machine/arm/setjmp.S" ]
+    arch_srcs "armv7-m" = [ "machine/arm/setjmp.S" ]
     arch_srcs  x        = error ("Unknown architecture for newlib: " ++ x)
 in
 [ build library {
index e44f1bb..5b5577d 100644 (file)
@@ -223,8 +223,10 @@ void spawn_arch_set_registers(void *arch_load_info,
     disp_arm->got_base = got_base;
 
     enabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;
-    enabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;
-
     disabled_area->regs[REG_OFFSET(PIC_REGISTER)] = got_base;
+    
+#ifndef __ARM_ARCH_7M__ //armv7-m does not support these flags
+    enabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;
     disabled_area->named.cpsr = CPSR_F_MASK | ARM_MODE_USR;
+#endif
 }
index 0d54403..646b1c8 100755 (executable)
@@ -37,7 +37,9 @@ BINS=$(awk '/^kernel/ || /^module/ {print $2}' $MENU_LST)
 # IDX is a counter incremented for each binary.  
 IDX=1
 for BIN in $BINS; do
-  SLASH=${BIN////_}
+  #was SLASH=${BIN////_}, which only replaced slashes, but we need to replace "-" for armv7-m
+  UNDERSCORED=${BIN//-/_}
+  SLASH=${UNDERSCORED////_}
   BIN_OUT="$OUTPUT_PREFIX/${FILE_PREFIX}_$SLASH"
   OBJCOPY=$(which arm-none-linux-gnueabi-objcopy || which arm-linux-gnueabi-objcopy)
   echo $BIN '->' $BIN_OUT
index b692c39..886d314 100644 (file)
@@ -35,6 +35,9 @@ static char *get_symbol_name_prefix(char *original) {
     if (r[i] == '/') {
       r[i] = '_';
     }
+    if (r[i] == '-') {//needed for armv7-m
+      r[i] = '_';
+    }
   }
   return r;
 }
index 4870dbb..3b2e2ad 100644 (file)
@@ -26,7 +26,7 @@
  */
 
        .text
-       .arm
+//     .arm    //interferes with thumb code
        .globl  molly_start, molly_to_kernel_transition
        //.extern cd, kernel_entry
 
         // Entry point.  Initialize a stack and branch to the
         // C entry point in molly_init.c
 molly_start:
-       ldr             sp, =molly_kernel_stack
-       add             sp, sp, #4096
+       ldr             r6, =molly_kernel_stack
+       addw    r6, r6, #2048//done in two steps to make sure it is thumb-compatible
+       addw    r6, r6, #2048
+       mov     sp, r6
        b               molly_init
 
         // Never reached
@@ -50,6 +52,11 @@ halt:
         // r0 value through which the kernel expects
         // the core_data info.
 molly_to_kernel_transition:
+#ifdef __thumb2__
+//in thumb, the lsb of the starting address must be 1
+        mov     r6, #1
+        orr     r0, r0, r6
+#endif
         mov     lr, r0
         mov     r0, r1
         bx             lr
index 2cd6f0d..888b60d 100644 (file)
@@ -20,6 +20,6 @@
                         "omap/omap44xx_fdif",
                         "omap/omap44xx_device_prm" ],
                     addLibraries = ["driverkit"],
-                    architectures = ["armv7"]
+                    architectures = ["armv7", "armv7-m"]
                   }
-]
\ No newline at end of file
+]
index c61e639..595fbf6 100644 (file)
@@ -33,6 +33,9 @@ static coreid_t my_core_id;
 #elif defined(__ARM_ARCH_7A__)
 #       define MONITOR_NAME  "armv7/sbin/monitor"
 #       define MEM_SERV_NAME "armv7/sbin/mem_serv"
+#elif defined(__ARM_ARCH_7M__)
+#       define MONITOR_NAME  "armv7-m/sbin/monitor"
+#       define MEM_SERV_NAME "armv7-m/sbin/mem_serv"
 #elif defined(__arm__)
 #       define MONITOR_NAME  "armv5/sbin/monitor"
 #       define MEM_SERV_NAME "armv5/sbin/mem_serv"
index e926927..5d8560d 100644 (file)
@@ -37,6 +37,7 @@
      arch_srcs "armv5"   = [ "arch/arm/boot.c", "arch/arm/inter.c", "arch/arm/monitor_server.c" ]
      arch_srcs "xscale"  = [ "arch/arm/boot.c", "arch/arm/inter.c", "arch/arm/monitor_server.c" ]
      arch_srcs "armv7"   = [ "arch/armv7/boot.c", "arch/armv7/inter.c", "arch/armv7/monitor_server.c", "arch/armv7/notify_ipi.c" ]
+     arch_srcs "armv7-m"   = [ "arch/armv7/boot.c", "arch/armv7/inter.c", "arch/armv7/monitor_server.c", "arch/armv7/notify_ipi.c" ]
      arch_srcs _         = []
 
      idc_srcs = concat $ map getsrcs $ optInterconnectDrivers $ options arch
index 2db0d75..2f69870 100644 (file)
@@ -15,7 +15,7 @@
 #include "monitor.h"
 #include <inttypes.h>
 #include <elf/elf.h>
-#include <target/arm/barrelfish_kpi/paging_arm_v7.h>
+#include <barrelfish_kpi/paging_arch.h>
 #include <target/arm/barrelfish_kpi/arm_core_data.h>
 
 /// Round up n to the next multiple of size
index fac7c43..97e82ba 100644 (file)
@@ -17,6 +17,6 @@
                        mackerelDevices = [ "acpi_ec", "lpc_ioapic" ],                      
                        cFiles = [ "main.c", "octopus_stubs.c" ],
                        addLibraries = [ "octopus_server", "octopus_parser" ],
-                       architectures = [ "armv5", "armv7", "xscale", "scc" ]
+                       architectures = [ "armv5", "armv7","armv7-m", "xscale", "scc" ]
    }
 ]