#include <barrelfish/caddr.h>
#include <barrelfish_kpi/paging_arch.h>
-static inline struct sysret cap_invoke(struct capref to, uintptr_t cmd,
- uintptr_t arg1, uintptr_t arg2,
- uintptr_t arg3, uintptr_t arg4,
- uintptr_t arg5, uintptr_t arg6,
- uintptr_t arg7, uintptr_t arg8,
- uintptr_t arg9, uintptr_t arg10,
- uintptr_t arg11)
-{
- uint8_t invoke_bits = get_cap_valid_bits(to);
- capaddr_t invoke_cptr = get_cap_addr(to) >> (CPTR_BITS - invoke_bits);
-
- return syscall((invoke_bits << 16) | (cmd << 8) | SYSCALL_INVOKE,
- invoke_cptr, arg1, arg2, arg3, arg4, arg5, arg6,
- arg8, arg9, arg10, arg11);
-}
-
-#define cap_invoke12(to, _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l) \
- cap_invoke(to, _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l)
-#define cap_invoke11(to, _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k) \
- cap_invoke12(to, _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, 0)
-#define cap_invoke10(to, _a, _b, _c, _d, _e, _f, _g, _h, _i, _j) \
- cap_invoke11(to, _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, 0)
-#define cap_invoke9(to, _a, _b, _c, _d, _e, _f, _g, _h, _i) \
- cap_invoke10(to, _a, _b, _c, _d, _e, _f, _g, _h, _i, 0)
-#define cap_invoke8(to, _a, _b, _c, _d, _e, _f, _g, _h) \
- cap_invoke9(to, _a, _b, _c, _d, _e, _f, _g, _h, 0)
-#define cap_invoke7(to, _a, _b, _c, _d, _e, _f, _g) \
- cap_invoke8(to, _a, _b, _c, _d, _e, _f, _g, 0)
-#define cap_invoke6(to, _a, _b, _c, _d, _e, _f) \
- cap_invoke7(to, _a, _b, _c, _d, _e, _f, 0)
-#define cap_invoke5(to, _a, _b, _c, _d, _e) \
- cap_invoke6(to, _a, _b, _c, _d, _e, 0)
-#define cap_invoke4(to, _a, _b, _c, _d) \
- cap_invoke5(to, _a, _b, _c, _d, 0)
-#define cap_invoke3(to, _a, _b, _c) \
- cap_invoke4(to, _a, _b, _c, 0)
-#define cap_invoke2(to, _a, _b) \
- cap_invoke3(to, _a, _b, 0)
-#define cap_invoke1(to, _a) \
- cap_invoke2(to, _a, 0)
-
/**
* \brief Retype a capability.
*
uint32_t chan_id;
+ genpaddr_t kcb; ///< The kernel control block
}; //__attribute__ ((packed));
#define ARM_CORE_DATA_PAGES 1100
#include <getopt/getopt.h>
#include <cp15.h>
#include <elf/elf.h>
-#include <arm_core_data.h>
+#include <barrelfish_kpi/arm_core_data.h>
#include <startup_arch.h>
#include <kernel_multiboot.h>
#include <global.h>
#include <cpiobin.h>
#include <init.h>
#include <barrelfish_kpi/paging_arm_v7.h>
-#include <arm_core_data.h>
+#include <barrelfish_kpi/arm_core_data.h>
#include <kernel_multiboot.h>
#include <offsets.h>
#include <startup_arch.h>
#include <arm_hal.h>
#include <getopt/getopt.h>
#include <elf/elf.h>
-#include <arm_core_data.h>
+#include <barrelfish_kpi/arm_core_data.h>
#include <startup_arch.h>
#include <kernel_multiboot.h>
#include <global.h>
#include <paging_kernel_arch.h>
#include <elf/elf.h>
#include <kernel_multiboot.h>
-#include <arm_core_data.h>
+#include <barrelfish_kpi/arm_core_data.h>
#include <startup_arch.h>
struct arm_core_data *glbl_core_data = (struct arm_core_data *)GLBL_COREDATA_BASE_PHYS;
}
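+// Invocation handler for kernel control block (KCB) capabilities: returns the
+// physical address of the KCB referred to by 'to'.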
+static struct sysret handle_kcb_identify(struct capability *to,
+ arch_registers_state_t *context,
+ int argc)
+{
+ return sys_handle_kcb_identify(to);
+}
+
typedef struct sysret (*invocation_t)(struct capability*, arch_registers_state_t*, int);
static invocation_t invocations[ObjType_Num][CAP_MAX_CMD] = {
[DispatcherCmd_PerfMon] = handle_dispatcher_perfmon,
[DispatcherCmd_DumpPTables] = dispatcher_dump_ptables,
},
+ [ObjType_KernelControlBlock] = {
+ [FrameCmd_Identify] = handle_kcb_identify
+ },
[ObjType_Frame] = {
[FrameCmd_Identify] = handle_frame_identify,
[FrameCmd_ModifyFlags] = handle_frame_modify_flags,
#include <getopt/getopt.h>
#include <cp15.h>
#include <elf/elf.h>
-#include <arm_core_data.h>
+#include <barrelfish_kpi/arm_core_data.h>
#include <startup_arch.h>
#include <kernel_multiboot.h>
#include <global.h>
glbl_core_data = (struct arm_core_data *)
((lpaddr_t)&kernel_first_byte - BASE_PAGE_SIZE);
glbl_core_data->cmdline = (lpaddr_t)&glbl_core_data->kernel_cmdline;
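+ // Pick up the KCB physical address that the BSP passed in core_data.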
+ kcb_current = (struct kcb*) (lpaddr_t)glbl_core_data->kcb;
my_core_id = glbl_core_data->dst_core_id;
// tell BSP that we are started up
*((volatile lvaddr_t *)ap_wait) = AP_STARTED;
}
+ if (kcb_current == NULL) {
+ panic("Did not receive a valid KCB.");
+ }
+
// XXX: print kernel address for debugging with gdb
printf("Barrelfish OMAP44xx CPU driver starting at addr 0x%"PRIxLVADDR" on core %"PRIuCOREID"\n",
local_phys_to_mem((lpaddr_t)&kernel_first_byte), my_core_id);
#include <cpiobin.h>
#include <init.h>
#include <barrelfish_kpi/paging_arch.h>
-#include <arm_core_data.h>
+#include <barrelfish_kpi/arm_core_data.h>
#include <kernel_multiboot.h>
#include <offsets.h>
#include <startup_arch.h>
break;
}
- if (kcb_current == 0x0) {
+ if (kcb_current == NULL) {
panic("Did not receive a valid KCB.");
}
return SYSRET(SYS_ERR_OK);
}
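+// Invocation wrappers: unpack the syscall arguments and forward to the
+// generic sys_kernel_* KCB operations.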
+struct sysret kernel_add_kcb(struct capability *kern_cap,
+ int cmd, uintptr_t *args)
+{
+ uintptr_t kcb_addr = args[0];
+ struct kcb *new_kcb = (struct kcb *)kcb_addr;
+
+ return sys_kernel_add_kcb(new_kcb);
+}
+
+struct sysret kernel_remove_kcb(struct capability *kern_cap,
+ int cmd, uintptr_t *args)
+{
+ printk(LOG_NOTE, "in kernel_remove_kcb invocation!\n");
+ uintptr_t kcb_addr = args[0];
+ struct kcb *to_remove = (struct kcb *)kcb_addr;
+
+ return sys_kernel_remove_kcb(to_remove);
+}
+
+struct sysret kernel_suspend_kcb_sched(struct capability *kern_cap,
+ int cmd, uintptr_t *args)
+{
+ printk(LOG_NOTE, "in kernel_suspend_kcb_sched invocation!\n");
+ return sys_kernel_suspend_kcb_sched((bool)args[0]);
+}
+
+struct sysret handle_kcb_identify(struct capability *to,
+ int cmd, uintptr_t *args)
+{
+ return sys_handle_kcb_identify(to);
+}
+
typedef struct sysret (*invocation_handler_t)(struct capability *to,
int cmd, uintptr_t *args);
return sysret;
}
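+// Invocation wrappers: unpack the syscall arguments and forward to the
+// generic sys_kernel_* KCB operations.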
+struct sysret kernel_add_kcb(struct capability *kern_cap,
+ int cmd, uintptr_t *args)
+{
+ uintptr_t kcb_addr = args[0];
+ struct kcb *new_kcb = (struct kcb *)kcb_addr;
+
+ return sys_kernel_add_kcb(new_kcb);
+}
+
+struct sysret kernel_remove_kcb(struct capability *kern_cap,
+ int cmd, uintptr_t *args)
+{
+ printk(LOG_NOTE, "in kernel_remove_kcb invocation!\n");
+ uintptr_t kcb_addr = args[0];
+ struct kcb *to_remove = (struct kcb *)kcb_addr;
+
+ return sys_kernel_remove_kcb(to_remove);
+}
+
+struct sysret kernel_suspend_kcb_sched(struct capability *kern_cap,
+ int cmd, uintptr_t *args)
+{
+ printk(LOG_NOTE, "in kernel_suspend_kcb_sched invocation!\n");
+ return sys_kernel_suspend_kcb_sched((bool)args[0]);
+}
+
+struct sysret handle_kcb_identify(struct capability *to,
+ int cmd, uintptr_t *args)
+{
+ return sys_handle_kcb_identify(to);
+}
+
+
typedef struct sysret (*invocation_handler_t)(struct capability *to,
int cmd, uintptr_t *args);
struct sysret sys_monitor_spawn_core(coreid_t core_id, enum cpu_type cpu_type,
genvaddr_t entry);
-struct sysret kernel_add_kcb(struct capability *kern_cap,
- int cmd, uintptr_t *args);
-struct sysret kernel_remove_kcb(struct capability *kern_cap,
- int cmd, uintptr_t *args);
-struct sysret kernel_suspend_kcb_sched(struct capability *kern_cap,
- int cmd, uintptr_t *args);
-struct sysret handle_kcb_identify(struct capability *to,
- int cmd, uintptr_t *args);
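+// Architecture-independent KCB management; the per-architecture invocation
+// wrappers forward to these.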
+struct sysret sys_kernel_add_kcb(struct kcb* new_kcb);
+struct sysret sys_kernel_remove_kcb(struct kcb* kcb_addr);
+struct sysret sys_kernel_suspend_kcb_sched(bool toggle);
+struct sysret sys_handle_kcb_identify(struct capability* to);
#endif
return SYSRET(SYS_ERR_OK);
}
-
-struct sysret kernel_add_kcb(struct capability *kern_cap,
- int cmd, uintptr_t *args)
+struct sysret sys_kernel_add_kcb(struct kcb *new_kcb)
{
- uintptr_t kcb_addr = args[0];
- struct kcb *new_kcb = (struct kcb *)kcb_addr;
kcb_add(new_kcb);
// update kernel_now offset
return SYSRET(SYS_ERR_OK);
}
-struct sysret kernel_remove_kcb(struct capability *kern_cap,
- int cmd, uintptr_t *args)
+struct sysret sys_kernel_remove_kcb(struct kcb * to_remove)
{
- printk(LOG_NOTE, "in kernel_remove_kcb invocation!\n");
- uintptr_t kcb_addr = args[0];
-
- struct kcb *to_remove = (struct kcb *)kcb_addr;
return SYSRET(kcb_remove(to_remove));
}
-struct sysret kernel_suspend_kcb_sched(struct capability *kern_cap,
- int cmd, uintptr_t *args)
+struct sysret sys_kernel_suspend_kcb_sched(bool suspend)
{
printk(LOG_NOTE, "in kernel_suspend_kcb_sched invocation!\n");
- kcb_sched_suspended = (bool)args[0];
+ kcb_sched_suspended = suspend;
return SYSRET(SYS_ERR_OK);
}
-struct sysret handle_kcb_identify(struct capability *to,
- int cmd, uintptr_t *args)
+struct sysret sys_handle_kcb_identify(struct capability* to)
{
// Return with physical base address of frame
// XXX: pack size into bottom bits of base address
assert(to->type == ObjType_KernelControlBlock);
lvaddr_t vkcb = (lvaddr_t) to->u.kernelcontrolblock.kcb;
assert((vkcb & BASE_PAGE_MASK) == 0);
+
return (struct sysret) {
.error = SYS_ERR_OK,
.value = mem_to_local_phys(vkcb) | OBJBITS_KCB,
};
extern coreid_t my_arch_id;
+extern struct capref kernel_cap;
errval_t get_core_info(coreid_t core_id,
archid_t* apic_id,
invoke_monitor_spawn_core(coreid_t core_id, enum cpu_type cpu_type,
forvaddr_t entry)
{
- return cap_invoke5(cap_kernel, KernelCmd_Spawn_core,
- core_id, cpu_type,
- (uintptr_t)(entry >> 32),
- (uintptr_t) entry).error;
+ uint8_t invoke_bits = get_cap_valid_bits(kernel_cap);
+ capaddr_t invoke_cptr = get_cap_addr(kernel_cap) >> (CPTR_BITS - invoke_bits);
+
+ return syscall6((invoke_bits << 16) | (KernelCmd_Spawn_core << 8)
+ | SYSCALL_INVOKE, invoke_cptr, core_id, cpu_type,
+ (uintptr_t)(entry >> 32), (uintptr_t) entry).error;
}
errval_t spawn_xcore_monitor(coreid_t coreid, int hwid,
// XXX: caching these for now, until we have unmap
static struct module_blob cpu_blob, monitor_blob;
err = module_blob_map(cpuname, &cpu_blob);
- if (!err_is_ok(err)) {
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "module_blob_map");
return err;
}
err = module_blob_map(monitorname, &monitor_blob);
- if (!err_is_ok(err)) {
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "module_blob_map");
return err;
}
&cpu_mem.cap,
&cpu_mem.buf,
&cpu_mem.frameid);
- if (!err_is_ok(err)) {
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "cpu_memory_prepare");
return err;
}
cpu_mem.buf + arch_page_size,
cpu_mem.frameid.base + arch_page_size,
&reloc_entry);
- if (!err_is_ok(err)) {
- return err_push(err, LIB_ERR_FRAME_IDENTIFY);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "cpu_memory_prepare");
+ return err;
}
/* Chunk of memory to load monitor on the app core */
err = spawn_memory_prepare(ARM_CORE_DATA_PAGES*arch_page_size,
&spawn_mem_cap,
&spawn_mem_frameid);
- if (!err_is_ok(err)) {
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "spawn_memory_prepare");
return err;
}
#ifdef CONFIG_FLOUNDER_BACKEND_UMP_IPI
core_data->chan_id = chanid;
#endif
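+ // Identify the KCB frame so its physical address can be handed to the new
+ // CPU driver via core_data.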
+ struct frame_identity fid;
+ err = invoke_frame_identify(kcb, &fid);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "Invoke frame identity for KCB failed. "
+ "Did you add the syscall handler for that architecture?");
+ }
+ core_data->kcb = (genpaddr_t) fid.base;
if (cmdline != NULL) {
// copy as much of command line as will fit
errval_t invoke_monitor_cap_remote(capaddr_t cap, int bits, bool is_remote,
bool *has_descendents)
{
- struct sysret r = cap_invoke4(kernel_cap, KernelCmd_Remote_cap, cap, bits,
- is_remote);
+ uint8_t invoke_bits = get_cap_valid_bits(kernel_cap);
+ capaddr_t invoke_cptr = get_cap_addr(kernel_cap) >> (CPTR_BITS - invoke_bits);
+
+ struct sysret r;
+ r = syscall5((invoke_bits << 16) | (KernelCmd_Remote_cap << 8)
+ | SYSCALL_INVOKE, invoke_cptr, cap, bits, is_remote);
if (err_is_ok(r.error)) {
*has_descendents = r.value;
}
USER_PANIC_ERR(err, "frame_alloc_identify failed.");
}
-// For some reason that syscall does not work on arm.
-#if defined(__x86__)
err = cap_mark_remote(frame);
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "Can not mark cap remote.");
}
-#endif
struct monitor_binding *mb = get_monitor_binding();
err = mb->tx_vtbl.boot_core_request(mb, NOP_CONT, target_id, frame);
*/
#include "monitor.h"
-#include <inttypes.h>
-#include <elf/elf.h>
-#include <barrelfish_kpi/paging_arch.h>
-#include <target/arm/barrelfish_kpi/arm_core_data.h>
-
-/// Round up n to the next multiple of size
-#define ROUND_UP(n, size) ((((n) + (size) - 1)) & (~((size) - 1)))
-
-struct monitor_allocate_state {
- void *vbase;
- genvaddr_t elfbase;
-};
-
-static errval_t monitor_elfload_allocate(void *state, genvaddr_t base,
- size_t size, uint32_t flags,
- void **retbase)
-{
- struct monitor_allocate_state *s = state;
-
- *retbase = (char *)s->vbase + base - s->elfbase;
- return SYS_ERR_OK;
-}
-
-struct xcore_bind_handler {
- coreid_t coreid;
- enum cpu_type cputype;
- struct monitor_binding *binding;
-};
-
-#if defined(CONFIG_FLOUNDER_BACKEND_UMP_IPI)
-static errval_t
-setup_intermon_connection_ump_ipi(int local_hwid, int remote_hwid,
- struct intermon_ump_ipi_binding *ump_binding,
- void *buf)
-{
- // Bootee's notify channel ID is always 1
- struct capref notify_cap;
- err = notification_create_cap(1, remote_hwid, &notify_cap);
- assert(err == SYS_ERR_OK);
-
- // Allocate my own notification caps
- struct capref ep, my_notify_cap;
- struct lmp_endpoint *iep;
- int chanid;
- err = endpoint_create(LMP_RECV_LENGTH, &ep, &iep);
- assert(err_is_ok(err));
- err = notification_allocate(ep, &chanid);
- assert(err == SYS_ERR_OK);
- err = notification_create_cap(chanid, local_hwid, &my_notify_cap);
- assert(err == SYS_ERR_OK);
-
- // init our end of the binding and channel
- err = intermon_ump_ipi_init(ump_binding, get_default_waitset(),
- buf, MON_URPC_CHANNEL_LEN,
- buf + MON_URPC_CHANNEL_LEN,
- MON_URPC_CHANNEL_LEN, notify_cap,
- my_notify_cap, ep, iep);
-
- return err;
-}
-#endif
-
-static errval_t
-setup_intermon_connection(int local_hwid,
- int remote_hwid,
- struct intermon_binding **ret_binding,
- struct frame_identity *urpc_frame_id)
-{
- // compute size of frame needed and allocate it
- struct capref frame;
- size_t framesize;
- errval_t err;
-
- framesize = MON_URPC_CHANNEL_LEN * 2;
- err = frame_alloc(&frame, framesize, &framesize);
- if (err_is_fail(err)) {
- return err_push(err, LIB_ERR_FRAME_ALLOC);
- }
-
- // Mark it remote
- bool has_descendants;
- err = monitor_cap_remote(frame, true, &has_descendants);
- if (err_is_fail(err)) {
- return err;
- }
-
- // map it in
- void *buf;
- err = vspace_map_one_frame(&buf, framesize, frame, NULL, NULL);
- if (err_is_fail(err)) {
- cap_destroy(frame);
- return err_push(err, LIB_ERR_VSPACE_MAP);
- }
-
- #if defined(CONFIG_FLOUNDER_BACKEND_UMP_IPI)
- struct intermon_ump_ipi_binding *ump_binding;
- #else
- struct intermon_ump_binding *ump_binding;
- #endif
-
- ump_binding = malloc(sizeof(*ump_binding));
- assert(ump_binding != NULL);
-
- #if defined(CONFIG_FLOUNDER_BACKEND_UMP_IPI)
- err = setup_intermon_connection_ump_ipi(local_hwid, remote_hwid,
- ump_binding, buf);
- #else
- err = intermon_ump_init(ump_binding, get_default_waitset(),
- buf, MON_URPC_CHANNEL_LEN,
- (char *)buf + MON_URPC_CHANNEL_LEN,
- MON_URPC_CHANNEL_LEN);
- #endif
-
- if (err_is_fail(err)) {
- cap_destroy(frame);
- return err_push(err, LIB_ERR_UMP_CHAN_BIND);
- }
-
-
- // Identify UMP frame for tracing
- struct frame_identity umpid;
- err = invoke_frame_identify(frame, &umpid);
- assert(err_is_ok(err));
- ump_binding->ump_state.chan.recvid = (uintptr_t)umpid.base;
- ump_binding->ump_state.chan.sendid =
- (uintptr_t)(umpid.base + MON_URPC_CHANNEL_LEN);
-
- /* Look up information on the urpc_frame cap */
- err = invoke_frame_identify(frame, urpc_frame_id);
- if (err_is_fail(err)) {
- USER_PANIC_ERR(err, "frame_identify failed");
- }
-
- *ret_binding = &ump_binding->b;
- return err;
-}
-
-struct module_blob {
- size_t size;
- lvaddr_t vaddr;
- genpaddr_t paddr;
- struct mem_region *mem_region;
-};
-
-static errval_t
-module_blob_map(const char *name, struct module_blob *blob)
-{
- errval_t err;
- struct mem_region *mem_region = multiboot_find_module(bi, name);
- if (mem_region == NULL) {
- return SPAWN_ERR_FIND_MODULE;
- }
-
- // it's cached already
- if (blob->vaddr != 0) {
- assert(mem_region == blob->mem_region);
- return SYS_ERR_OK;
- }
-
- blob->mem_region = mem_region;
- err = spawn_map_module(mem_region,
- &blob->size,
- &blob->vaddr,
- &blob->paddr);
-
- if (err_is_fail(err)) {
- return err_push(err, SPAWN_ERR_MAP_MODULE);
- } else {
- return SYS_ERR_OK;
- }
-}
-
-static errval_t
-cpu_memory_prepare(size_t *size,
- struct capref *cap_ret, void **buf_ret,
- struct frame_identity *frameid)
-{
- errval_t err;
- struct capref cap;
- void *buf;
-
- /* Currently, the app kernel can only be loaded in the first 2GB
- of memory. Further, it must not overlap the integer
- boundaries, i.e. 0-1, 1-2 */
-
- // FIXME:
- // The code below does not make sure that the kernel is loaded in the first
- // 2G, but the disabled code does not work. So using a simple frame_alloc()
- // for now.
- #if 0
- uint64_t old_minbase, old_maxlimit;
- ram_get_affinity(&old_minbase, &old_maxlimit);
- for (uint64_t minbase = 0, maxlimit = (uint64_t)1 << 30;
- minbase < (uint64_t)2 << 30;
- minbase += (uint64_t)1 << 30, maxlimit += (uint64_t)1 << 30) {
-
- printf("minbase=%llu maxlimit=%llu cpu_memory=%zd\n",
- minbase, maxlimit, cpu_memory);
- ram_set_affinity(minbase, maxlimit);
- err = frame_alloc(&cpu_memory_cap, cpu_memory, &cpu_memory);
- if (err_is_ok(err)) {
- goto done;
- }
- }
- USER_PANIC("No memory in the first 2GB, cannot continue booting cores");
- done:
- ram_set_affinity(old_minbase, old_maxlimit);
- #else
- err = frame_alloc(&cap, *size, size);
- if (err_is_fail(err)) {
- USER_PANIC("Failed to allocate %zd memory\n", *size);
- }
- #endif
-
-#ifdef __gem5__
- // XXX: We map the frame for the new kernel as uncacheable. Gem5 has a
- // problem when one core has cacheing on and writes to a location where an
- // other core reads from without caches enabled. On real hardware one could
- // clean/flush the cache, but Gem5 doesn't support cache maintenance
- // operations for ARM
- err = vspace_map_one_frame_attr(&buf, *size, cap,
- VREGION_FLAGS_READ_WRITE_NOCACHE,
- NULL, NULL);
-#else
- err = vspace_map_one_frame(&buf, *size, cap, NULL, NULL);
-#endif
- if (err_is_fail(err)) {
- return err_push(err, LIB_ERR_VSPACE_MAP);
- }
-
- // Mark memory as remote
- bool has_descendants;
- err = monitor_cap_remote(cap, true, &has_descendants);
- if (err_is_fail(err)) {
- return err;
- }
-
- err = invoke_frame_identify(cap, frameid);
- if (err_is_fail(err)) {
- return err_push(err, LIB_ERR_FRAME_IDENTIFY);
- }
-
- *cap_ret = cap;
- *buf_ret = buf;
- return SYS_ERR_OK;
-}
-
-static errval_t
-cpu_memory_cleanup(struct capref cap, void *buf)
-{
- errval_t err;
-
- err = vspace_unmap(buf);
- if (err_is_fail(err)) {
- USER_PANIC_ERR(err, "vspace unmap CPU driver memory failed");
- }
-
- // XXX: Should not delete the remote cap
- err = cap_destroy(cap);
- if (err_is_fail(err)) {
- USER_PANIC_ERR(err, "cap_destroy failed");
- }
-
- return SYS_ERR_OK;
-}
-
-static errval_t
-spawn_memory_prepare(size_t size, struct capref *cap_ret,
- struct frame_identity *frameid)
-{
- errval_t err;
- struct capref cap;
-
- err = frame_alloc(&cap, size, NULL);
- if (err_is_fail(err)) {
- return err_push(err, LIB_ERR_FRAME_ALLOC);
- }
-
- // Mark memory as remote
- bool has_descendants;
- err = monitor_cap_remote(cap, true, &has_descendants);
- if (err_is_fail(err)) {
- return err;
- }
-
- err = invoke_frame_identify(cap, frameid);
- if (err_is_fail(err)) {
- USER_PANIC_ERR(err, "frame_identify failed");
- }
-
- *cap_ret = cap;
- return SYS_ERR_OK;
-}
-
-static errval_t
-spawn_memory_cleanup(struct capref cap)
-{
-
- errval_t err;
- err = cap_destroy(cap);
- if (err_is_fail(err)) {
- USER_PANIC_ERR(err, "cap_destroy failed");
- }
-
- return SYS_ERR_OK;
-}
-
-static errval_t
-elf_load_and_relocate(lvaddr_t blob_start, size_t blob_size,
- void *to, lvaddr_t reloc_dest,
- uintptr_t *reloc_entry)
-{
- genvaddr_t entry; // entry point of the loaded elf image
- struct Elf32_Ehdr *head = (struct Elf32_Ehdr *)blob_start;
- struct Elf32_Shdr *symhead, *rel, *symtab;
- errval_t err;
-
- //state.vbase = (void *)ROUND_UP(to, ARM_L1_ALIGN);
- struct monitor_allocate_state state;
- state.vbase = to;
- state.elfbase = elf_virtual_base(blob_start);
-
- err = elf_load(head->e_machine,
- monitor_elfload_allocate,
- &state,
- blob_start, blob_size,
- &entry);
- if (err_is_fail(err)) {
- return err;
- }
-
- // Relocate to new physical base address
- symhead = (struct Elf32_Shdr *)(blob_start + (uintptr_t)head->e_shoff);
- rel = elf32_find_section_header_type(symhead, head->e_shnum, SHT_REL);
- symtab = elf32_find_section_header_type(symhead, head->e_shnum, SHT_DYNSYM);
- assert(rel != NULL && symtab != NULL);
-
- elf32_relocate(reloc_dest, state.elfbase,
- (struct Elf32_Rel *)(blob_start + rel->sh_offset),
- rel->sh_size,
- (struct Elf32_Sym *)(blob_start + symtab->sh_offset),
- symtab->sh_size,
- state.elfbase, state.vbase);
-
- *reloc_entry = entry - state.elfbase + reloc_dest;
- return SYS_ERR_OK;
-}
-
-errval_t spawn_xcore_monitor(coreid_t coreid, int hwid, enum cpu_type cpu_type,
- const char *cmdline,
- struct intermon_binding **ret_binding)
-{
- const char *monitorname = NULL, *cpuname = NULL;
- uint32_t arch_page_size;
- errval_t err;
-
- arch_page_size = BASE_PAGE_SIZE;
- monitorname = "armv7/sbin/monitor";
-#if defined(__gem5__)
- cpuname = "armv7/sbin/cpu_arm_gem5";
-#elif defined(__pandaboard__)
- cpuname = "armv7/sbin/cpu_omap44xx";
-#else
-#error "unknown armv7 architecture"
-#endif
-
- // Get my arch ID
- uintptr_t my_arch_id = 0;
- err = invoke_monitor_get_arch_id(&my_arch_id);
- if (err_is_fail(err)) {
- return err;
- }
-
- // setup an intermon connection for the new core
- struct frame_identity urpc_frame_id;
- err = setup_intermon_connection(my_arch_id, hwid, ret_binding, &urpc_frame_id);
- if (err_is_fail(err)) {
- return err;
- }
-
- // map cpu and monitor module
- // XXX: caching these for now, until we have unmap
- static struct module_blob cpu_blob, monitor_blob;
- err = module_blob_map(cpuname, &cpu_blob);
- if (!err_is_ok(err)) {
- return err;
- }
- err = module_blob_map(monitorname, &monitor_blob);
- if (!err_is_ok(err)) {
- return err;
- }
-
- // allocate memory for cpu driver: we allocate a page for arm_core_data and
- // the rest for the elf image
- assert(sizeof(struct arm_core_data) <= arch_page_size);
- struct {
- size_t size;
- struct capref cap;
- void *buf;
- struct frame_identity frameid;
- } cpu_mem = {
- .size = arch_page_size + elf_virtual_size(cpu_blob.vaddr)
- };
- err = cpu_memory_prepare(&cpu_mem.size,
- &cpu_mem.cap,
- &cpu_mem.buf,
- &cpu_mem.frameid);
- if (!err_is_ok(err)) {
- return err;
- }
-
- // Load cpu driver to the allocate space and do relocatation
- uintptr_t reloc_entry;
- err = elf_load_and_relocate(cpu_blob.vaddr,
- cpu_blob.size,
- cpu_mem.buf + arch_page_size,
- cpu_mem.frameid.base + arch_page_size,
- &reloc_entry);
- if (!err_is_ok(err)) {
- return err_push(err, LIB_ERR_FRAME_IDENTIFY);
- }
-
- /* Chunk of memory to load monitor on the app core */
- struct capref spawn_mem_cap;
- struct frame_identity spawn_mem_frameid;
- err = spawn_memory_prepare(ARM_CORE_DATA_PAGES*arch_page_size,
- &spawn_mem_cap,
- &spawn_mem_frameid);
- if (!err_is_ok(err)) {
- return err;
- }
-
- /* Setup the core_data struct in the new kernel */
- struct arm_core_data *core_data = (struct arm_core_data *)cpu_mem.buf;
-
- struct Elf32_Ehdr *head32 = (struct Elf32_Ehdr *)cpu_blob.vaddr;
- core_data->elf.size = sizeof(struct Elf32_Shdr);
- core_data->elf.addr = cpu_blob.paddr + (uintptr_t)head32->e_shoff;
- core_data->elf.num = head32->e_shnum;
-
- core_data->module_start = cpu_blob.paddr;
- core_data->module_end = cpu_blob.paddr + cpu_blob.size;
- core_data->urpc_frame_base = urpc_frame_id.base;
- core_data->urpc_frame_bits = urpc_frame_id.bits;
- core_data->monitor_binary = monitor_blob.paddr;
- core_data->monitor_binary_size = monitor_blob.size;
- core_data->memory_base_start = spawn_mem_frameid.base;
- core_data->memory_bits = spawn_mem_frameid.bits;
- core_data->src_core_id = my_core_id;
- core_data->src_arch_id = my_arch_id;
- core_data->dst_core_id = coreid;
-#ifdef CONFIG_FLOUNDER_BACKEND_UMP_IPI
- core_data->chan_id = chanid;
-#endif
-
- if (cmdline != NULL) {
- // copy as much of command line as will fit
- strncpy(core_data->kernel_cmdline, cmdline,
- sizeof(core_data->kernel_cmdline));
- // ensure termination
- core_data->kernel_cmdline[sizeof(core_data->kernel_cmdline) - 1] = '\0';
- }
-
- /* Invoke kernel capability to boot new core */
- // XXX: Confusion address translation about l/gen/addr
- err = invoke_monitor_spawn_core(hwid, cpu_type, (forvaddr_t)reloc_entry);
- if (err_is_fail(err)) {
- return err_push(err, MON_ERR_SPAWN_CORE);
- }
-
- err = cpu_memory_cleanup(cpu_mem.cap, cpu_mem.buf);
- if (err_is_fail(err)) {
- return err;
- }
-
- err = spawn_memory_cleanup(spawn_mem_cap);
- if (err_is_fail(err)) {
- return err;
- }
-
- return SYS_ERR_OK;
-}
/**
* \brief Initialize monitor running on app cores
* \param dispatcher Cap to the dispatcher of the new user program
* \param entry Kernel entry point in physical memory
*/
-//XXX: workaround for inline bug of arm-gcc 4.6.1 and lower
-#if defined(__ARM_ARCH_7A__) && defined(__GNUC__) \
- && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 && __GNUC_PATCHLEVEL__ <= 1
-static __attribute__((noinline, unused)) errval_t
-#else
static inline errval_t
-#endif
invoke_monitor_spawn_core(coreid_t core_id, enum cpu_type cpu_type,
forvaddr_t entry)
{