/*
- * Copyright (c) 2009, 2010, 2012, 2015, ETH Zurich.
+ * Copyright (c) 2009, 2010, 2012, 2015, 2016, ETH Zurich.
- * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
+ * Copyright (c) 2015, 2016 Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
/* well-known capabilities */
extern struct capref cap_root, cap_monitorep, cap_irq, cap_io, cap_dispatcher,
cap_selfep, cap_kernel, cap_initep, cap_perfmon, cap_dispframe,
- cap_sessionid, cap_ipi, cap_vroot;
+ cap_sessionid, cap_ipi, cap_vroot, cap_argcn;
/**
- * \brief Returns the number of valid bits in the CSpace address of a cap
+ * \brief Returns the depth in the CSpace address of a cap
*/
- static inline uint8_t get_cap_valid_bits(struct capref cap)
+ static inline uint8_t get_cap_level(struct capref cap)
{
- uint8_t sum = cap.cnode.address_bits + cap.cnode.guard_size +
- cap.cnode.size_bits;
- if (sum > CPTR_BITS) {
- return sum % CPTR_BITS;
+ if (capref_is_null(cap)) {
+ return 0;
} else {
- return sum;
+ return cap.cnode.level + 1;
}
}
--- /dev/null
+ /**
+ * \file
+ * \brief Architecture independent capability invocations
+ */
+
+ /*
+ * Copyright (c) 2016, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+ #ifndef INVOCATIONS_H
+ #define INVOCATIONS_H
+
+ #include <barrelfish_kpi/dispatcher_shared.h>
+ #include <barrelfish_kpi/distcaps.h> // for distcap_state_t
+ #include <barrelfish/caddr.h>
+
+ #include <barrelfish/invocations_arch.h>
+
+ /**
+ * \brief Create a capability.
+ *
+ * Create a new capability of type 'type' and size 'objbits'. The new cap will
+ * be placed in the slot 'dest_slot' of the CNode located at 'dest_cnode_cptr'
+ * in the address space rooted at 'root'.
+ *
+ * See also cap_create(), which wraps this.
+ *
+ * \param root Capability of the CNode to invoke.
+ * \param type Kernel object type to create.
+ * \param objsize Size of created object
+ * (ignored for fixed-size objects)
+ * \param dest_cnode_cptr Address of CNode cap, where newly created cap will be
+ * placed into.
+ * \param dest_level Depth/level of destination CNode.
+ * \param dest_slot Slot in CNode cap to place new cap.
+ *
+ * \return Error code
+ */
+ static inline errval_t invoke_cnode_create(struct capref root,
+ enum objtype type, size_t objsize,
+ capaddr_t dest_cnode_cptr,
+ enum cnode_type dest_level,
+ capaddr_t dest_slot)
+ {
+ assert(dest_cnode_cptr != CPTR_NULL);
+ return cap_invoke6(root, CNodeCmd_Create, type, objsize, dest_cnode_cptr,
+ dest_level, dest_slot).error;
+ }
+
+ /**
+ * \brief "Mint" a capability.
+ *
+ * Copies CPtr 'from' into slot 'slot' in the CNode, addressed by 'to', within
+ * the address space, rooted at 'root' and with 'tobits' and 'frombits' address
+ * bits of 'to' and 'from' valid, respectively.
+ *
+ * See also cap_mint(), which wraps this.
+ *
+ * \param root Capability of the source cspace root CNode to invoke
+ * \param to_cspace Destination cspace cap address relative to source cspace
+ * \param to Destination CNode address relative to destination cspace
+ * \param slot Slot in destination CNode cap to place copy into
+ * \param from Address of cap to copy.
+ * \param tolevel Level/depth of 'to'.
+ * \param fromlevel Level/depth of 'from'.
+ * \param param1 1st cap-dependent parameter.
+ * \param param2 2nd cap-dependent parameter.
+ *
+ * \return Error code
+ */
+ static inline errval_t invoke_cnode_mint(struct capref root, capaddr_t to_cspace,
+ capaddr_t to, capaddr_t slot,
+ capaddr_t from_cspace, capaddr_t from,
+ enum cnode_type tolevel,
+ enum cnode_type fromlevel,
+ uint64_t param1, uint64_t param2)
+ {
+ return cap_invoke10(root, CNodeCmd_Mint, to_cspace, to, slot, from_cspace,
+ from, tolevel, fromlevel, param1, param2).error;
+ }
+
+ /**
+ * \brief Copy a capability.
+ *
+ * Copies CPtr 'from' into slot 'slot' in the CNode, addressed by 'to', within
+ * the address space, rooted at 'root' and with 'tobits' and 'frombits' address
+ * bits of 'to' and 'from' valid, respectively.
+ *
+ * See also cap_copy(), which wraps this.
+ *
+ * \param root Capability of the source cspace root CNode to invoke
+ * \param to_cspace Capability address of destination root cnode relative
+ * to our cspace
+ * \param to CNode address to place copy into relative to
+ * destination cspace.
+ * \param slot Slot in CNode cap to place copy into.
+ * \param from_cspace Capability address of source root cnode relative
+ * to our cspace
+ * \param from Address of cap to copy.
+ * \param tolevel Level/depth of 'to'.
+ * \param fromlevel Level/depth of 'from'.
+ *
+ * \return Error code
+ */
+ static inline errval_t invoke_cnode_copy(struct capref root, capaddr_t to_cspace,
+ capaddr_t to, capaddr_t slot,
+ capaddr_t from_cspace, capaddr_t from,
+ enum cnode_type tolevel,
+ enum cnode_type fromlevel)
+ {
+ return cap_invoke8(root, CNodeCmd_Copy, to_cspace, to, slot, from_cspace,
+ from, tolevel, fromlevel).error;
+ }
+
+ /**
+ * \brief Delete a capability.
+ *
+ * Delete the capability pointed to by 'cap', with 'bits' address bits
+ * of it valid, from the address space rooted at 'root'.
+ *
+ * \param root Capability of the CNode to invoke
+ * \param cap Address of cap to delete.
+ * \param level Level/depth of 'cap'.
+ *
+ * \return Error code
+ */
+ static inline errval_t invoke_cnode_delete(struct capref root, capaddr_t cap,
+ enum cnode_type level)
+ {
+ return cap_invoke3(root, CNodeCmd_Delete, cap, level).error;
+ }
+
+ static inline errval_t invoke_cnode_revoke(struct capref root, capaddr_t cap,
+ enum cnode_type level)
+ {
+ return cap_invoke3(root, CNodeCmd_Revoke, cap, level).error;
+ }
+
+ static inline errval_t invoke_cnode_get_state(struct capref root, capaddr_t cap,
+ enum cnode_type level, distcap_state_t *ret)
+ {
+ struct sysret sysret = cap_invoke3(root, CNodeCmd_GetState, cap, level);
+
+ assert(ret != NULL);
+ if (err_is_ok(sysret.error)) {
+ *ret = sysret.value;
+ }
+ else {
+ *ret = 0;
+ }
+ return sysret.error;
+ }
+
+ static inline errval_t invoke_cnode_resize(struct capref root, capaddr_t new_cptr,
+ capaddr_t retcn_ptr, cslot_t retslot)
+ {
+ return cap_invoke4(root, CNodeCmd_Resize, new_cptr, retcn_ptr, retslot).error;
+ }
+
+ static inline errval_t invoke_vnode_unmap(struct capref cap,
+ capaddr_t mapping_addr,
+ enum cnode_type level)
+ {
+ return cap_invoke3(cap, VNodeCmd_Unmap, mapping_addr, level).error;
+ }
+
+ /**
+ * \brief Return the physical address and size of a frame capability
+ *
+ * \param frame CSpace address of frame capability
+ * \param ret frame_identity struct filled in with relevant data
+ *
+ * \return Error code
+ */
+ static inline errval_t invoke_frame_identify(struct capref frame,
+ struct frame_identity *ret)
+ {
+ assert(ret != NULL);
+ assert(get_croot_addr(frame) == CPTR_ROOTCN);
+
+ struct sysret sysret = cap_invoke2(frame, FrameCmd_Identify, (uintptr_t)ret);
+
+ if (err_is_ok(sysret.error)) {
+ return sysret.error;
+ }
+
+ ret->base = 0;
+ ret->bytes = 0;
+ return sysret.error;
+ }
+
+ static inline errval_t invoke_vnode_identify(struct capref vnode,
+ struct vnode_identity *ret)
+ {
+ assert(get_croot_addr(vnode) == CPTR_ROOTCN);
+ struct sysret sysret = cap_invoke1(vnode, VNodeCmd_Identify);
+
+ assert(ret != NULL);
+ if (err_is_ok(sysret.error)) {
+ ret->base = sysret.value & (~BASE_PAGE_MASK);
+ ret->type = sysret.value & BASE_PAGE_MASK;
+ return sysret.error;
+ }
+
+ ret->base = 0;
+ ret->type = 0;
+ return sysret.error;
+ }
+
+ /**
+ * \brief Modify mapping flags on parts of a mapping
+ *
+ * \param mapping CSpace address of mapping capability
+ * \param off Offset (in #pages) of the first page to get new set of flags
+ * from the first page in the mapping identified by `frame`
+ * \param pages Number of pages that should get new set of flags
+ * \param flags New set of flags
+ * \param va_hint Hint for selective TLB flushing
+ *
+ * \return Error code
+ */
+ static inline errval_t invoke_mapping_modify_flags(struct capref mapping,
+ size_t offset,
+ size_t pages,
+ size_t flags,
+ genvaddr_t va_hint)
+ {
+ return cap_invoke5(mapping, MappingCmd_Modify, offset,
+ pages, flags, va_hint).error;
+ }
+
+ /**
+ * \brief Setup a dispatcher, possibly making it runnable
+ *
+ * \param dispatcher Address of dispatcher capability relative to own
+ * cspace
+ * \param domdispatcher Address of existing dispatcher for domain ID relative
+ * to own cspace
+ * \param cspace Root of CSpace for new dispatcher relative to own
+ * cspace
+ * \param vspace Root of VSpace for new dispatcher relative to cspace
+ * for new dispatcher.
+ * \param dispframe Frame capability for dispatcher structure relative to
+ * cspace for new dispatcher.
+ * \param run Make runnable if true
+ *
+ * Need to either supply caprefs for all or none of cspace, vspace, dispframe
+ * and domdispatcher.
+ *
+ * \return Error code
+ */
+ static inline errval_t
+ invoke_dispatcher(struct capref dispatcher, struct capref domdispatcher,
+ struct capref cspace, struct capref vspace,
+ struct capref dispframe, bool run)
+ {
+ assert(get_croot_addr(dispatcher) == CPTR_ROOTCN);
+ assert(capref_is_null(cspace) || get_croot_addr(cspace) == CPTR_ROOTCN);
+ assert(capref_is_null(domdispatcher) || get_croot_addr(domdispatcher) == CPTR_ROOTCN);
+ assert(capref_is_null(vspace) || get_croot_addr(vspace) == get_cap_addr(cspace));
+ assert(capref_is_null(dispframe) || get_croot_addr(dispframe) == get_cap_addr(cspace));
+
+ capaddr_t root_caddr = get_cap_addr(cspace);
+ uint8_t root_level = get_cap_level(cspace);
+ capaddr_t vtree_caddr = get_cap_addr(vspace);
+ capaddr_t disp_caddr = get_cap_addr(dispframe);
+ capaddr_t dd_caddr = get_cap_addr(domdispatcher);
+
+ return cap_invoke7(dispatcher, DispatcherCmd_Setup, root_caddr,
+ root_level, vtree_caddr, disp_caddr, run,
+ dd_caddr).error;
+ }
+
+ static inline errval_t
+ invoke_dispatcher_properties(struct capref dispatcher,
+ enum task_type type, unsigned long deadline,
+ unsigned long wcet, unsigned long period,
+ unsigned long release, unsigned short weight)
+ {
+ return cap_invoke7(dispatcher, DispatcherCmd_Properties, type, deadline,
+ wcet, period, release, weight).error;
+ }
+
+
+ static inline errval_t invoke_dispatcher_dump_ptables(struct capref dispcap)
+ {
+ return cap_invoke1(dispcap, DispatcherCmd_DumpPTables).error;
+ }
+
+ static inline errval_t invoke_dispatcher_dump_capabilities(struct capref dispcap)
+ {
+ return cap_invoke1(dispcap, DispatcherCmd_DumpCapabilities).error;
+ }
+
+ /**
+ * IRQ manipulations
+ */
+ static inline errval_t invoke_irqdest_connect(struct capref irqcap, struct capref epcap)
+ {
+ struct sysret ret = cap_invoke2(irqcap, IRQDestCmd_Connect, get_cap_addr(epcap));
+ return ret.error;
+ }
+
-static inline errval_t invoke_irqdest_get_vector(struct capref irqcap, uint32_t * out_vec)
++static inline errval_t invoke_irqdest_get_vector(struct capref irqcap, uint64_t * out_vec)
+ {
+ struct sysret ret = cap_invoke1(irqcap, IRQDestCmd_GetVector);
+ *out_vec = ret.value;
+ return ret.error;
+ }
+
-static inline errval_t invoke_irqsrc_get_vector(struct capref irqcap, uint32_t * out_vec)
++static inline errval_t invoke_irqdest_get_cpu(struct capref irqcap, uint64_t * out_cpu)
++{
++ struct sysret ret = cap_invoke1(irqcap, IRQDestCmd_GetCpu);
++ *out_cpu = ret.value;
++ return ret.error;
++}
++
++static inline errval_t invoke_irqsrc_get_vector(struct capref irqcap, uint64_t * out_vec)
+ {
+ struct sysret ret = cap_invoke1(irqcap, IRQSrcCmd_GetVector);
+ *out_vec = ret.value;
+ return ret.error;
+ }
+
+ static inline errval_t invoke_irqtable_alloc_dest_cap(struct capref irqcap, struct capref dest_cap)
+ {
+ uint8_t dcn_level = get_cnode_level(dest_cap);
+ capaddr_t dcn_addr = get_cnode_addr(dest_cap);
+ struct sysret ret = cap_invoke4(irqcap, IRQTableCmd_AllocDestCap,
+ dcn_level, dcn_addr, dest_cap.slot);
+ return ret.error;
+ }
+
+ /**
+ * Deprecated. Use invoke_irqtable_alloc_dest_cap
+ */
+ static inline errval_t invoke_irqtable_alloc_vector(struct capref irqcap, int *retirq)
+ {
+ struct sysret ret = cap_invoke1(irqcap, IRQTableCmd_Alloc);
+ if (err_is_ok(ret.error)) {
+ *retirq = ret.value;
+ } else {
+ *retirq = 0;
+ }
+ return ret.error;
+ }
+
+ static inline errval_t invoke_irqtable_set(struct capref irqcap, int irq,
+ struct capref ep)
+ {
+ return cap_invoke3(irqcap, IRQTableCmd_Set, irq, get_cap_addr(ep)).error;
+ }
+
+ static inline errval_t invoke_irqtable_delete(struct capref irqcap, int irq)
+ {
+ return cap_invoke2(irqcap, IRQTableCmd_Delete, irq).error;
+ }
+
+ /**
+ * \brief do a kernel cap invocation to get the core id
+ */
+ static inline errval_t invoke_kernel_get_core_id(struct capref kern_cap,
+ coreid_t *core_id)
+ {
+ assert(core_id != NULL);
+
+ struct sysret sysret = cap_invoke1(kern_cap, KernelCmd_Get_core_id);
+ if (sysret.error == SYS_ERR_OK) {
+ *core_id = sysret.value;
+ }
+ return sysret.error;
+ }
+
+ #endif // INVOCATIONS_H
*/
/*
- * Copyright (c) 2007-2012, ETH Zurich.
+ * Copyright (c) 2007-2012, 2016, ETH Zurich.
- * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
+ * Copyright (c) 2015, 2016 Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
static inline bool type_is_vnode(enum objtype type)
{
- STATIC_ASSERT(48 == ObjType_Num, "Check VNode definitions");
- STATIC_ASSERT(47 == ObjType_Num, "Check VNode definitions");
++ STATIC_ASSERT(49 == ObjType_Num, "Check VNode definitions");
return (type == ObjType_VNode_x86_64_pml4 ||
type == ObjType_VNode_x86_64_pdpt ||
);
}
+ static inline bool type_is_vroot(enum objtype type)
+ {
- STATIC_ASSERT(47 == ObjType_Num, "Check VNode definitions");
++ STATIC_ASSERT(49 == ObjType_Num, "Check VNode definitions");
+
+ return (type == ObjType_VNode_x86_64_pml4 ||
+ #ifdef CONFIG_PAE
+ type == ObjType_VNode_x86_32_pdpt ||
+ #else
+ type == ObjType_VNode_x86_32_pdir ||
+ #endif
+ type == ObjType_VNode_AARCH64_l1 ||
+ type == ObjType_VNode_ARM_l1
+ );
+ }
/**
* Return size of vnode in bits. This is the size of a page table page.
*
static inline size_t vnode_objbits(enum objtype type)
{
// This function should be emitted by hamlet or somesuch.
- STATIC_ASSERT(48 == ObjType_Num, "Check VNode definitions");
- STATIC_ASSERT(47 == ObjType_Num, "Check VNode definitions");
++ STATIC_ASSERT(49 == ObjType_Num, "Check VNode definitions");
if (type == ObjType_VNode_x86_64_pml4 ||
type == ObjType_VNode_x86_64_pdpt ||
static inline size_t vnode_objsize(enum objtype type)
{
// This function should be emitted by hamlet or somesuch.
- STATIC_ASSERT(48 == ObjType_Num, "Check VNode definitions");
- STATIC_ASSERT(47 == ObjType_Num, "Check VNode definitions");
++ STATIC_ASSERT(49 == ObjType_Num, "Check VNode definitions");
if (type == ObjType_VNode_x86_64_pml4 ||
type == ObjType_VNode_x86_64_pdpt ||
*/
static inline size_t vnode_entry_bits(enum objtype type) {
// This function should be emitted by hamlet or somesuch.
- STATIC_ASSERT(48 == ObjType_Num, "Check VNode definitions");
- STATIC_ASSERT(47 == ObjType_Num, "Check VNode definitions");
++ STATIC_ASSERT(49 == ObjType_Num, "Check VNode definitions");
if (type == ObjType_VNode_x86_64_pml4 ||
type == ObjType_VNode_x86_64_pdpt ||
return 0;
}
+ /**
+ * Return number of slots for cnode in bits.
+ * @param type Object type.
+ * @return Number of page table entries in bits
+ */
+ static inline size_t cnode_get_slots(struct capability *cnode) {
- STATIC_ASSERT(47 == ObjType_Num, "Check CNode definitions");
++ STATIC_ASSERT(49 == ObjType_Num, "Check CNode definitions");
+
+ switch (cnode->type) {
+ case ObjType_L1CNode:
+ return cnode->u.l1cnode.allocated_bytes / (1UL << OBJBITS_CTE);
+ case ObjType_L2CNode:
+ return L2_CNODE_SLOTS;
+ default:
+ assert(!"not a cnode");
+ return 0;
+ }
+ }
+
static inline enum objtype get_mapping_type(enum objtype captype)
{
- STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all mapping types");
- STATIC_ASSERT(47 == ObjType_Num, "Knowledge of all mapping types");
++ STATIC_ASSERT(49 == ObjType_Num, "Knowledge of all mapping types");
switch (captype) {
case ObjType_Frame:
static inline bool type_is_mapping(enum objtype type)
{
- STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all mapping types");
- STATIC_ASSERT(47 == ObjType_Num, "Knowledge of all mapping types");
++ STATIC_ASSERT(49 == ObjType_Num, "Knowledge of all mapping types");
switch (type) {
case ObjType_Frame_Mapping:
static inline bool type_is_mappable(enum objtype type)
{
- STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all mapping types");
- STATIC_ASSERT(47 == ObjType_Num, "Knowledge of all mappable types");
++ STATIC_ASSERT(49 == ObjType_Num, "Knowledge of all mappable types");
switch (type) {
case ObjType_Frame:
* Predicates related to sharing capabilities
*/
- STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
-STATIC_ASSERT(47 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(49 == ObjType_Num, "Knowledge of all cap types");
static inline bool
distcap_needs_locality(enum objtype type)
{
}
}
- STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
-STATIC_ASSERT(47 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(49 == ObjType_Num, "Knowledge of all cap types");
static inline bool
distcap_is_moveable(enum objtype type)
{
#ifndef __BITMACROS_H
#define __BITMACROS_H
+#include <stdint.h>
+
+ /* A one-bit mask at bit n of type t */
+ #define BIT_T(t, n) ((t)1 << (n))
+
/* A one-bit mask at bit n */
- #define BIT(n) (1ULL << (n))
+ #define BIT(n) BIT_T(uint64_t, n)
+
+ /* An n-bit mask, beginning at bit 0 of type t */
+ #define MASK_T(t, n) (BIT_T(t, n) - 1)
/* An n-bit mask, beginning at bit 0 */
- #define MASK(n) (BIT(n) - 1)
+ #define MASK(n) MASK_T(uint64_t, n)
/* An n-bit field selector, beginning at bit m */
#define FIELD(m,n,x) (((x) >> m) & MASK(n))
"arch/armv7/kludges.c",
"arch/armv7/paging.c",
"arch/armv7/plat_a15mpcore.c",
+ "arch/armv7/plat_id.c",
"arch/armv7/plat_priv_cbar.c",
"arch/armv7/plat_vexpress.c",
+ "arch/armv7/plat_vexpress_consts.c",
"arch/armv7/startup_arch.c",
"arch/armv7/syscall.c",
+ "arch/arm/debug.c",
"arch/arm/dispatch.c",
"arch/arm/exec.c",
"arch/arm/exn.c",
"arch/armv7/kludges.c",
"arch/armv7/paging.c",
"arch/armv7/plat_a9mpcore.c",
+ "arch/armv7/plat_id.c",
"arch/armv7/plat_priv_cbar.c",
"arch/armv7/plat_vexpress.c",
+ "arch/armv7/plat_vexpress_consts.c",
"arch/armv7/startup_arch.c",
"arch/armv7/syscall.c",
+ "arch/arm/debug.c",
"arch/arm/dispatch.c",
"arch/arm/exec.c",
"arch/arm/exn.c",
#include <global.h>
#include <kcb.h>
+#include <efi.h>
+#include <arch/arm/gic.h>
+
- #define CNODE(cte) (cte)->cap.u.cnode.cnode
+ #define CNODE(cte) get_address(&(cte)->cap)
#define UNUSED(x) (x) = (x)
#define STARTUP_PROGRESS() debug(SUBSYS_STARTUP, "%s:%d\n", \
[ObjType_DevFrame] = {
[FrameCmd_Identify] = handle_frame_identify,
},
- [ObjType_CNode] = {
- [CNodeCmd_Copy] = handle_copy,
- [CNodeCmd_Mint] = handle_mint,
- [CNodeCmd_Retype] = handle_retype,
- [CNodeCmd_Delete] = handle_delete,
- [CNodeCmd_Revoke] = handle_revoke,
- [CNodeCmd_Create] = handle_create,
+ [ObjType_L1CNode] = {
+ [CNodeCmd_Copy] = handle_copy,
+ [CNodeCmd_Mint] = handle_mint,
+ [CNodeCmd_Retype] = handle_retype,
+ [CNodeCmd_Create] = handle_create,
+ [CNodeCmd_Delete] = handle_delete,
+ [CNodeCmd_Revoke] = handle_revoke,
+ [CNodeCmd_GetState] = handle_get_state,
+ [CNodeCmd_Resize] = handle_resize,
+ },
+ [ObjType_L2CNode] = {
+ [CNodeCmd_Copy] = handle_copy,
+ [CNodeCmd_Mint] = handle_mint,
+ [CNodeCmd_Retype] = handle_retype,
+ [CNodeCmd_Create] = handle_create,
+ [CNodeCmd_Delete] = handle_delete,
+ [CNodeCmd_Revoke] = handle_revoke,
[CNodeCmd_GetState] = handle_get_state,
+ [CNodeCmd_Resize] = handle_resize,
},
+ [ObjType_VNode_AARCH64_l0] = {
+ [VNodeCmd_Map] = handle_map,
+ [VNodeCmd_Unmap] = handle_unmap,
+ },
[ObjType_VNode_AARCH64_l1] = {
[VNodeCmd_Map] = handle_map,
[VNodeCmd_Unmap] = handle_unmap,
/**
* \brief Cleanup the last cap copy for an object and the object itself
*/
-STATIC_ASSERT(47 == ObjType_Num, "Knowledge of all RAM-backed cap types");
++STATIC_ASSERT(49 == ObjType_Num, "Knowledge of all RAM-backed cap types");
static errval_t
cleanup_last(struct cte *cte, struct cte *ret_ram_cap)
{
*/
/*
- * Copyright (c) 2007-2012,2015, ETH Zurich.
+ * Copyright (c) 2007-2012,2015,2016 ETH Zurich.
- * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
+ * Copyright (c) 2015, 2016 Hewlett Packard Enterprise Development LP.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
struct capability monitor_ep;
- STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
-STATIC_ASSERT(47 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(49 == ObjType_Num, "Knowledge of all cap types");
int sprint_cap(char *buf, size_t len, struct capability *cap)
{
switch (cap->type) {
cap->u.physaddr.base, cap->u.physaddr.bytes);
case ObjType_RAM:
- return snprintf(buf, len, "RAM cap (0x%" PRIxGENPADDR ":0x%zx)",
+ return snprintf(buf, len, "RAM cap (0x%" PRIxGENPADDR ":0x%" PRIxGENSIZE ")",
cap->u.ram.base, cap->u.ram.bytes);
- case ObjType_CNode: {
- int ret = snprintf(buf, len, "CNode cap "
- "(bits %u, rights mask 0x%" PRIxCAPRIGHTS ")",
- cap->u.cnode.bits, cap->u.cnode.rightsmask);
- if (cap->u.cnode.guard_size != 0 && ret < len) {
- ret += snprintf(&buf[ret], len - ret, " (guard 0x%" PRIxCADDR ":%u)",
- cap->u.cnode.guard, cap->u.cnode.guard_size);
- }
+ case ObjType_L1CNode: {
+ int ret = snprintf(buf, len, "L1 CNode cap "
+ "(base=%#"PRIxGENPADDR", allocated bytes %#"PRIxGENSIZE
+ ", rights mask %#"PRIxCAPRIGHTS")",
+ get_address(cap), get_size(cap),
+ cap->u.l1cnode.rightsmask);
+ return ret;
+ }
+
+ case ObjType_L2CNode: {
+ int ret = snprintf(buf, len, "L2 CNode cap "
+ "(base=%#"PRIxGENPADDR", rights mask %#"PRIxCAPRIGHTS")",
+ get_address(cap), cap->u.l1cnode.rightsmask);
return ret;
}
// If you create more capability types you need to deal with them
// in the table below.
- STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
-STATIC_ASSERT(47 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(49 == ObjType_Num, "Knowledge of all cap types");
static size_t caps_max_numobjs(enum objtype type, gensize_t srcsize, gensize_t objsize)
{
switch(type) {
*
* For the meaning of the parameters, see the 'caps_create' function.
*/
- STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
-STATIC_ASSERT(47 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(49 == ObjType_Num, "Knowledge of all cap types");
static errval_t caps_zero_objects(enum objtype type, lpaddr_t lpaddr,
gensize_t objsize, size_t count)
*/
// If you create more capability types you need to deal with them
// in the table below.
- STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
-STATIC_ASSERT(47 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(49 == ObjType_Num, "Knowledge of all cap types");
static errval_t caps_create(enum objtype type, lpaddr_t lpaddr, gensize_t size,
gensize_t objsize, size_t count, coreid_t owner,
//{{{1 Capability creation
/// check arguments, return true iff ok
-STATIC_ASSERT(47 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(49 == ObjType_Num, "Knowledge of all cap types");
static bool check_caps_create_arguments(enum objtype type,
size_t bytes, size_t objsize,
bool exact)
return SYS_ERR_OK;
}
- STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
-STATIC_ASSERT(47 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(49 == ObjType_Num, "Knowledge of all cap types");
/// Retype caps
/// Create `count` new caps of `type` from `offset` in src, and put them in
/// `dest_cnode` starting at `dest_slot`.
// check that size is multiple of BASE_PAGE_SIZE for mappable types
if (type_is_mappable(type) && objsize % BASE_PAGE_SIZE != 0) {
- printk(LOG_WARN, "%s: objsize = %" PRIuGENSIZE "\n", __FUNCTION__, objsize);
- debug(SUBSYS_CAPS, "%s: objsize = %zu\n", __FUNCTION__, objsize);
++ debug(SUBSYS_CAPS, "%s: objsize = %"PRIuGENSIZE"\n", __FUNCTION__, objsize);
return SYS_ERR_INVALID_SIZE;
}
-- // CNode is special for now, as we still specify CNode size in #slots
-- // expressed as 2^bits
- else if (type == ObjType_CNode &&
- ((objsize * sizeof(struct cte)) % BASE_PAGE_SIZE != 0))
+ else if (type == ObjType_L1CNode && objsize % OBJSIZE_L2CNODE != 0)
{
- printk(LOG_WARN, "%s: L1CNode: objsize = %zu\n", __FUNCTION__, objsize);
+ printk(LOG_WARN, "%s: CNode: objsize = %" PRIuGENSIZE "\n", __FUNCTION__, objsize);
return SYS_ERR_INVALID_SIZE;
}
- // TODO: clean up semantics for type == ObjType_CNode
- assert((type == ObjType_CNode
- && ((objsize * sizeof(struct cte)) % BASE_PAGE_SIZE == 0)) ||
- (type_is_mappable(type) && objsize % BASE_PAGE_SIZE == 0) ||
- !(type_is_mappable(type) || type == ObjType_CNode));
+ else if (type == ObjType_L2CNode && objsize != OBJSIZE_L2CNODE)
+ {
- printk(LOG_WARN, "%s: L2CNode: objsize = %zu\n", __FUNCTION__, objsize);
++ printk(LOG_WARN, "%s: L2CNode: objsize = %"PRIuGENSIZE"\n", __FUNCTION__, objsize);
+ return SYS_ERR_INVALID_SIZE;
+ }
+ assert((type_is_mappable(type) && objsize % BASE_PAGE_SIZE == 0) ||
+ (type == ObjType_L1CNode && objsize % OBJSIZE_L2CNODE == 0 &&
+ objsize >= OBJSIZE_L2CNODE) ||
+ (type == ObjType_L2CNode && objsize == OBJSIZE_L2CNODE) ||
+ !type_is_mappable(type));
/* No explicit retypes to Mapping allowed */
if (type_is_mapping(type)) {
}
/// Create copies to a cte
-STATIC_ASSERT(47 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(49 == ObjType_Num, "Knowledge of all cap types");
errval_t caps_copy_to_cte(struct cte *dest_cte, struct cte *src_cte, bool mint,
uintptr_t param1, uintptr_t param2)
{
.slot = TASKCN_SLOT_DISPFRAME
};
- #define ROOT_CNODE_INIT { \
- .address = CPTR_ROOTCN, \
- .address_bits = CPTR_BITS, \
- .size_bits = DEFAULT_CNODE_BITS, \
- .guard_size = 0 }
-
+/// Capability for ArgSpace
+struct capref cap_argcn = {
+ .cnode = ROOT_CNODE_INIT,
+ .slot = ROOTCN_SLOT_ARGCN
+};
+
/// Capability for monitor endpoint
struct capref cap_monitorep = {
- .cnode = ROOT_CNODE_INIT,
- .slot = ROOTCN_SLOT_MONITOREP
+ .cnode = TASK_CNODE_INIT,
+ .slot = TASKCN_SLOT_MONITOREP
};
/// Capability for kernel (only in monitor)
/**
* \brief Function to do the actual printing based on the type of capability
*/
- STATIC_ASSERT(48 == ObjType_Num, "Knowledge of all cap types");
-STATIC_ASSERT(47 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(49 == ObjType_Num, "Knowledge of all cap types");
int debug_print_cap(char *buf, size_t len, struct capability *cap)
{
switch (cap->type) {
cap->u.physaddr.base, cap->u.physaddr.bytes);
case ObjType_RAM:
- return snprintf(buf, len, "RAM cap (0x%" PRIxGENPADDR ":0x%zx)",
+ return snprintf(buf, len, "RAM cap (0x%" PRIxGENPADDR ":0x%" PRIuGENSIZE ")",
cap->u.ram.base, cap->u.ram.bytes);
- case ObjType_CNode: {
- int ret = snprintf(buf, len, "CNode cap "
- "(bits %u, rights mask 0x%" PRIxCAPRIGHTS ")",
- cap->u.cnode.bits, cap->u.cnode.rightsmask);
- if (cap->u.cnode.guard_size != 0 && ret < len) {
- ret += snprintf(&buf[ret], len - ret, " (guard 0x%" PRIxCADDR ":%u)",
- cap->u.cnode.guard, cap->u.cnode.guard_size);
- }
+ case ObjType_L1CNode: {
+ int ret = snprintf(buf, len, "L1 CNode cap "
+ "(allocated bytes %#"PRIxGENSIZE
+ ", rights mask %#"PRIxCAPRIGHTS")",
+ cap->u.l1cnode.allocated_bytes, cap->u.l1cnode.rightsmask);
+ return ret;
+ }
+
+ case ObjType_L2CNode: {
+ int ret = snprintf(buf, len, "L2 CNode cap "
+ "(rights mask %#"PRIxCAPRIGHTS")",
+ cap->u.l1cnode.rightsmask);
return ret;
}
cap->u.vnode_arm_l2_mapping.pte,
cap->u.vnode_arm_l2_mapping.pte_count);
+ case ObjType_VNode_AARCH64_l0_Mapping:
+ return snprintf(buf, len, "AARCH64 l0 Mapping (AARCH64 l0 cap @0x%p, "
+ "pte @0x%"PRIxLVADDR", pte_count=%hu)",
+ cap->u.vnode_aarch64_l0_mapping.frame,
+ cap->u.vnode_aarch64_l0_mapping.pte,
+ cap->u.vnode_aarch64_l0_mapping.pte_count);
+
case ObjType_VNode_AARCH64_l1_Mapping:
- return snprintf(buf, len, "AARCH64 l1 Mapping (AARCH64 l1 cap @0x%p, "
+ return snprintf(buf, len, "AARCH64 l1 Mapping (AARCH64 l1 cap @%p, "
"pte @0x%"PRIxLVADDR", pte_count=%hu)",
cap->u.vnode_aarch64_l1_mapping.frame,
cap->u.vnode_aarch64_l1_mapping.pte,
mdb_dump(mdb_root, 0);
}
-STATIC_ASSERT(47 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(49 == ObjType_Num, "Knowledge of all cap types");
static void print_cte(struct cte *cte, char *indent_buff)
{
struct mdbnode *node = N(cte);
return SYS_ERR_OK;
}
-errval_t pci_register_driver_movable_irq(pci_driver_init_fn init_func, uint32_t class,
- uint32_t subclass, uint32_t prog_if,
- uint32_t vendor, uint32_t device,
- uint32_t bus, uint32_t dev, uint32_t fun,
- interrupt_handler_fn handler,
- void *handler_arg,
- interrupt_handler_fn reloc_handler,
- void *reloc_handler_arg)
-{
- pci_caps_per_bar_t *caps_per_bar = NULL;
- uint8_t nbars;
- errval_t err, msgerr;
-
- err = pci_client->vtbl.
- init_pci_device(pci_client, class, subclass, prog_if, vendor,
- device, bus, dev, fun, &msgerr,
- &nbars, &caps_per_bar);
- if (err_is_fail(err)) {
+static errval_t check_src_capability(struct capref irq_src_cap){
+ struct capability irq_src_cap_data;
+ errval_t err;
+ err = debug_cap_identify(irq_src_cap, &irq_src_cap_data);
+ if(err_is_fail(err)){
+ DEBUG_ERR(err, "Could not identify cap?");
return err;
- } else if (err_is_fail(msgerr)) {
- free(caps_per_bar);
- return msgerr;
}
+ if(irq_src_cap_data.type != ObjType_IRQSrc){
+ PCI_CLIENT_DEBUG("First cap argument ist not of type IRQSrc (is=%d)."
+ "Driver not started by kaluga?\n", irq_src_cap_data.type);
+ return SYS_ERR_IRQ_NOT_IRQ_TYPE;
+ }
+ return SYS_ERR_OK;
+}
+
+/**
+ * This function does all the interrupt routing setup. It uses the interrupt source
+ * capability passed from kaluga out of the cspace.
+ * It allocates an interrupt destination capability from the monitor.
+ * It sets up a route between these two using the interrupt routing service
+ * It registers the handler passed as an argument as handler for the int destination
+ * capability.
+ * Finally, it instructs the PCI service to activate interrupts for this card.
+ */
+static errval_t setup_int_routing(int irq_idx, interrupt_handler_fn handler,
+ void *handler_arg,
+ interrupt_handler_fn reloc_handler,
+ void *reloc_handler_arg){
+ errval_t err, msgerr;
+ // We use the first passed vector of the device,
+ // for backward compatibility with function interface.
struct capref irq_src_cap;
- irq_src_cap.cnode = build_cnoderef(cap_argcn, DEFAULT_CNODE_BITS);
++ irq_src_cap.cnode = build_cnoderef(cap_argcn, CNODE_TYPE_OTHER);
+ irq_src_cap.slot = irq_idx;
- // Get vector 0 of the device.
- // For backward compatibility with function interface.
- err = pci_client->vtbl.get_irq_cap(pci_client, 0, &msgerr, &irq_src_cap);
- if (err_is_fail(err) || err_is_fail(msgerr)) {
- if (err_is_ok(err)) {
- err = msgerr;
- }
- DEBUG_ERR(err, "requesting cap for IRQ 0 of device");
- goto out;
+ err = check_src_capability(irq_src_cap);
+ if(err_is_fail(err)){
+ USER_PANIC_ERR(err, "No interrupt capability");
+ return err;
}
- uint32_t gsi = INVALID_VECTOR;
+ uint64_t gsi = INVALID_VECTOR;
err = invoke_irqsrc_get_vector(irq_src_cap, &gsi);
if (err_is_fail(err)) {
DEBUG_ERR(err, "Could not lookup GSI vector");
bar->frame_cap[nc] = cap;
if (nc == 0) {
struct frame_identity id = { .base = 0, .bytes = 0 };
- err = invoke_frame_identify(cap, &id);
- frame_identify(cap, &id);
++ err = frame_identify(cap, &id);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "frame identify failed.");
+ }
bar->paddr = id.base;
bar->bits = log2ceil(id.bytes);
bar->bytes = id.bytes * ncaps;
MENUFILE=""
ARCH=""
DEBUG_SCRIPT=""
- SMP=2
+ # Grab SMP from env, if unset default to 2
+ SMP=${SMP:-2}
+
usage () {
echo "Usage: $0 --menu <file> --arch <arch> [options]"
echo " where:"
as = AcpiLoadTables();
if (ACPI_FAILURE(as)) {
ACPI_DEBUG("AcpiLoadTables failed %s\n", AcpiFormatException(as));
- return -1;
+ return as;
}
- ACPI_DEBUG("Scanning local and I/O APICs...\n");
- int r = init_all_apics();
+ ACPI_DEBUG("Scanning interrupt sources...\n");
+ int r = init_all_interrupt_sources();
assert(r == 0);
#ifdef USE_KALUGA_DVM
as = AcpiEnableSubsystem(ACPI_FULL_INITIALIZATION);
if (ACPI_FAILURE(as)) {
ACPI_DEBUG("AcpiEnableSubsystem failed %s\n", AcpiFormatException(as));
- return -1;
+ return as;
}
- // find and init any embedded controller drivers
- // we do this early, because control methods may need to access the EC space
- ec_probe_ecdt();
-
- as = AcpiInitializeObjects(ACPI_FULL_INITIALIZATION);
- if (ACPI_FAILURE(as)) {
- ACPI_DEBUG("AcpiInitializeObjects failed\n");
- return as;
- }
-
- if (!vtd_force_off) {
- vtd_init();
- }
- return 0;
+ return acpi_arch_init();
}
/**
--- /dev/null
+/**
+ * \file
+ * \brief PCI
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2011, 2016 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+#include <barrelfish/capabilities.h>
+#include <barrelfish/nameservice_client.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <mm/mm.h>
+#include <if/monitor_blocking_rpcclient_defs.h>
+
+#include <octopus/octopus.h>
+#include <skb/skb.h>
+
+#include "acpi_debug.h"
+#include "acpi_shared.h"
+
- /**
- * Number of slots in the cspace allocator.
- * Keep it as a power of two and not smaller than DEFAULT_CNODE_SLOTS.
- */
- #define PCI_CNODE_SLOTS 2048
-
+uintptr_t my_hw_id;
+
+// Memory allocator instance for physical address regions and platform memory
+struct mm pci_mm_physaddr;
+
+// BIOS Copy
+struct capref physical_caps;
+struct capref my_devframes_cnode;
+
+// XXX should be from offsets
+#define PADDR_SPACE_SIZE_BITS 48
+
+// Initialise ACPI's physical-memory management: map the bootinfo structure,
+// set up the pci_mm_physaddr MM allocator over the 48-bit physical address
+// space, fetch the phyaddr CNode cap from the monitor into its reserved
+// root-CNode slot, and register every PhysAddr/PlatformData bootinfo region
+// with the allocator.  All regions are also exported as facts to the SKB.
+static errval_t init_allocators(void)
+{
+ errval_t err, msgerr;
+
+ struct monitor_blocking_rpc_client *cl = get_monitor_blocking_rpc_client();
+ assert(cl != NULL);
+
+ // Get the bootinfo and map it in.
+ struct capref bootinfo_frame;
+ size_t bootinfo_size;
+ struct bootinfo *bootinfo;
+
+ msgerr = cl->vtbl.get_bootinfo(cl, &err, &bootinfo_frame, &bootinfo_size);
+ if (err_is_fail(msgerr) || err_is_fail(err)) {
+ USER_PANIC_ERR(err_is_fail(msgerr) ? msgerr : err, "failed in get_bootinfo");
+ }
+
+ err = vspace_map_one_frame((void**)&bootinfo, bootinfo_size, bootinfo_frame,
+ NULL, NULL);
+ assert(err_is_ok(err));
+
+ /* Initialize the memory allocator to handle PhysAddr caps */
+ static struct range_slot_allocator devframes_allocator;
- err = range_slot_alloc_init(&devframes_allocator, PCI_CNODE_SLOTS, NULL);
++ err = range_slot_alloc_init(&devframes_allocator, L2_CNODE_SLOTS, NULL);
+ if (err_is_fail(err)) {
+ return err_push(err, LIB_ERR_SLOT_ALLOC_INIT);
+ }
+
+ /* This next parameter is important. It specifies the maximum
+ * amount that a cap may be "chunked" (i.e. broken up) at each
+ * level in the allocator. Setting it higher than 1 reduces the
+ * memory overhead of keeping all the intermediate caps around,
+ * but leads to problems if you chunk up a cap too small to be
+ * able to allocate a large subregion. This caused problems
+ * for me with a large framebuffer... -AB 20110810 */
+ err = mm_init(&pci_mm_physaddr, ObjType_DevFrame, 0, PADDR_SPACE_SIZE_BITS,
+ 1, slab_default_refill, slot_alloc_dynamic,
+ &devframes_allocator, false);
+ if (err_is_fail(err)) {
+ return err_push(err, MM_ERR_MM_INIT);
+ }
+
+ // XXX: The code below is confused about gen/l/paddrs.
+ // Caps should be managed in genpaddr, while the bus mgmt must be in lpaddr.
++
++ // Here we get a cnode cap, so we need to put it somewhere in the root cnode
++ // As we already have a reserved slot for a phyaddr caps cnode, we put it there
++
+ errval_t error_code;
+ struct capref requested_caps;
+ err = cl->vtbl.get_phyaddr_cap(cl, &requested_caps, &error_code);
+ assert(err_is_ok(err) && err_is_ok(error_code));
+ physical_caps = requested_caps;
+
++ struct capref pacn = {
++ .cnode = cnode_root,
++ .slot = ROOTCN_SLOT_PACN
++ };
++ // Move phyaddr cap to ROOTCN_SLOT_PACN to conform to 2 level cspace
++ err = cap_copy(pacn, requested_caps);
++ assert(err_is_ok(err));
++
+ // Build the capref for the first physical address capability
+ struct capref phys_cap;
- phys_cap.cnode = build_cnoderef(requested_caps, PHYSADDRCN_BITS);
++ phys_cap.cnode = build_cnoderef(pacn, CNODE_TYPE_OTHER);
+ phys_cap.slot = 0;
+
+ // CNode to hold one retyped DevFrame per bootinfo region.
+ struct cnoderef devcnode;
- err = slot_alloc(&my_devframes_cnode);
- assert(err_is_ok(err));
- cslot_t slots;
- err = cnode_create(&my_devframes_cnode, &devcnode, 255, &slots);
++ err = cnode_create_l2(&my_devframes_cnode, &devcnode);
+ if (err_is_fail(err)) { USER_PANIC_ERR(err, "cnode create"); }
+ struct capref devframe;
+ devframe.cnode = devcnode;
+ devframe.slot = 0;
+
++ if (bootinfo->regions_length > L2_CNODE_SLOTS) {
++ USER_PANIC("boot info has more regions (%d) than fit into L2 CNode (%d)",
++ bootinfo->regions_length, L2_CNODE_SLOTS);
++ }
++
+ // One pass over bootinfo: export each region to the SKB, and retype each
+ // PhysAddr/PlatformData cap into a DevFrame managed by pci_mm_physaddr.
+ for (int i = 0; i < bootinfo->regions_length; i++) {
+ struct mem_region *mrp = &bootinfo->regions[i];
+ if (mrp->mr_type == RegionType_Module) {
+ skb_add_fact("memory_region(16'%" PRIxGENPADDR ",%u,%zu,%u,%tu).",
+ mrp->mr_base, 0, mrp->mrmod_size, mrp->mr_type,
+ mrp->mrmod_data);
+ }
+ else {
+ skb_add_fact("memory_region(16'%" PRIxGENPADDR ",%u,%zu,%u,%tu).",
+ mrp->mr_base, 0, mrp->mr_bytes, mrp->mr_type,
+ mrp->mrmod_data);
+ }
+
+ if (mrp->mr_type == RegionType_PhyAddr ||
+ mrp->mr_type == RegionType_PlatformData) {
+ ACPI_DEBUG("Region %d: %"PRIxGENPADDR" - %"PRIxGENPADDR" (%lu) bytes %s\n",
+ i, mrp->mr_base, mrp->mr_base + mrp->mr_bytes, mrp->mr_bytes,
+ mrp->mr_type == RegionType_PhyAddr ?
+ "physical address" : "platform data");
+
+ err = cap_retype(devframe, phys_cap, 0, ObjType_DevFrame, mrp->mr_bytes, 1);
+ if (err_is_ok(err)) {
+ err = mm_add_multi(&pci_mm_physaddr, devframe, mrp->mr_bytes,
+ mrp->mr_base);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "adding region %d FAILED\n", i);
+ }
+ } else {
+ // SYS_ERR_REVOKE_FIRST: descendants of this cap still exist
+ // elsewhere; skip the region rather than failing hard.
+ if (err_no(err) == SYS_ERR_REVOKE_FIRST) {
+ printf("cannot retype region %d: need to revoke first; ignoring it\n", i);
+ } else {
+ USER_PANIC_ERR(err, "error in retype\n");
+ }
+ }
+ devframe.slot++;
+ phys_cap.slot++;
+ }
+
+ if (mrp->mr_type == RegionType_ACPI_TABLE) {
+ debug_printf("FOUND ACPI TABLE: %lx\n", mrp->mr_base);
+ AcpiOsSetRootPointer(mrp->mr_base);
+ }
+
+ }
+
+ return SYS_ERR_OK;
+}
+
+// Load the pci_queries Prolog library into the SKB and seed the
+// mem_region_type/2 facts that later queries rely on.  Returns the SKB
+// error code from loading pci_queries.pl; results of the individual
+// skb_add_fact calls are not checked here.
+static errval_t setup_skb_info(void)
+{
+ skb_execute("[pci_queries].");
+ errval_t err = skb_read_error_code();
+ if (err_is_fail(err)) {
+ ACPI_DEBUG("\npcimain.c: Could not load pci_queries.pl.\n"
+ "SKB returned: %s\nSKB error: %s\n",
+ skb_get_output(), skb_get_error_output());
+ return err;
+ }
+
+ skb_add_fact("mem_region_type(%d,ram).", RegionType_Empty);
+ skb_add_fact("mem_region_type(%d,roottask).", RegionType_RootTask);
+ skb_add_fact("mem_region_type(%d,phyaddr).", RegionType_PhyAddr);
+ skb_add_fact("mem_region_type(%d,multiboot_module).", RegionType_Module);
+ skb_add_fact("mem_region_type(%d,platform_data).", RegionType_PlatformData);
+ skb_add_fact("mem_region_type(%d,apic).", RegionType_LocalAPIC);
+ skb_add_fact("mem_region_type(%d,ioapic).", RegionType_IOAPIC);
+
+ return err;
+}
+
+// Entry point of the ACPI service: parses the APIC ID from the command
+// line, connects to octopus and the SKB, initialises the memory allocators
+// and ACPI itself, then serves messages forever (does not return).
+int main(int argc, char *argv[])
+{
+ errval_t err;
+
+ // Parse CMD Arguments
+ bool got_apic_id = false;
+ for (int i = 1; i < argc; i++) {
+ if(sscanf(argv[i], "apicid=%" PRIuPTR, &my_hw_id) == 1) {
+ got_apic_id = true;
+ } else {
+ // FIX: corrected "unkown" typo in the diagnostic message.
+ debug_printf("unknown argument: '%s'\n", argv[i]);
+ }
+ }
+
+ if(got_apic_id == false) {
+ fprintf(stderr, "Usage: %s APIC_ID\n", argv[0]);
+ fprintf(stderr, "Wrong monitor version?\n");
+ return EXIT_FAILURE;
+ }
+
+ err = oct_init();
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "Initialize dist");
+ }
+
+ //connect to the SKB
+ ACPI_DEBUG("acpi: connecting to the SKB...\n");
+ err = skb_client_connect();
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "Connecting to SKB failed.");
+ }
+
+ // NOTE(review): redundant -- setup_skb_info() below loads pci_queries
+ // again and actually checks the result.
+ skb_execute("[pci_queries].");
+
+ err = setup_skb_info();
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "Populating SKB failed.");
+ }
+
+ err = init_allocators();
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "Init memory allocator");
+ }
+
+ int r = init_acpi();
+ assert(r == 0);
+
+ start_service();
+
+ // Event loop; does not return.
+ messages_handler_loop();
+}
--- /dev/null
+/**
+ * \file
+ * \brief PCI
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2011, 2016 ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <barrelfish/barrelfish.h>
+#include <barrelfish/capabilities.h>
+#include <barrelfish/nameservice_client.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <mm/mm.h>
+#include <if/monitor_blocking_rpcclient_defs.h>
+
+#include <octopus/octopus.h>
+#include <skb/skb.h>
+
+#include "acpi_debug.h"
+#include "acpi_shared.h"
+
+#include "pcilnk_controller_client.h"
+#include "ioapic_controller_client.h"
+
- /**
- * Number of slots in the cspace allocator.
- * Keep it as a power of two and not smaller than DEFAULT_CNODE_SLOTS.
- */
- #define PCI_CNODE_SLOTS 2048
+
+uintptr_t my_hw_id;
+
+bool vtd_force_off;
+
+// Memory allocator instance for physical address regions and platform memory
+struct mm pci_mm_physaddr;
+
+// BIOS Copy
+struct capref biosmem;
+struct capref physical_caps;
+struct capref my_devframes_cnode;
+
+// Snapshot the BIOS memory region (2^BIOS_BITS bytes) into the global
+// 'biosmem' frame before ACPI initialisation can clobber it.  The original
+// physical range is borrowed from pci_mm_physaddr, both regions are mapped,
+// the bytes copied, and the mappings and the borrowed cap released again.
+static errval_t copy_bios_mem(void) {
+ errval_t err = SYS_ERR_OK;
+
+ // Get a copy of the VBE BIOS before ACPI touches it
+ struct capref bioscap;
+
+ // NOTE(review): requests the range starting at physical address 0 --
+ // assumes the allocator still holds it; confirm against init_allocators().
+ err = mm_alloc_range(&pci_mm_physaddr, BIOS_BITS, 0,
+ 1UL << BIOS_BITS, &bioscap, NULL);
+ assert(err_is_ok(err));
+
+ void *origbios;
+ struct vregion *origbios_vregion;
+ err = vspace_map_one_frame(&origbios, 1 << BIOS_BITS, bioscap,
+ NULL, &origbios_vregion);
+ assert(err_is_ok(err));
+
+ err = frame_alloc(&biosmem, 1 << BIOS_BITS, NULL);
+ assert(err_is_ok(err));
+
+ void *newbios;
+ struct vregion *newbios_vregion;
+ err = vspace_map_one_frame(&newbios, 1 << BIOS_BITS, biosmem,
+ NULL, &newbios_vregion);
+ assert(err_is_ok(err));
+
+ memcpy(newbios, origbios, 1 << BIOS_BITS);
+
+ // Unmap both vspace regions again
+ vregion_destroy(origbios_vregion);
+ vregion_destroy(newbios_vregion);
+
+ // Return the borrowed BIOS range to the allocator.
+ err = mm_free(&pci_mm_physaddr, bioscap, 0, BIOS_BITS);
+ assert(err_is_ok(err));
+
+ return err;
+}
+
+// Initialise the ACPI/PCI service's allocators: map bootinfo, set up the
+// pci_mm_physaddr MM allocator, install the I/O cap in its task-CNode slot,
+// move the phyaddr CNode cap into its reserved root-CNode slot, and register
+// every PhysAddr/PlatformData region with the allocator and the SKB.
+static errval_t init_allocators(void)
+{
+ errval_t err, msgerr;
+
+ struct monitor_blocking_rpc_client *cl = get_monitor_blocking_rpc_client();
+ assert(cl != NULL);
+
+ // Get the bootinfo and map it in.
+ struct capref bootinfo_frame;
+ size_t bootinfo_size;
+ struct bootinfo *bootinfo;
+
+ msgerr = cl->vtbl.get_bootinfo(cl, &err, &bootinfo_frame, &bootinfo_size);
+ if (err_is_fail(msgerr) || err_is_fail(err)) {
+ USER_PANIC_ERR(err_is_fail(msgerr) ? msgerr : err, "failed in get_bootinfo");
+ }
+
+ err = vspace_map_one_frame((void**)&bootinfo, bootinfo_size, bootinfo_frame,
+ NULL, NULL);
+ assert(err_is_ok(err));
+
+ /* Initialize the memory allocator to handle PhysAddr caps */
+ static struct range_slot_allocator devframes_allocator;
- err = range_slot_alloc_init(&devframes_allocator, PCI_CNODE_SLOTS, NULL);
++ err = range_slot_alloc_init(&devframes_allocator, L2_CNODE_SLOTS, NULL);
+ if (err_is_fail(err)) {
+ return err_push(err, LIB_ERR_SLOT_ALLOC_INIT);
+ }
+
+ err = mm_init(&pci_mm_physaddr, ObjType_DevFrame, 0, 48,
+ /* This next parameter is important. It specifies the maximum
+ * amount that a cap may be "chunked" (i.e. broken up) at each
+ * level in the allocator. Setting it higher than 1 reduces the
+ * memory overhead of keeping all the intermediate caps around,
+ * but leads to problems if you chunk up a cap too small to be
+ * able to allocate a large subregion. This caused problems
+ * for me with a large framebuffer... -AB 20110810 */
+ 1, /*was DEFAULT_CNODE_BITS,*/
- slab_default_refill, slot_alloc_dynamic, &devframes_allocator, false);
++ slab_default_refill, slot_alloc_dynamic,
++ slot_refill_dynamic, &devframes_allocator, false);
+ if (err_is_fail(err)) {
+ return err_push(err, MM_ERR_MM_INIT);
+ }
+
+ // Request I/O Cap
+ struct capref requested_caps;
+ errval_t error_code;
+ err = cl->vtbl.get_io_cap(cl, &requested_caps, &error_code);
+ assert(err_is_ok(err) && err_is_ok(error_code));
+ // Copy into correct slot
+ struct capref caps_io = {
+ .cnode = cnode_task,
+ .slot = TASKCN_SLOT_IO
+ };
+ err = cap_copy(caps_io, requested_caps);
+ // FIX: this result was previously stored but never checked; fail loudly
+ // like the surrounding cap operations instead of silently continuing.
+ assert(err_is_ok(err));
+
+ // XXX: The code below is confused about gen/l/paddrs.
+ // Caps should be managed in genpaddr, while the bus mgmt must be in lpaddr.
++
++ // Here we get a cnode cap, so we need to put it somewhere in the root cnode
++ // As we already have a reserved slot for a phyaddr caps cnode, we put it there
+ err = cl->vtbl.get_phyaddr_cap(cl, &requested_caps, &error_code);
+ assert(err_is_ok(err) && err_is_ok(error_code));
+ physical_caps = requested_caps;
+
++ struct capref pacn = {
++ .cnode = cnode_root,
++ .slot = ROOTCN_SLOT_PACN
++ };
++ // Move phyaddr cap to ROOTCN_SLOT_PACN to conform to 2 level cspace
++ err = cap_copy(pacn, requested_caps);
++ assert(err_is_ok(err));
++
+ // Build the capref for the first physical address capability
+ struct capref phys_cap;
- phys_cap.cnode = build_cnoderef(requested_caps, PHYSADDRCN_BITS);
++ phys_cap.cnode = build_cnoderef(pacn, CNODE_TYPE_OTHER);
+ phys_cap.slot = 0;
+
+ // CNode to hold one retyped DevFrame per bootinfo region.
+ struct cnoderef devcnode;
- err = slot_alloc(&my_devframes_cnode);
- assert(err_is_ok(err));
- cslot_t slots;
- err = cnode_create(&my_devframes_cnode, &devcnode, 255, &slots);
++ err = cnode_create_l2(&my_devframes_cnode, &devcnode);
+ if (err_is_fail(err)) { USER_PANIC_ERR(err, "cnode create"); }
+ struct capref devframe;
+ devframe.cnode = devcnode;
+ devframe.slot = 0;
+
++ if (bootinfo->regions_length > L2_CNODE_SLOTS) {
++ USER_PANIC("boot info has more regions (%d) than fit into L2 CNode (%d)",
++ bootinfo->regions_length, L2_CNODE_SLOTS);
++ }
++
+ for (int i = 0; i < bootinfo->regions_length; i++) {
+ struct mem_region *mrp = &bootinfo->regions[i];
+ if (mrp->mr_type == RegionType_Module) {
+ skb_add_fact("memory_region(16'%" PRIxGENPADDR ",%u,%zu,%u,%tu).",
+ mrp->mr_base, 0, mrp->mrmod_size, mrp->mr_type,
+ mrp->mrmod_data);
+ }
+ else {
+ skb_add_fact("memory_region(16'%" PRIxGENPADDR ",%u,%" PRIuGENSIZE ",%u,%tu).",
+ mrp->mr_base, 0, mrp->mr_bytes, mrp->mr_type,
+ mrp->mrmod_data);
+ }
+
+ if (mrp->mr_type == RegionType_PhyAddr ||
+ mrp->mr_type == RegionType_PlatformData) {
+ ACPI_DEBUG("Region %d: %"PRIxGENPADDR" - %"PRIxGENPADDR" %s\n",
+ i, mrp->mr_base, mrp->mr_base + mrp->mr_bytes,
+ mrp->mr_type == RegionType_PhyAddr ?
+ "physical address" : "platform data");
+
+ err = cap_retype(devframe, phys_cap, 0, ObjType_DevFrame, mrp->mr_bytes, 1);
+ if (err_no(err) == SYS_ERR_REVOKE_FIRST) {
+ printf("cannot retype region %d: need to revoke first; ignoring it\n", i);
+ } else {
++ if (err_is_fail(err)) {
++ DEBUG_ERR(err, "cap_retype while creating region caps");
++ }
+ assert(err_is_ok(err));
+
+ err = mm_add_multi(&pci_mm_physaddr, devframe, mrp->mr_bytes,
+ mrp->mr_base);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "adding region %d FAILED\n", i);
+ }
+ }
+
+ phys_cap.slot++;
+ devframe.slot++;
+ }
+ }
+
+ return SYS_ERR_OK;
+}
+
+// Load the irq_routing_new Prolog library into the SKB.  Distinguishes three
+// outcomes: skb_execute itself failing, the library not being found (only
+// visible as "library not found" on the SKB's stderr), and success.
+static errval_t load_irq_routing_new(void){
+ // Load irq file
+ errval_t err;
+ err = skb_execute("[irq_routing_new].");
+ if (err_is_fail(err)) {
+ ACPI_DEBUG("Could not load irq_routing_new.pl.\n"
+ "SKB returned: %s\nSKB error: %s\n",
+ skb_get_output(), skb_get_error_output());
+ return err;
+ } else if(strstr(skb_get_error_output(), "library not found") != NULL) {
+ debug_printf("Error processing irq_routing_new.pl.\n"
+ "SKB stdout: %s\nSKB stderr: %s\n",
+ skb_get_output(), skb_get_error_output());
+ return SKB_ERR_EXECUTION;
+ } else {
+ ACPI_DEBUG("Successfully loaded irq_routing_new.pl.\n"
+ "SKB returned: %s\nSKB error: %s\n",
+ skb_get_output(), skb_get_error_output());
+ return SYS_ERR_OK;
+ }
+}
+
+// Ask the SKB to instantiate the x86 interrupt-controller model by executing
+// the add_x86_controllers goal; returns the SKB's error code on failure.
+static errval_t setup_skb_irq_controllers(void){
+ errval_t err;
+ // Execute add x86 controllers
+ skb_execute("add_x86_controllers.");
+ err = skb_read_error_code();
+ if (err_is_fail(err)) {
+ debug_printf("Failure executing add_irq_controllers\n"
+ "SKB returned: %s\nSKB error: %s\n",
+ skb_get_output(), skb_get_error_output());
+ return err;
+ } else {
+ ACPI_DEBUG("Add x86 controllers successful.\n");
+ }
+ return SYS_ERR_OK;
+}
+
+// Load the pci_queries Prolog library into the SKB and seed the
+// mem_region_type/2 facts that later queries rely on.  Returns the SKB
+// error code from loading pci_queries.pl; results of the individual
+// skb_add_fact calls are not checked here.
+static errval_t setup_skb_info(void)
+{
+ skb_execute("[pci_queries].");
+ errval_t err = skb_read_error_code();
+ if (err_is_fail(err)) {
+ ACPI_DEBUG("\npcimain.c: Could not load pci_queries.pl.\n"
+ "SKB returned: %s\nSKB error: %s\n",
+ skb_get_output(), skb_get_error_output());
+ return err;
+ }
+
+ skb_add_fact("mem_region_type(%d,ram).", RegionType_Empty);
+ skb_add_fact("mem_region_type(%d,roottask).", RegionType_RootTask);
+ skb_add_fact("mem_region_type(%d,phyaddr).", RegionType_PhyAddr);
+ skb_add_fact("mem_region_type(%d,multiboot_module).", RegionType_Module);
+ skb_add_fact("mem_region_type(%d,platform_data).", RegionType_PlatformData);
+ skb_add_fact("mem_region_type(%d,apic).", RegionType_LocalAPIC);
+ skb_add_fact("mem_region_type(%d,ioapic).", RegionType_IOAPIC);
+
+ return err;
+}
+
+// Entry point of the combined ACPI/PCI service: parses arguments, connects
+// to octopus and the SKB, sets up allocators, preserves the BIOS, loads the
+// interrupt-routing model, initialises ACPI and the interrupt-controller
+// clients, then serves messages forever (does not return).
+int main(int argc, char *argv[])
+{
+ errval_t err;
+
+ // Parse CMD Arguments
+ bool got_apic_id = false;
+ bool do_video_init = false;
+ // NOTE(review): initialised to true and only ever set to true below, so
+ // VT-d is always forced off and the "vtd_force_off" argument is a no-op.
+ // Confirm whether this is intentional (e.g. a temporary measure).
+ vtd_force_off = true;
+ for (int i = 1; i < argc; i++) {
+ if(sscanf(argv[i], "apicid=%" PRIuPTR, &my_hw_id) == 1) {
+ got_apic_id = true;
+ }
+
+ if (strcmp(argv[i], "video_init") == 0) {
+ do_video_init = true;
+ } else if (strncmp(argv[i], "vtd_force_off", strlen("vtd_force_off")) == 0) {
+ vtd_force_off = true;
+ }
+ }
+
+ if(got_apic_id == false) {
+ fprintf(stderr, "Usage: %s APIC_ID\n", argv[0]);
+ fprintf(stderr, "Wrong monitor version?\n");
+ return EXIT_FAILURE;
+ }
+
+ err = oct_init();
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "Initialize dist");
+ }
+
+ //connect to the SKB
+ ACPI_DEBUG("acpi: connecting to the SKB...\n");
+ err = skb_client_connect();
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "Connecting to SKB failed.");
+ }
+
+ // NOTE(review): redundant -- setup_skb_info() below loads pci_queries
+ // again and actually checks the result.
+ skb_execute("[pci_queries].");
+
+
+ err = setup_skb_info();
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "Populating SKB failed.");
+ }
+
+ err = init_allocators();
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "Init memory allocator");
+ }
+
+ // Must run after init_allocators (uses pci_mm_physaddr) and before ACPI
+ // can touch the BIOS area.
+ err = copy_bios_mem();
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "Copy BIOS Memory");
+ }
+
+ err = load_irq_routing_new();
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "load irq routing new.");
+ }
+
+ int r = init_acpi();
+ assert(r == 0);
+
+ buttons_init();
+
+ if (do_video_init) {
+ video_init();
+ }
+
+
+ err = setup_skb_irq_controllers();
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "setup skb irq controllers");
+ }
+
+ err = pcilnk_controller_client_init();
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "int controller client init");
+ }
+
+ err = ioapic_controller_client_init();
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "ioapic controller client init");
+ }
+
+ start_service();
+
+
+ // Event loop; does not return.
+ messages_handler_loop();
+}
+
invoke_monitor_spawn_core(coreid_t core_id, enum cpu_type cpu_type,
forvaddr_t entry)
{
- uint8_t invoke_bits = get_cap_valid_bits(ipi_cap);
- capaddr_t invoke_cptr = get_cap_addr(ipi_cap) >> (CPTR_BITS - invoke_bits);
-
- return syscall6((invoke_bits << 16) | (KernelCmd_Spawn_core << 8)
- | SYSCALL_INVOKE, invoke_cptr, core_id, cpu_type,
- (uintptr_t)(entry >> 32), (uintptr_t) entry).error;
+ return cap_invoke4(ipi_cap, core_id, cpu_type,
+ (uintptr_t)(entry >> 32), (uintptr_t) entry).error;
}
+#endif
errval_t spawn_xcore_monitor(coreid_t coreid, int hwid,
enum cpu_type cpu_type,
struct terminal_state *ts = get_terminal_state();
term_client_blocking_config(&ts->client, TerminalConfig_CTRLC, false);
linenoiseHistorySetMaxLen(1024);
+
+ // Create inherit CNode to pass session cap to programs spawned from fish
+ errval_t err;
+ err = alloc_inheritcn_with_caps(&inheritcn_cap, NULL_CAP, cap_sessionid, NULL_CAP);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "Error allocating inherit CNode with session cap.");
+ }
+
for (;;) {
-
char* input = NULL;
int cmd_argc;
char *cmd_argv[64]; // Support a max of 64 cmd args
--- /dev/null
+/**
+ * \file
+ * \brief Provides a generic startup function for the ARM OMAP platform
+ */
+/*
+ * Copyright (c) 2013, ETH Zurich.
+ * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include <barrelfish/barrelfish.h>
+#include <barrelfish/spawn_client.h>
+#include <barrelfish_kpi/platform.h>
+#include <if/monitor_blocking_rpcclient_defs.h>
+
+#include <arch/arm/omap44xx/device_registers.h>
+#include <omap44xx_map.h>
+#include <vexpress_map.h>
+
+#include "kaluga.h"
+
+// Whitelist entry mapping a driver binary name to the {0,0}-terminated list
+// of {physical base, size} register ranges that driver may map.
+struct allowed_registers
+{
+ char* binary;
+ lpaddr_t registers[][2];
+};
+
+// OMAP44xx USB host driver: host-controller config registers.
+static struct allowed_registers usb = {
+ .binary = "hw.arm.omap44xx.usb",
+ .registers =
+ {
+ {OMAP44XX_MAP_L4_CFG_HSUSBHOST, OMAP44XX_MAP_L4_CFG_HSUSBHOST_SIZE},
+ {0x0, 0x0}
+ }
+};
+
+// OMAP44xx face-detect driver: camera/device PRM and FDIF register ranges.
+static struct allowed_registers fdif = {
+ .binary = "hw.arm.omap44xx.fdif",
+ .registers =
+ {
+ {OMAP44XX_CAM_CM2, 0x1000},
+ {OMAP44XX_DEVICE_PRM, 0x1000},
+ {OMAP44XX_CAM_PRM, 0x1000},
+ {OMAP44XX_MAP_L4_CFG_FACE_DETECT,OMAP44XX_MAP_L4_CFG_FACE_DETECT_SIZE},
+ {0x0, 0x0}
+ }
+};
+
+// OMAP44xx MMC/SD host driver: clock management, I2C, control modules and
+// the five MMC host-controller register ranges.
+static struct allowed_registers mmchs = {
+ .binary = "hw.arm.omap44xx.mmchs",
+ .registers =
+ {
+ {OMAP44XX_CM2, 0x1000},
+ {OMAP44XX_CLKGEN_CM2, 0x1000},
+ {OMAP44XX_L4PER_CM2, 0x1000},
+ // i2c
+ {OMAP44XX_MAP_L4_PER_I2C1, OMAP44XX_MAP_L4_PER_I2C1_SIZE},
+ {OMAP44XX_MAP_L4_PER_I2C2, OMAP44XX_MAP_L4_PER_I2C2_SIZE},
+ {OMAP44XX_MAP_L4_PER_I2C3, OMAP44XX_MAP_L4_PER_I2C3_SIZE},
+ {OMAP44XX_MAP_L4_PER_I2C4, OMAP44XX_MAP_L4_PER_I2C4_SIZE},
+ // ctrlmodules
+ {OMAP44XX_MAP_L4_CFG_SYSCTRL_GENERAL_CORE, OMAP44XX_MAP_L4_CFG_SYSCTRL_GENERAL_CORE_SIZE},
+ {OMAP44XX_MAP_L4_WKUP_SYSCTRL_GENERAL_WKUP, OMAP44XX_MAP_L4_WKUP_SYSCTRL_GENERAL_WKUP_SIZE},
+ {OMAP44XX_MAP_L4_CFG_SYSCTRL_PADCONF_CORE, OMAP44XX_MAP_L4_CFG_SYSCTRL_PADCONF_CORE_SIZE},
+ {OMAP44XX_MAP_L4_WKUP_SYSCTRL_PADCONF_WKUP, OMAP44XX_MAP_L4_WKUP_SYSCTRL_PADCONF_WKUP_SIZE},
+ // MMCHS
+ {OMAP44XX_MAP_L4_PER_HSMMC1, OMAP44XX_MAP_L4_PER_HSMMC1_SIZE},
+ {OMAP44XX_MAP_L4_PER_HSMMC2, OMAP44XX_MAP_L4_PER_HSMMC2_SIZE},
+ {OMAP44XX_MAP_L4_PER_MMC_SD3, OMAP44XX_MAP_L4_PER_MMC_SD3_SIZE},
+ {OMAP44XX_MAP_L4_PER_MMC_SD4, OMAP44XX_MAP_L4_PER_MMC_SD4_SIZE},
+ {OMAP44XX_MAP_L4_PER_MMC_SD5, OMAP44XX_MAP_L4_PER_MMC_SD5_SIZE},
+ {0x0, 0x0}
+ }
+};
+
+// OMAP44xx power/reset/clock management driver: PRM and I2C ranges.
+static struct allowed_registers prcm = {
+ .binary = "hw.arm.omap44xx.prcm",
+ .registers =
+ {
+ {OMAP44XX_MAP_L4_WKUP_PRM, OMAP44XX_MAP_L4_WKUP_PRM_SIZE},
+ {OMAP44XX_DEVICE_PRM, 0x1000},
+ {OMAP44XX_MAP_L4_PER_I2C1, OMAP44XX_MAP_L4_PER_I2C1_SIZE},
+ {OMAP44XX_MAP_L4_PER_I2C2, OMAP44XX_MAP_L4_PER_I2C2_SIZE},
+ {OMAP44XX_MAP_L4_PER_I2C3, OMAP44XX_MAP_L4_PER_I2C3_SIZE},
+ {OMAP44XX_MAP_L4_PER_I2C4, OMAP44XX_MAP_L4_PER_I2C4_SIZE},
+ {0x0, 0x0}
+ }
+};
+
+// OMAP44xx UART driver: all four UART register ranges.
+static struct allowed_registers omap_uart = {
+ .binary = "hw.arm.omap44xx.uart",
+ .registers =
+ {
+ {OMAP44XX_MAP_L4_PER_UART1,OMAP44XX_MAP_L4_PER_UART1_SIZE},
+ {OMAP44XX_MAP_L4_PER_UART2,OMAP44XX_MAP_L4_PER_UART2_SIZE},
+ {OMAP44XX_MAP_L4_PER_UART3,OMAP44XX_MAP_L4_PER_UART3_SIZE},
+ {OMAP44XX_MAP_L4_PER_UART4,OMAP44XX_MAP_L4_PER_UART4_SIZE},
+ {0x0, 0x0}
+ }
+};
+
+// OMAP44xx system DMA driver: SDMA register range.
+static struct allowed_registers sdma = {
+ .binary = "hw.arm.omap44xx.sdma",
+ .registers =
+ {
+ {OMAP44XX_MAP_L4_CFG_SDMA, OMAP44XX_MAP_L4_CFG_SDMA_SIZE},
+ {0x0, 0x0}
+ }
+};
+
+// All drivers known on the OMAP44xx platform; NULL-terminated.
+static struct allowed_registers* omap44xx[] = {
+ &usb,
+ &fdif,
+ &mmchs,
+ &prcm,
+ &omap_uart,
+ &sdma,
+ NULL,
+};
+
+// VExpress UART driver: all four UART register ranges.
+static struct allowed_registers vexpress_uart = {
+ .binary = "hw.arm.vexpress.uart",
+ .registers =
+ {
+ {VEXPRESS_MAP_UART0, VEXPRESS_MAP_UART0_SIZE},
+ {VEXPRESS_MAP_UART1, VEXPRESS_MAP_UART1_SIZE},
+ {VEXPRESS_MAP_UART2, VEXPRESS_MAP_UART2_SIZE},
+ {VEXPRESS_MAP_UART3, VEXPRESS_MAP_UART3_SIZE},
+ {0x0, 0x0}
+ }
+};
+
+// All drivers known on the VExpress platform; NULL-terminated.
+static struct allowed_registers* vexpress[] = {
+ &vexpress_uart,
+ NULL,
+};
+
+/**
+ * \brief Startup function for ARMv7 drivers.
+ *
+ * Makes sure we get the device register capabilities.
+ */
+errval_t
+default_start_function(coreid_t where, struct module_info* driver,
+ char* record, struct driver_argument* int_arg)
+{
+ assert(driver != NULL);
+ assert(record != NULL);
+
+ // NOTE(review): 'where' and 'int_arg' are unused in this body -- the
+ // driver is always spawned on core 0 below; confirm this is intended.
+ errval_t err;
+
+ struct monitor_blocking_rpc_client *m=
+ get_monitor_blocking_rpc_client();
+ assert(m != NULL);
+
+ // Query the platform so we can pick the matching register whitelist.
+ uint32_t arch, platform;
+ err = m->vtbl.get_platform(m, &arch, &platform);
+ assert(err_is_ok(err));
+ assert(arch == PI_ARCH_ARMV7A);
+
+ struct allowed_registers **regs= NULL;
+ switch(platform) {
+ case PI_PLATFORM_OMAP44XX:
+ regs= omap44xx;
+ break;
+ case PI_PLATFORM_VEXPRESS:
+ regs= vexpress;
+ break;
+ default:
+ printf("Unrecognised ARMv7 platform\n");
+ abort();
+ }
+
+ // TODO Request the right set of caps and put in device_range_cap
+ struct cnoderef dev_cnode;
+ struct capref dev_cnode_cap;
- cslot_t retslots;
- err = cnode_create(&dev_cnode_cap, &dev_cnode, 255, &retslots);
++ err = cnode_create_l2(&dev_cnode_cap, &dev_cnode);
+ assert(err_is_ok(err));
+
+ struct capref device_cap;
+ device_cap.cnode = dev_cnode;
+ device_cap.slot = 0;
+
+ // Look up the driver binary name from the octopus record; 'name' is
+ // heap-allocated by oct_read and freed after the loop.
+ char* name;
+ err = oct_read(record, "%s", &name);
+ assert(err_is_ok(err));
+ KALUGA_DEBUG("%s:%d: Starting driver for %s\n", __FUNCTION__, __LINE__, name);
+ for (size_t i=0; regs[i] != NULL; i++) {
+
+ if(strcmp(name, regs[i]->binary) != 0) {
+ continue;
+ }
+
+ // Get the device cap from the managed capability tree
+ // put them all in a single cnode
+ for (size_t j=0; regs[i]->registers[j][0] != 0x0; j++) {
+ struct capref device_frame;
+ KALUGA_DEBUG("%s:%d: mapping 0x%"PRIxLPADDR" %"PRIuLPADDR"\n", __FUNCTION__, __LINE__,
+ regs[i]->registers[j][0], regs[i]->registers[j][1]);
+
+ // Round the base down to a page boundary before requesting the cap.
+ lpaddr_t base = regs[i]->registers[j][0] & ~(BASE_PAGE_SIZE-1);
+ err = get_device_cap(base,
+ regs[i]->registers[j][1],
+ &device_frame);
+ assert(err_is_ok(err));
+
+ KALUGA_DEBUG("get_device_cap worked\n");
+
+ err = cap_copy(device_cap, device_frame);
+ assert(err_is_ok(err));
+ device_cap.slot++;
+ }
+ }
+ free(name);
+
+ // Spawn the driver on core 0, handing it the CNode of device frames.
+ err = spawn_program_with_caps(0, driver->path, driver->argv, environ,
+ NULL_CAP, dev_cnode_cap, 0, driver->did);
+ if (err_is_fail(err)) {
+ DEBUG_ERR(err, "Spawning %s failed.", driver->path);
+ return err;
+ }
+
+ return SYS_ERR_OK;
+}
}
struct frame_identity ret;
-- err = invoke_frame_identify(requested_cap, &ret);
++ err = frame_identify(requested_cap, &ret);
+ size_t capbits= log2ceil(ret.bytes);
assert (err_is_ok(err));
- assert((1ULL << log2ceil(ret.bytes)) == ret.bytes);
+ assert((1ULL << capbits) == ret.bytes);
- err = mm_init(®ister_manager, ObjType_DevFrame, ret.base, log2ceil(ret.bytes),
- 1, slab_default_refill, slot_alloc_dynamic, slot_refill_dynamic,
+ err = mm_init(®ister_manager, ObjType_DevFrame, ret.base, capbits, 1,
- slab_default_refill, slot_alloc_dynamic,
++ slab_default_refill, slot_alloc_dynamic, slot_refill_dynamic,
&devframes_allocator, false);
if (err_is_fail(err)) {
return err_push(err, MM_ERR_MM_INIT);