cap VNode_x86_64_ptable_Mapping from VNode_x86_64_ptable inherit Mapping;
+/* Extended page table types for x86 nested paging */
+cap VNode_x86_64_ept_pml4 from RAM inherit VNode;
+cap VNode_x86_64_ept_pml4_Mapping from VNode_x86_64_ept_pml4 inherit Mapping;
+cap VNode_x86_64_ept_pdpt from RAM inherit VNode;
+cap VNode_x86_64_ept_pdpt_Mapping from VNode_x86_64_ept_pdpt inherit Mapping;
+cap VNode_x86_64_ept_pdir from RAM inherit VNode;
+cap VNode_x86_64_ept_pdir_Mapping from VNode_x86_64_ept_pdir inherit Mapping;
+cap VNode_x86_64_ept_ptable from RAM inherit VNode;
+cap VNode_x86_64_ept_ptable_Mapping from VNode_x86_64_ept_ptable inherit Mapping;
+ /* Intel VT-d */
+
+ cap VNode_VTd_root_table from RAM inherit VNode;
+
+ cap VNode_VTd_root_table_Mapping from VNode_VTd_root_table inherit Mapping;
+
+ cap VNode_VTd_ctxt_table from RAM inherit VNode;
+
+ cap VNode_VTd_ctxt_table_Mapping from VNode_VTd_ctxt_table inherit Mapping;
+
+ /* cap IntelVTd from PhysAddr from_self inherit Memory; */
+
+
/* x86_32-specific capabilities: */
/* PDPT */
eq coreid coreid; /* Core where the domain was created. */
eq uint32 core_local_id; /* Core-local ID of the domain. */
};
+
-
+ /*
+ cap ProtectionDomainManager is_always_copy {
+ // Capability to act as device manager, i.e. create new device id caps
+ };
+
+ cap ProtectionDomain from IOMMUDomainManager {
+ uint16 id;
+ uint16 type
+ };
+ */
+
+ cap DeviceIDManager is_always_copy {
+ // Capability to act as device manager, i.e. create new device id caps
+ };
+
+ cap DeviceID from DeviceIDManager {
+ uint16 segment;
+ uint8 bus;
+ uint8 device;
+ uint8 function;
+ uint8 type;
+ uint16 flags;
+ };
#include <barrelfish/caddr.h>
#include <barrelfish/invocations_arch.h>
+ #include <barrelfish/idc.h>
+/**
+ * \brief Performs a no-op invocation on a RAM capability.
+ *
+ * \param ram  RAM capability to invoke.
+ *
+ * \return Error value of the RAMCmd_Noop invocation.
+ */
+static inline errval_t invoke_ram_noop(struct capref ram)
+{
+ return cap_invoke1(ram, RAMCmd_Noop).error;
+}
+
/**
* \brief Create a capability.
*
return sysret.error;
}
+ /**
+  * \brief Obtains the identity of a device ID capability.
+  *
+  * \param deviceid  Device ID capability to identify; must reside in the
+  *                  caller's root CNode (asserted below).
+  * \param ret       Filled in by the kernel on success. On failure, all
+  *                  fields are zeroed and the type is set to
+  *                  DEVICE_ID_TYPE_UNKNOWN so callers never see stale data.
+  *
+  * \return Error value of the DeviceID_Identify invocation.
+  */
+ static inline errval_t invoke_device_identify(struct capref deviceid,
+ struct device_identity *ret)
+ {
+ assert(ret != NULL);
+ assert(get_croot_addr(deviceid) == CPTR_ROOTCN);
+
+ struct sysret sysret = cap_invoke2(deviceid, DeviceID_Identify, (uintptr_t)ret);
+
+ if (err_is_ok(sysret.error)) {
+ return sysret.error;
+ }
+
+ // Invocation failed: clear the output structure to defined values.
+ ret->bus = 0;
+ ret->device = 0;
+ ret->function = 0;
+ ret->flags = 0;
+ ret->type = DEVICE_ID_TYPE_UNKNOWN;
+ ret->segment = 0;
+ return sysret.error;
+ }
+
+
+ /**
+  * \brief Obtains the identity of an endpoint capability.
+  *
+  * \param ep   Endpoint capability to identify; must reside in the caller's
+  *             root CNode (asserted below).
+  * \param ret  Filled in by the kernel on success. The kernel reports the
+  *             raw object type in ret->eptype; this wrapper translates it
+  *             to the IDC_ENDPOINT_{LMP,UMP} constants. On failure, all
+  *             fields are zeroed.
+  *
+  * \return Error value of the invocation, or SYS_ERR_INVALID_SOURCE_TYPE
+  *         if the kernel reported an endpoint type other than LMP/UMP.
+  */
+ static inline errval_t invoke_endpoint_identify(struct capref ep,
+ struct endpoint_identity *ret)
+ {
+ assert(ret != NULL);
+ assert(get_croot_addr(ep) == CPTR_ROOTCN);
+
+ struct sysret sysret = cap_invoke2(ep, EndPointCMD_Identify, (uintptr_t)ret);
+
+ if (err_is_ok(sysret.error)) {
+ // Map the kernel object type onto the user-level IDC endpoint kind.
+ switch(ret->eptype) {
+ case ObjType_EndPointLMP :
+ ret->eptype = IDC_ENDPOINT_LMP;
+ break;
+ case ObjType_EndPointUMP :
+ ret->eptype = IDC_ENDPOINT_UMP;
+ break;
+ default:
+ // NOTE(review): ret is left partially filled on this path -- the
+ // kernel wrote it but eptype was unrecognized; confirm callers
+ // check the error before using ret.
+ return SYS_ERR_INVALID_SOURCE_TYPE;
+ }
+ return sysret.error;
+ }
+
+ // Invocation failed: clear the output structure to defined values.
+ ret->iftype = 0;
+ ret->base = 0;
+ ret->length = 0;
+ ret->eptype = 0;
+
+ return sysret.error;
+ }
+
+ /**
+  * \brief Sets the interface type of an endpoint capability.
+  *
+  * \param ep      Endpoint capability; must reside in the caller's root
+  *                CNode (asserted below).
+  * \param iftype  Interface type value to store on the endpoint.
+  *
+  * \return Error value of the EndPointCMD_SetIftype invocation.
+  */
+ static inline errval_t invoke_endpoint_set_iftype(struct capref ep,
+ uint16_t iftype)
+ {
+ assert(get_croot_addr(ep) == CPTR_ROOTCN);
+
+ return cap_invoke2(ep, EndPointCMD_SetIftype, (uintptr_t) iftype).error;
+ }
+
+
/**
+ * \brief Cleans all dirty bits in a page table.
+ */
+static inline errval_t invoke_clean_dirty_bits(struct capref vnode, size_t* how_many)
+{
+
+ struct sysret ret = cap_invoke1(vnode, VNodeCmd_CleanDirtyBits);
+ // The invocation's value field carries a count -- presumably the number
+ // of dirty bits cleaned; TODO confirm against the kernel handler.
+ // Callers that don't care may pass NULL.
+ if (how_many != NULL) {
+ *how_many = ret.value;
+ }
+ return ret.error;
+}
+
+/**
* \brief Modify mapping flags on parts of a mapping
*
* \param mapping CSpace address of mapping capability
static inline bool type_is_vnode(enum objtype type)
{
- STATIC_ASSERT(58 == ObjType_Num, "Check VNode definitions");
- STATIC_ASSERT(60 == ObjType_Num, "Check VNode definitions");
++ STATIC_ASSERT(68 == ObjType_Num, "Check VNode definitions");
- return (type == ObjType_VNode_x86_64_pml4 ||
+ return (type == ObjType_VNode_VTd_root_table ||
+ type == ObjType_VNode_VTd_ctxt_table ||
+ type == ObjType_VNode_x86_64_pml5 ||
+ type == ObjType_VNode_x86_64_pml4 ||
type == ObjType_VNode_x86_64_pdpt ||
type == ObjType_VNode_x86_64_pdir ||
type == ObjType_VNode_x86_64_ptable ||
static inline bool type_is_vroot(enum objtype type)
{
- STATIC_ASSERT(58 == ObjType_Num, "Check VNode definitions");
- STATIC_ASSERT(60 == ObjType_Num, "Check VNode definitions");
++ STATIC_ASSERT(68 == ObjType_Num, "Check VNode definitions");
return (type == ObjType_VNode_x86_64_pml4 ||
+ type == ObjType_VNode_x86_64_ept_pml4 ||
#ifdef CONFIG_PAE
type == ObjType_VNode_x86_32_pdpt ||
#else
*
* @return Number of bits a VNode object occupies.
*/
- static inline size_t vnode_objbits(enum objtype type)
+ static inline uint8_t vnode_objbits(enum objtype type)
{
// This function should be emitted by hamlet or somesuch.
- STATIC_ASSERT(58 == ObjType_Num, "Check VNode definitions");
- STATIC_ASSERT(60 == ObjType_Num, "Check VNode definitions");
++ STATIC_ASSERT(68 == ObjType_Num, "Check VNode definitions");
- if (type == ObjType_VNode_x86_64_pml4 ||
+ if (type == ObjType_VNode_VTd_root_table ||
+ type == ObjType_VNode_VTd_ctxt_table ||
+ type == ObjType_VNode_x86_64_pml5 ||
+ type == ObjType_VNode_x86_64_pml4 ||
type == ObjType_VNode_x86_64_pdpt ||
type == ObjType_VNode_x86_64_pdir ||
type == ObjType_VNode_x86_64_ptable ||
return 0;
}
+static inline bool type_is_ept(enum objtype type)
+{
- STATIC_ASSERT(58 == ObjType_Num, "Check VNode definitions");
++ STATIC_ASSERT(68 == ObjType_Num, "Check VNode definitions");
+
+ return (type == ObjType_VNode_x86_64_ept_pml4 ||
+ type == ObjType_VNode_x86_64_ept_pdpt ||
+ type == ObjType_VNode_x86_64_ept_pdir ||
+ type == ObjType_VNode_x86_64_ept_ptable);
+}
+
/**
* Return size of vnode in bytes. This is the size of a page table page.
*
static inline size_t vnode_objsize(enum objtype type)
{
// This function should be emitted by hamlet or somesuch.
- STATIC_ASSERT(58 == ObjType_Num, "Check VNode definitions");
- STATIC_ASSERT(60 == ObjType_Num, "Check VNode definitions");
++ STATIC_ASSERT(68 == ObjType_Num, "Check VNode definitions");
- if (type == ObjType_VNode_x86_64_pml4 ||
+ if (type == ObjType_VNode_VTd_root_table ||
+ type == ObjType_VNode_VTd_ctxt_table ||
+ type == ObjType_VNode_x86_64_pml5 ||
+ type == ObjType_VNode_x86_64_pml4 ||
type == ObjType_VNode_x86_64_pdpt ||
type == ObjType_VNode_x86_64_pdir ||
type == ObjType_VNode_x86_64_ptable ||
*/
static inline size_t vnode_entry_bits(enum objtype type) {
// This function should be emitted by hamlet or somesuch.
- STATIC_ASSERT(58 == ObjType_Num, "Check VNode definitions");
- STATIC_ASSERT(60 == ObjType_Num, "Check VNode definitions");
++ STATIC_ASSERT(68 == ObjType_Num, "Check VNode definitions");
- if (type == ObjType_VNode_x86_64_pml4 ||
+ if (type == ObjType_VNode_VTd_root_table ||
+ type == ObjType_VNode_VTd_ctxt_table ||
+ type == ObjType_VNode_x86_64_pml5 ||
+ type == ObjType_VNode_x86_64_pml4 ||
type == ObjType_VNode_x86_64_pdpt ||
type == ObjType_VNode_x86_64_pdir ||
- type == ObjType_VNode_x86_64_ptable)
+ type == ObjType_VNode_x86_64_ptable ||
+ type == ObjType_VNode_x86_64_ept_pml4 ||
+ type == ObjType_VNode_x86_64_ept_pdpt ||
+ type == ObjType_VNode_x86_64_ept_pdir ||
+ type == ObjType_VNode_x86_64_ept_ptable)
{
return 9; // log2(X86_64_PTABLE_SIZE)
}
* @return Number of page table entries in bits
*/
static inline size_t cnode_get_slots(struct capability *cnode) {
- STATIC_ASSERT(58 == ObjType_Num, "Check CNode definitions");
- STATIC_ASSERT(60 == ObjType_Num, "Check CNode definitions");
++ STATIC_ASSERT(68 == ObjType_Num, "Check CNode definitions");
switch (cnode->type) {
case ObjType_L1CNode:
static inline enum objtype get_mapping_type(enum objtype captype)
{
- STATIC_ASSERT(58 == ObjType_Num, "Knowledge of all mapping types");
- STATIC_ASSERT(60 == ObjType_Num, "Knowledge of all mapping types");
++ STATIC_ASSERT(68 == ObjType_Num, "Knowledge of all mapping types");
switch (captype) {
case ObjType_Frame:
static inline bool type_is_mapping(enum objtype type)
{
- STATIC_ASSERT(58 == ObjType_Num, "Knowledge of all mapping types");
- STATIC_ASSERT(60 == ObjType_Num, "Knowledge of all mapping types");
++ STATIC_ASSERT(68 == ObjType_Num, "Knowledge of all mapping types");
switch (type) {
case ObjType_Frame_Mapping:
static inline bool type_is_mappable(enum objtype type)
{
- STATIC_ASSERT(58 == ObjType_Num, "Knowledge of all mappable types");
- STATIC_ASSERT(60 == ObjType_Num, "Knowledge of all mappable types");
++ STATIC_ASSERT(68 == ObjType_Num, "Knowledge of all mappable types");
switch (type) {
case ObjType_Frame:
* Predicates related to sharing capabilities
*/
- STATIC_ASSERT(58 == ObjType_Num, "Knowledge of all cap types");
-STATIC_ASSERT(60 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(68 == ObjType_Num, "Knowledge of all cap types");
static inline bool
distcap_needs_locality(enum objtype type)
{
}
}
- STATIC_ASSERT(58 == ObjType_Num, "Knowledge of all cap types");
-STATIC_ASSERT(60 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(68 == ObjType_Num, "Knowledge of all cap types");
static inline bool
distcap_is_moveable(enum objtype type)
{
/* Architecture-specific syscalls - X86
* FIXME: shouldn't these be in an arch-specific header? -AB */
- #define SYSCALL_X86_FPU_TRAP_ON 8 ///< Turn FPU trap on (x86)
-#define SYSCALL_X86_RELOAD_LDT 8 ///< Reload the LDT register (x86_64)
-#define SYSCALL_SUSPEND 9 ///< Suspend the CPU
+#define SYSCALL_X86_RELOAD_LDT 9 ///< Reload the LDT register (x86_64)
+#define SYSCALL_SUSPEND 10 ///< Suspend the CPU
- #define SYSCALL_GET_ABS_TIME 11 ///< Get time elapsed since boot
- #define SYSCALL_MAP_NPT 12 ///< Unused? -SG, 2017-11-28.
/* Architecture-specific syscalls - ARMv7 */
#define SYSCALL_ARMv7_CACHE_CLEAN 8 ///< Clean (write back) by VA
if (src->type != ObjType_Frame &&
src->type != ObjType_DevFrame &&
+ //(!type_is_ept(dest->type) &&
+ src->type != ObjType_RAM &&
- !type_is_vnode(src->type)) { // Right mapping
++ !type_is_vnode(src->type) &&
+ src->type != ObjType_EndPointUMP) { // Right mapping
debug(SUBSYS_PAGING, "src type invalid\n");
return SYS_ERR_WRONG_MAPPING;
}
/// Dispatcher table for the type of mapping to create
static mapping_handler_t handler[ObjType_Num] = {
- [ObjType_VNode_x86_64_pml4] = x86_64_non_ptable,
- [ObjType_VNode_x86_64_pdpt] = x86_64_non_ptable,
- [ObjType_VNode_x86_64_pdir] = x86_64_non_ptable,
- [ObjType_VNode_x86_64_ptable] = x86_64_ptable,
+ [ObjType_VNode_VTd_root_table] = x86_64_vtd_table,
+ [ObjType_VNode_VTd_ctxt_table] = x86_64_vtd_table,
+ [ObjType_VNode_x86_64_pml5] = x86_64_non_ptable,
+ [ObjType_VNode_x86_64_pml4] = x86_64_non_ptable,
+ [ObjType_VNode_x86_64_pdpt] = x86_64_non_ptable,
+ [ObjType_VNode_x86_64_pdir] = x86_64_non_ptable,
+ [ObjType_VNode_x86_64_ptable] = x86_64_ptable,
+ [ObjType_VNode_x86_64_ept_pml4] = x86_64_non_ptable,
+ [ObjType_VNode_x86_64_ept_pdpt] = x86_64_non_ptable,
+ [ObjType_VNode_x86_64_ept_pdir] = x86_64_non_ptable,
+ [ObjType_VNode_x86_64_ept_ptable] = x86_64_ptable,
};
[CNodeCmd_Revoke] = handle_revoke,
[CNodeCmd_GetState] = handle_get_state,
[CNodeCmd_Resize] = handle_resize,
+ [CNodeCmd_CapIdentify] = handle_cap_identify,
},
+ [ObjType_VNode_VTd_root_table] = {
+ [VNodeCmd_Identify] = handle_vnode_identify,
+ [VNodeCmd_Map] = handle_map,
+ [VNodeCmd_Unmap] = handle_unmap,
+ [VNodeCmd_ModifyFlags] = handle_vnode_modify_flags,
+ },
+ [ObjType_VNode_VTd_ctxt_table] = {
+ [VNodeCmd_Identify] = handle_vnode_identify,
+ [VNodeCmd_Map] = handle_map,
+ [VNodeCmd_Unmap] = handle_unmap,
+ [VNodeCmd_ModifyFlags] = handle_vnode_modify_flags,
+ },
+ [ObjType_VNode_x86_64_pml5] = {
+ [VNodeCmd_Identify] = handle_vnode_identify,
+ [VNodeCmd_Map] = handle_map,
+ [VNodeCmd_Unmap] = handle_unmap,
+ [VNodeCmd_ModifyFlags] = handle_vnode_modify_flags,
+ },
[ObjType_VNode_x86_64_pml4] = {
[VNodeCmd_Identify] = handle_vnode_identify,
[VNodeCmd_Map] = handle_map,
}
// run the monitor
- dispatch(dcb->guest_desc.monitor_ep.cap.u.endpoint.listener);
+ dispatch(dcb->guest_desc.monitor_ep.cap.u.endpointlmp.listener);
}
-struct sysret sys_syscall(uint64_t syscall, uint64_t arg0, uint64_t arg1,
- uint64_t *args, uint64_t rflags, uint64_t rip);
+__attribute__((unused))
+/**
+ * \brief Debug helper: walks the 4-level x86-64 page table rooted at
+ *        root_pt_phys and prints every present entry, stopping at the
+ *        first kernel PML4 slot (X86_64_MEMORY_OFFSET).
+ *
+ * \param root_pt_phys  Physical address of the root (PML4) table.
+ */
+static void dump_page_tables(lpaddr_t root_pt_phys)
+{
+    lvaddr_t root_pt = local_phys_to_mem(root_pt_phys);
+    printk(LOG_NOTE, "dumping page tables rooted at 0x%"PRIxLPADDR"\n", root_pt_phys);
+
+    // loop over pdpts; only user-space slots below the kernel window
+    union x86_64_ptable_entry *pt;
+    size_t kernel_pml4e = X86_64_PML4_BASE(X86_64_MEMORY_OFFSET);
+    // cast avoids a signed/unsigned comparison between index and bound
+    for (int pdpt_index = 0; pdpt_index < (int)kernel_pml4e; pdpt_index++) {
+        union x86_64_pdir_entry *pdpt = (union x86_64_pdir_entry *)root_pt + pdpt_index;
+        if (!pdpt->raw) { continue; }
+        else {
+            genpaddr_t paddr = (genpaddr_t)pdpt->d.base_addr << BASE_PAGE_BITS;
+            printf("%d: 0x%"PRIxGENPADDR" (%d %d), raw=0x%"PRIx64"\n",
+                   pdpt_index, paddr,
+                   pdpt->d.read_write, pdpt->d.user_supervisor,
+                   pdpt->raw);
+        }
+        // Cast before shifting: base_addr is a bit-field and would otherwise
+        // be shifted at its promoted (possibly narrower) width, truncating
+        // high physical-address bits.
+        genpaddr_t pdpt_gp = (genpaddr_t)pdpt->d.base_addr << BASE_PAGE_BITS;
+        lvaddr_t pdpt_lv = local_phys_to_mem(gen_phys_to_local_phys(pdpt_gp));
+
+        for (int pdir_index = 0; pdir_index < X86_64_PTABLE_SIZE; pdir_index++) {
+            // get pdir
+            union x86_64_pdir_entry *pdir = (union x86_64_pdir_entry *)pdpt_lv + pdir_index;
+            pt = (union x86_64_ptable_entry*)pdir;
+            if (!pdir->raw) { continue; }
+            // check if pdir or huge page
+            if (pt->huge.always1) {
+                // is huge page mapping
+                genpaddr_t paddr = (genpaddr_t)pt->huge.base_addr << HUGE_PAGE_BITS;
+                printf("%d.%d: 0x%"PRIxGENPADDR" (%d %d %d)\n", pdpt_index,
+                       pdir_index, paddr, pt->huge.read_write,
+                       pt->huge.dirty, pt->huge.accessed);
+                // goto next pdpt entry
+                continue;
+            } else {
+                genpaddr_t paddr = (genpaddr_t)pdir->d.base_addr << BASE_PAGE_BITS;
+                printf("%d.%d: 0x%"PRIxGENPADDR" (%d %d), raw=0x%"PRIx64"\n",
+                       pdpt_index, pdir_index, paddr,
+                       pdir->d.read_write, pdir->d.user_supervisor,
+                       pdir->raw);
+            }
+            // see cast rationale above
+            genpaddr_t pdir_gp = (genpaddr_t)pdir->d.base_addr << BASE_PAGE_BITS;
+            lvaddr_t pdir_lv = local_phys_to_mem(gen_phys_to_local_phys(pdir_gp));
+
+            for (int ptable_index = 0; ptable_index < X86_64_PTABLE_SIZE; ptable_index++) {
+                // get ptable
+                union x86_64_pdir_entry *ptable = (union x86_64_pdir_entry *)pdir_lv + ptable_index;
+                pt = (union x86_64_ptable_entry *)ptable;
+                if (!ptable->raw) { continue; }
+                // check if ptable or large page
+                if (pt->large.always1) {
+                    // is large page mapping
+                    genpaddr_t paddr = (genpaddr_t)pt->large.base_addr << LARGE_PAGE_BITS;
+                    printf("%d.%d.%d: 0x%"PRIxGENPADDR" (%d %d %d)\n",
+                           pdpt_index, pdir_index, ptable_index, paddr,
+                           pt->large.read_write, pt->large.dirty, pt->large.accessed);
+                    // goto next pdir entry
+                    continue;
+                } else {
+                    genpaddr_t paddr = (genpaddr_t)ptable->d.base_addr << BASE_PAGE_BITS;
+                    printf("%d.%d.%d: 0x%"PRIxGENPADDR" (%d %d), raw=0x%"PRIx64"\n",
+                           pdpt_index, pdir_index, ptable_index, paddr,
+                           ptable->d.read_write, ptable->d.user_supervisor,
+                           ptable->raw);
+                }
+                // see cast rationale above
+                genpaddr_t ptable_gp = (genpaddr_t)ptable->d.base_addr << BASE_PAGE_BITS;
+                lvaddr_t ptable_lv = local_phys_to_mem(gen_phys_to_local_phys(ptable_gp));
+
+                for (int entry = 0; entry < X86_64_PTABLE_SIZE; entry++) {
+                    union x86_64_ptable_entry *e =
+                        (union x86_64_ptable_entry *)ptable_lv + entry;
+                    genpaddr_t paddr = (genpaddr_t)e->base.base_addr << BASE_PAGE_BITS;
+                    if (!paddr) {
+                        continue;
+                    }
+                    printf("%d.%d.%d.%d: 0x%"PRIxGENPADDR" (%d %d %d), raw=0x%"PRIx64"\n",
+                           pdpt_index, pdir_index, ptable_index, entry,
+                           paddr, e->base.read_write, e->base.dirty, e->base.accessed,
+                           e->raw);
+                }
+            }
+        }
+    }
+}
+
+struct sysret sys_vmcall(uint64_t syscall, uint64_t arg0, uint64_t arg1,
+ uint64_t *args, uint64_t rflags, uint64_t rip,
+ struct capability *root);
extern uint64_t user_stack_save;
/**
* \brief Cleanup the last cap copy for an object and the object itself
*/
- STATIC_ASSERT(58 == ObjType_Num, "Knowledge of all RAM-backed cap types");
-STATIC_ASSERT(60 == ObjType_Num, "Knowledge of all RAM-backed cap types");
++STATIC_ASSERT(68 == ObjType_Num, "Knowledge of all RAM-backed cap types");
static errval_t
cleanup_last(struct cte *cte, struct cte *ret_ram_cap)
{
if(ram.bytes > 0) {
// Send back as RAM cap to monitor
if (ret_ram_cap) {
+ TRACE(KERNEL_CAPOPS, CREATE_RAM, seqnum);
- if (dcb_current != monitor_ep.u.endpoint.listener) {
+ if (dcb_current != monitor_ep.u.endpointlmp.listener) {
printk(LOG_WARN, "sending fresh ram cap to non-monitor?\n");
}
assert(ret_ram_cap->cap.type == ObjType_Null);
struct capability monitor_ep;
- STATIC_ASSERT(58 == ObjType_Num, "Knowledge of all cap types");
-STATIC_ASSERT(60 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(68 == ObjType_Num, "Knowledge of all cap types");
int sprint_cap(char *buf, size_t len, struct capability *cap)
{
char *mappingtype;
case ObjType_VNode_x86_64_pml4:
return snprintf(buf, len, "x86_64 PML4 at 0x%" PRIxGENPADDR,
cap->u.vnode_x86_64_pml4.base);
+ case ObjType_VNode_x86_64_pml5:
+ return snprintf(buf, len, "x86_64 PML5 at 0x%" PRIxGENPADDR,
+ cap->u.vnode_x86_64_pml4.base);
+ case ObjType_VNode_VTd_root_table:
+ return snprintf(buf, len, "VTd root table at 0x%" PRIxGENPADDR,
+ cap->u.vnode_x86_64_pml4.base);
+ case ObjType_VNode_VTd_ctxt_table:
+ return snprintf(buf, len, "VTd ctxt table at 0x%" PRIxGENPADDR,
+ cap->u.vnode_x86_64_pml4.base);
+ case ObjType_VNode_x86_64_ept_ptable:
+ return snprintf(buf, len, "x86_64 EPT Page table at 0x%" PRIxGENPADDR,
+ cap->u.vnode_x86_64_ept_ptable.base);
+
+ case ObjType_VNode_x86_64_ept_pdir:
+ return snprintf(buf, len, "x86_64 EPT Page directory at 0x%" PRIxGENPADDR,
+ cap->u.vnode_x86_64_ept_pdir.base);
+
+ case ObjType_VNode_x86_64_ept_pdpt:
+ return snprintf(buf, len, "x86_64 EPT PDPT at 0x%" PRIxGENPADDR,
+ cap->u.vnode_x86_64_ept_pdpt.base);
+
+ case ObjType_VNode_x86_64_ept_pml4:
+ return snprintf(buf, len, "x86_64 EPT PML4 at 0x%" PRIxGENPADDR,
+ cap->u.vnode_x86_64_ept_pml4.base);
+
case ObjType_Frame_Mapping:
mappingtype = "Frame";
goto ObjType_Mapping;
// If you create more capability types you need to deal with them
// in the table below.
- STATIC_ASSERT(58 == ObjType_Num, "Knowledge of all cap types");
-STATIC_ASSERT(60 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(68 == ObjType_Num, "Knowledge of all cap types");
static size_t caps_max_numobjs(enum objtype type, gensize_t srcsize, gensize_t objsize)
{
switch(type) {
*
* For the meaning of the parameters, see the 'caps_create' function.
*/
- STATIC_ASSERT(58 == ObjType_Num, "Knowledge of all cap types");
-STATIC_ASSERT(60 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(68 == ObjType_Num, "Knowledge of all cap types");
static errval_t caps_zero_objects(enum objtype type, lpaddr_t lpaddr,
gensize_t objsize, size_t count)
case ObjType_VNode_x86_64_pdir:
case ObjType_VNode_x86_64_pdpt:
case ObjType_VNode_x86_64_pml4:
+ case ObjType_VNode_x86_64_ept_ptable:
+ case ObjType_VNode_x86_64_ept_pdir:
+ case ObjType_VNode_x86_64_ept_pdpt:
+ case ObjType_VNode_x86_64_ept_pml4:
+ case ObjType_VNode_x86_64_pml5:
+ case ObjType_VNode_VTd_root_table:
+ case ObjType_VNode_VTd_ctxt_table:
// objsize is size of VNode; but not given as such
objsize = vnode_objsize(type);
debug(SUBSYS_CAPS, "VNode: zeroing %zu bytes @%#"PRIxLPADDR"\n",
*/
// If you create more capability types you need to deal with them
// in the table below.
- STATIC_ASSERT(58 == ObjType_Num, "Knowledge of all cap types");
-STATIC_ASSERT(60 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(68 == ObjType_Num, "Knowledge of all cap types");
static errval_t caps_create(enum objtype type, lpaddr_t lpaddr, gensize_t size,
gensize_t objsize, size_t count, coreid_t owner,
//{{{1 Capability creation
/// check arguments, return true iff ok
- STATIC_ASSERT(58 == ObjType_Num, "Knowledge of all cap types");
-STATIC_ASSERT(60 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(68 == ObjType_Num, "Knowledge of all cap types");
#ifndef NDEBUG
static bool check_caps_create_arguments(enum objtype type,
size_t bytes, size_t objsize,
return SYS_ERR_OK;
}
- STATIC_ASSERT(58 == ObjType_Num, "Knowledge of all cap types");
-STATIC_ASSERT(60 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(68 == ObjType_Num, "Knowledge of all cap types");
/// Retype caps
/// Create `count` new caps of `type` from `offset` in src, and put them in
/// `dest_cnode` starting at `dest_slot`.
}
if (err_is_fail(err)) {
debug(SUBSYS_CAPS, "caps_retype: failed to create a dest cap\n");
+ TRACE(KERNEL_CAPOPS, RETYPE_DONE, retype_seqnum);
return err_push(err, SYS_ERR_RETYPE_CREATE);
}
+ TRACE(KERNEL_CAPOPS, RETYPE_CREATE_CAPS_DONE, retype_seqnum);
/* special initialisation for endpoint caps */
- if (type == ObjType_EndPoint) {
+ if (type == ObjType_EndPointLMP) {
assert(src_cap->type == ObjType_Dispatcher);
assert(count == 1);
struct capability *dest_cap = &dest_cte->cap;
}
/// Create copies to a cte
- STATIC_ASSERT(58 == ObjType_Num, "Knowledge of all cap types");
-STATIC_ASSERT(60 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(68 == ObjType_Num, "Knowledge of all cap types");
errval_t caps_copy_to_cte(struct cte *dest_cte, struct cte *src_cte, bool mint,
uintptr_t param1, uintptr_t param2)
{
* Cap tracing
*/
#ifdef TRACE_PMEM_CAPS
- STATIC_ASSERT(58 == ObjType_Num, "knowledge of all cap types");
-STATIC_ASSERT(50 == ObjType_Num, "knowledge of all cap types");
++// XXX: this is not gonna work anymore! -SG, 2018-10-22.
++STATIC_ASSERT(68 == ObjType_Num, "knowledge of all cap types");
STATIC_ASSERT(64 >= ObjType_Num, "cap types fit in uint64_t bitfield");
#define MAPPING_TYPES \
((1ull<<ObjType_VNode_x86_64_pml4_Mapping) | \
/**
* \brief Function to do the actual printing based on the type of capability
*/
- STATIC_ASSERT(58 == ObjType_Num, "Knowledge of all cap types");
-STATIC_ASSERT(60 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(68 == ObjType_Num, "Knowledge of all cap types");
int debug_print_cap(char *buf, size_t len, struct capability *cap)
{
char *mappingtype;
return snprintf(buf, len, "x86_64 PML4 at 0x%" PRIxGENPADDR,
cap->u.vnode_x86_64_pml4.base);
+ case ObjType_VNode_x86_64_ept_ptable:
+ return snprintf(buf, len, "x86_64 EPT Page table at 0x%" PRIxGENPADDR,
+ cap->u.vnode_x86_64_ept_ptable.base);
+
+ case ObjType_VNode_x86_64_ept_pdir:
+ return snprintf(buf, len, "x86_64 EPT Page directory at 0x%" PRIxGENPADDR,
+ cap->u.vnode_x86_64_ept_pdir.base);
+
+ case ObjType_VNode_x86_64_ept_pdpt:
+ return snprintf(buf, len, "x86_64 EPT PDPT at 0x%" PRIxGENPADDR,
+ cap->u.vnode_x86_64_ept_pdpt.base);
+
+ case ObjType_VNode_x86_64_ept_pml4:
+ return snprintf(buf, len, "x86_64 EPT PML4 at 0x%" PRIxGENPADDR,
+ cap->u.vnode_x86_64_ept_pml4.base);
+
+ case ObjType_VNode_x86_64_pml5:
+ return snprintf(buf, len, "x86_64 PML5 at 0x%" PRIxGENPADDR,
+ cap->u.vnode_x86_64_pml5.base);
+
+ case ObjType_VNode_VTd_root_table:
+ return snprintf(buf, len, "VTd Root Table at 0x%" PRIxGENPADDR,
+ cap->u.vnode_vtd_root_table.base);
+
+ case ObjType_VNode_VTd_ctxt_table:
+ return snprintf(buf, len, "VTd Ctxt Table at 0x%" PRIxGENPADDR,
+ cap->u.vnode_vtd_ctxt_table.base);
+
case ObjType_Frame_Mapping:
mappingtype = "Frame";
goto ObjType_Mapping;
mdb_dump(mdb_root, 0);
}
- STATIC_ASSERT(58 == ObjType_Num, "Knowledge of all cap types");
-STATIC_ASSERT(60 == ObjType_Num, "Knowledge of all cap types");
++STATIC_ASSERT(68 == ObjType_Num, "Knowledge of all cap types");
static void print_cte(struct cte *cte, char *indent_buff)
{
struct mdbnode *node = N(cte);
#endif
+// File-scope state of the sbrk region (shared with the getters below so
+// other modules can inspect the heap mapping).
+static void *base = NULL;        ///< Start address of the mapped sbrk region
+static size_t offset = 0; ///< How much is currently used
+static size_t goffset = 0; ///< Maximum ever allocated
+static struct memobj_append memobj_;  ///< Backing memory object (append type)
+static struct memobj *memobj = NULL;  ///< Set once initialized; NULL before first sbrk
+static struct vregion vregion_;       ///< Virtual region backing the heap
+static struct vregion *vregion = NULL; ///< Set once initialized
+
+// Returns the memory object backing the sbrk heap; must not be called
+// before the first sbrk() initialized it (asserted).
+// NOTE(review): the backing object is declared as struct memobj_append but
+// is returned as memobj_anon* -- confirm the two layouts are compatible.
+struct memobj_anon* sbrk_get_memobj(void)
+{
+ assert(memobj != NULL);
+ return (struct memobj_anon*) memobj;
+}
+
+// Returns the vregion backing the sbrk heap; must not be called before the
+// first sbrk() initialized it (asserted).
+struct vregion* sbrk_get_vregion(void)
+{
+ assert(vregion != NULL);
+ return vregion;
+}
+
+// Returns the start address of the sbrk region; must not be called before
+// the first sbrk() initialized it (asserted).
+void* sbrk_get_base(void)
+{
+ assert(base != NULL);
+ return base;
+}
+
+// Returns the number of bytes currently in use in the sbrk region.
+// NOTE(review): the assert also rejects a legitimately empty heap
+// (offset == 0 after init) -- confirm that callers only use this once
+// something has been allocated.
+size_t sbrk_get_offset(void)
+{
+ assert(offset != 0);
+ return offset;
+}
+
+#ifdef SBRK_COLLECT_STATS
+uint64_t sbrk_times = 0;
+
+// Reads the x86 time-stamp counter via RDTSC and assembles the 64-bit
+// value from EDX:EAX. Assumes unsigned long is 64 bits wide (x86-64).
+static inline unsigned long bf_ticks(void)
+{
+ unsigned int a, d;
+ __asm__ volatile("rdtsc" : "=a" (a), "=d" (d));
+ return ((unsigned long) a) | (((unsigned long) d) << 32);
+}
+#endif
-
void *sbrk(intptr_t increment)
{
++#ifdef SBRK_COLLECT_STATS
+ uint64_t start = bf_ticks();
++#endif
errval_t err;
size_t orig_offset;
- static void *base;
- static size_t offset = 0;
- static size_t goffset = 0;
- static struct memobj_anon memobj_;
- static struct memobj *memobj = NULL;
- static struct vregion vregion_;
- static struct vregion *vregion = NULL;
-
if (!memobj) { // Initialize
-- err = vspace_map_anon_nomalloc(&base, &memobj_, &vregion_,
-- SBRK_REGION_BYTES, NULL,
-- SBRK_FLAGS, SBRK_REGION_ALIGNMENT);
++ err = vspace_map_append_nomalloc(&base, &memobj_, &vregion_,
++ SBRK_REGION_BYTES, NULL,
++ SBRK_FLAGS, SBRK_REGION_ALIGNMENT);
if (err_is_fail(err)) {
DEBUG_ERR(err, "vspace_map_anon_nomalloc failed");
return (void *)-1;
coreid_t from;
genvaddr_t st;
errval_t status;
+ size_t pending_agreements;
struct revoke_slave_st *next;
+ uint64_t seqnum;
};
static void revoke_result__rx(errval_t result,
static void revoke_done__send(struct intermon_binding *b,
struct intermon_msg_queue_elem *e);
static void revoke_master_steps__fin(void *st);
-
+ //static errval_t capops_revoke_subscribe()
+static uint64_t revoke_seqnum = 0;
+
+
+ /**
+  * Per-subscriber state for clients that registered to be asked for
+  * agreement before revocation of a matching capability range proceeds.
+  */
+ struct revoke_register_st
+ {
+ struct monitor_client_req req; // must remain the first member: cap_revoke_response casts the req pointer back to this struct
+ struct revoke_register_st *next, **prev_next; // intrusive doubly-linked list rooted at revoke_subs
+ struct monitor_binding *subscriber; // binding of the subscribing monitor client
+ genpaddr_t base; // start of the subscribed physical address range
+ genpaddr_t limit; // inclusive end of the subscribed range
+ bool acked; // NOTE(review): never written in the visible code -- confirm intended use
+ bool notified; // set once the subscriber has been asked, to avoid duplicate requests
+ struct event_closure cont; // continuation invoked when the subscriber responds
+ };
+
+ // Head of the subscriber list; NULL when nobody registered.
+ struct revoke_register_st *revoke_subs = NULL;
+
+ // Pushes rvk_st onto the head of the revoke_subs list, maintaining the
+ // intrusive back-links (prev_next points at the predecessor's next slot).
+ static inline void revoke_subs_add(struct revoke_register_st *rvk_st)
+ {
+ rvk_st->next = revoke_subs;
+
+ if (revoke_subs) {
+ revoke_subs->prev_next = &rvk_st->next;
+ }
+ rvk_st->prev_next = &revoke_subs;
+ revoke_subs = rvk_st;
+ }
+
+ // Unlinks rvk_st from the revoke_subs list, fixing both the successor's
+ // back-link and the predecessor's forward link.
+ static inline void revoke_subs_remove(struct revoke_register_st *rvk_st)
+ {
+ if (rvk_st->next) {
+ rvk_st->next->prev_next = rvk_st->prev_next;
+ }
+
+ *rvk_st->prev_next = rvk_st->next;
+ }
+
+ /**
+  * \brief Looks up a revoke subscription by its request id.
+  *
+  * \param id  Request id to search for.
+  *
+  * \return The matching subscription state, or NULL if no subscriber with
+  *         the given id exists.
+  */
+ static inline struct revoke_register_st *revoke_subs_lookup_by_id(uintptr_t id)
+ {
+     struct revoke_register_st *rvk_st = revoke_subs;
+     while (rvk_st) {
+         if (rvk_st->req.reqid == id) {
+             return rvk_st;
+         }
+         rvk_st = rvk_st->next;
+     }
+     // Fix: previously control fell off the end of this non-void function
+     // when no id matched (undefined behavior); report "not found" instead.
+     return NULL;
+ }
+
+ /**
+  * \brief Finds, unlinks, and returns the next subscription whose range
+  *        [base..limit] covers the given range.
+  *
+  * Acts as a destructive iterator: pass curr == NULL to start from the list
+  * head, or pass the previously returned (already unlinked) node to resume
+  * the scan from its successor (the unlinked node's next pointer is kept
+  * intact for exactly this purpose).
+  *
+  * \param base   Start of the queried physical range.
+  * \param limit  Inclusive end of the queried range.
+  * \param curr   NULL to start, or the last node returned by this function.
+  *
+  * \return The unlinked matching subscription, or NULL if none remains.
+  */
+ static struct revoke_register_st *
+ revoke_subs_remove_by_range(genpaddr_t base, genpaddr_t limit,
+                             struct revoke_register_st *curr)
+ {
+     if (curr == NULL) {
+         curr = revoke_subs;
+     } else {
+         curr = curr->next;
+     }
+
+     struct revoke_register_st *rvk_st = curr;
+     while (rvk_st) {
+         if (rvk_st->base <= base && rvk_st->limit >= limit) {
+             assert(rvk_st->prev_next);
+             // Fix: also repair the successor's back-link, as
+             // revoke_subs_remove does. Previously only the forward link
+             // was updated, leaving next->prev_next pointing into the
+             // removed (and later freed) node and corrupting the list on
+             // subsequent removals.
+             if (rvk_st->next) {
+                 rvk_st->next->prev_next = rvk_st->prev_next;
+             }
+             *rvk_st->prev_next = rvk_st->next;
+             return rvk_st;
+         }
+         rvk_st = rvk_st->next;
+     }
+     return NULL;
+ }
+
+
+ /**
+  * \brief Handler for a subscriber's response to a revoke agreement request.
+  *
+  * Unlinks the outstanding request matching \p id from the binding's request
+  * list and runs the continuation stored in the embedding
+  * revoke_register_st (req is its first member, so the cast below is valid).
+  *
+  * \param sub  Binding the response arrived on.
+  * \param id   Request id being acknowledged.
+  */
+ static void cap_revoke_response(struct monitor_binding *sub, uintptr_t id)
+ {
+     struct monitor_state *mst = sub->st;
+
+     if (mst->reqs == NULL) {
+         DEBUG_CAPOPS("Received message but no outstanding requests???");
+         // Fix: the original asserted mst->reqs == NULL here, which always
+         // holds in this branch and thus could never fire.
+         assert(mst->reqs != NULL);
+         return;
+     }
+
+     // Find and unlink the request with the matching id.
+     struct monitor_client_req *reqs = mst->reqs;
+     struct monitor_client_req **prev_next = &mst->reqs;
+     while (reqs) {
+         if (reqs->reqid == id) {
+             *prev_next = reqs->next;
+             break;
+         }
+         prev_next = &reqs->next;
+         reqs = reqs->next;
+     }
+
+     if (reqs == NULL) {
+         // Fix: no outstanding request carries this id; the original fell
+         // through and dereferenced a NULL pointer below.
+         DEBUG_CAPOPS("Received response for unknown request id\n");
+         return;
+     }
+
+     struct revoke_register_st *rvk_st = (struct revoke_register_st *)reqs;
+     rvk_st->cont.handler(rvk_st);
+ }
+
+ /**
+  * \brief Registers a monitor client to be asked for agreement before a
+  *        capability overlapping \p cap's physical range is revoked.
+  *
+  * \param cap         Capability whose address range the client subscribes to.
+  * \param id          Client-chosen request id, echoed back in responses.
+  * \param subscriber  Binding of the subscribing client; its
+  *                    cap_revoke_response handler is installed here.
+  *
+  * \return SYS_ERR_OK on success, LIB_ERR_MALLOC_FAIL if allocation fails.
+  */
+ errval_t capops_revoke_register_subscribe(struct capability *cap, uintptr_t id,
+ struct monitor_binding *subscriber)
+ {
+ assert(cap);
+ assert(subscriber);
+
+ struct revoke_register_st *rvk_st = calloc(1, sizeof(*rvk_st));
+ if (rvk_st == NULL) {
+ return LIB_ERR_MALLOC_FAIL;
+ }
+
+ rvk_st->req.reqid = id;
+ rvk_st->subscriber = subscriber;
+
+ // Route this binding's revoke responses to our handler.
+ subscriber->rx_vtbl.cap_revoke_response = cap_revoke_response;
+
+ /* set the ranges */
+ rvk_st->base = get_address(cap);
+ rvk_st->limit = rvk_st->base + get_size(cap) - 1;
+
+ DEBUG_CAPOPS("%s:%u: cap=[%" PRIxGENPADDR "..%" PRIxGENPADDR "], id=%"
+ PRIuPTR " sub=%p\n", __FUNCTION__, __LINE__,
+ rvk_st->base, rvk_st->limit, id, subscriber);
+
+ /* add it to the subscribers */
+ revoke_subs_add(rvk_st);
+
+ return SYS_ERR_OK;
+ }
+
+
+
+ // Sends a cap_revoke_request for st's request id on binding b.
+ // NOTE(review): the send error is only asserted; there is no retry or
+ // requeue path if the transmit queue is full -- confirm this is acceptable.
+ static void
+ revoke_agreement_request_cont(struct monitor_binding *b,
+ struct revoke_register_st *st)
+ {
+
+ errval_t err = b->tx_vtbl.cap_revoke_request(b, NOP_CONT, 0, st->req.reqid);
+ assert(err_is_ok(err));
+ }
+
+ /**
+  * Continuation run when a subscriber agrees, on the revocation master.
+  * Frees the subscription state, and once the last pending agreement has
+  * arrived, resumes the revoke protocol with the commit phase.
+  *
+  * \param arg  The revoke_register_st whose cont.arg holds the
+  *             revoke_master_st of the in-flight revocation.
+  */
+ static void revoke_master_cont(void *arg)
+ {
+ errval_t err;
+ struct revoke_register_st *st = arg;
+ struct revoke_master_st *rvk_st = st->cont.arg;
+
+ // Subscription state was unlinked when the request was issued; release it.
+ free(arg);
+
+ rvk_st->pending_agreements--;
+ if (rvk_st->pending_agreements) {
+ return;
+ }
+
+ /* continue with the protocol */
+ DEBUG_CAPOPS("%s ## revocation: commit phase\n", __FUNCTION__);
+ err = capsend_relations(&rvk_st->rawcap, revoke_commit__send,
+ &rvk_st->revoke_mc_st, &rvk_st->dests);
+ PANIC_IF_ERR(err, "enqueing revoke_commit multicast");
+
+ delete_steps_resume();
+
+ struct event_closure steps_fin_cont
+ = MKCLOSURE(revoke_master_steps__fin, rvk_st);
+ delete_queue_wait(&rvk_st->del_qn, steps_fin_cont);
+
+ }
+
+ /**
+  * Continuation run when a subscriber agrees, on a revocation slave.
+  * Frees the subscription state, and once the last pending agreement has
+  * arrived, sends revoke_ready back to the initiating core.
+  *
+  * \param arg  The revoke_register_st whose cont.arg holds the
+  *             revoke_slave_st of the in-flight revocation.
+  */
+ static void revoke_slave_cont(void *arg)
+ {
+ errval_t err;
+ struct revoke_register_st *st = arg;
+ struct revoke_slave_st *rvk_st = st->cont.arg;
+
+ // Subscription state was unlinked when the request was issued; release it.
+ free(arg);
+
+ rvk_st->pending_agreements--;
+ if (rvk_st->pending_agreements) {
+ return;
+ }
+
+ DEBUG_CAPOPS("### %s:%u continue with the protocol\n",
+ __FUNCTION__, __LINE__);
+
+ /* continue with the protocol */
+ rvk_st->im_qn.cont = revoke_ready__send;
+ err = capsend_target(rvk_st->from, (struct msg_queue_elem*)rvk_st);
+ PANIC_IF_ERR(err, "enqueing revoke_ready");
+ }
+
+ /*
+ * TODO: the following two functions essentially do the same thing, but with
+ * a different revoke state (slave or master)
+ */
+
+ /**
+  * Checks whether any registered subscriber must agree before this
+  * (locally initiated) revocation can commit. Every matching subscriber is
+  * unlinked, notified, and counted in rvk_st->pending_agreements; the
+  * protocol resumes from revoke_master_cont once all have responded.
+  *
+  * \param rvk_st  Master-side state of the in-flight revocation.
+  *
+  * \return true if at least one subscriber was notified (caller must wait),
+  *         false if the revocation can proceed immediately.
+  */
+ static bool capops_revoke_requires_agreement_local(struct revoke_master_st *rvk_st)
+ {
+
+ if (revoke_subs == NULL) {
+ return false;
+ }
+
+ genpaddr_t base = get_address(&rvk_st->rawcap);
+ // NOTE(review): declared gensize_t although it holds an address bound;
+ // rvk_st->limit and revoke_subs_remove_by_range use genpaddr_t -- confirm
+ // the types are the same width.
+ gensize_t limit = base + get_size(&rvk_st->rawcap) - 1;
+
+ DEBUG_CAPOPS("%s:%u: cap=[%" PRIxGENPADDR "..%" PRIxGENPADDR "]\n",
+ __FUNCTION__, __LINE__, base, limit);
+
+ struct revoke_register_st *st = revoke_subs_remove_by_range(base, limit, NULL);
+ if (st == NULL) {
+ DEBUG_CAPOPS("### %s:%u no matching request\n",
+ __FUNCTION__, __LINE__);
+ return false;
+ }
+
+ while(st != NULL) {
+ if (st->notified) {
+ /* don't notify two times, shouldn't actually happen */
+ st = revoke_subs_remove_by_range(base, limit, st);
+ continue;
+ }
+
+ struct monitor_binding *b = st->subscriber;
+ struct monitor_state *mst = b->st;
+
+ // Track the outstanding request on the binding so the response
+ // handler can find it again.
+ st->req.next = mst->reqs;
+ mst->reqs = &st->req;
+ st->notified = true;
+ st->cont.arg = rvk_st;
+ st->cont.handler = revoke_master_cont;
+ rvk_st->pending_agreements++;
+ revoke_agreement_request_cont(b, st);
+
+ st = revoke_subs_remove_by_range(base, limit, st);
+ }
+
+ return true;
+ }
+
+
+ /**
+  * Checks whether any registered subscriber must agree before this
+  * (remotely initiated) revocation can proceed on this core. Mirrors
+  * capops_revoke_requires_agreement_local but operates on the slave-side
+  * state and resumes via revoke_slave_cont.
+  *
+  * \param rvk_st  Slave-side state of the in-flight revocation.
+  *
+  * \return true if at least one subscriber was notified (caller must wait),
+  *         false if the revocation can proceed immediately.
+  */
+ static bool capops_revoke_requires_agreement_relations(struct revoke_slave_st *rvk_st)
+ {
+ if (revoke_subs == NULL) {
+ return false;
+ }
+
+ genpaddr_t base = get_address(&rvk_st->rawcap);
+ // NOTE(review): declared gensize_t although it holds an address bound --
+ // see the same pattern in the _local variant; confirm type widths match.
+ gensize_t limit = base + get_size(&rvk_st->rawcap) - 1;
+
+
+ DEBUG_CAPOPS("%s:%u: cap=[%" PRIxGENPADDR "..%" PRIxGENPADDR "]\n",
+ __FUNCTION__, __LINE__, base, limit);
+
+ struct revoke_register_st *st = revoke_subs_remove_by_range(base, limit, NULL);
+ if (st == NULL) {
+ DEBUG_CAPOPS("### %s:%u no matching request\n",
+ __FUNCTION__, __LINE__);
+ return false;
+ }
+
+ while(st != NULL) {
+ if (st->notified) {
+ /* don't notify two times, shouldn't actually happen */
+ st = revoke_subs_remove_by_range(base, limit, st);
+ continue;
+ }
+
+ struct monitor_binding *b = st->subscriber;
+ struct monitor_state *mst = b->st;
+
+ // Track the outstanding request on the binding so the response
+ // handler can find it again.
+ st->req.next = mst->reqs;
+ mst->reqs = &st->req;
+ st->notified = true;
+ st->cont.arg = rvk_st;
+ st->cont.handler = revoke_slave_cont;
+ rvk_st->pending_agreements++;
+ revoke_agreement_request_cont(b, st);
+
+ st = revoke_subs_remove_by_range(base, limit, st);
+ }
+
+ return true;
+ }
+
-
-
void
capops_revoke(struct domcapref cap,
revoke_result_handler_t result_handler,
st->cap.level);
PANIC_IF_ERR(err, "marking revoke");
-
+ TRACE(CAPOPS, REVOKE_DO_MARK, 0);
DEBUG_CAPOPS("%s ## revocation: mark phase\n", __FUNCTION__);
// XXX: could check whether remote copies exist here(?), -SG, 2014-11-05
err = capsend_relations(&st->rawcap, revoke_mark__send,
return;
}
+ TRACE(CAPOPS, REVOKE_DO_COMMIT, 0);
+ if (capops_revoke_requires_agreement_local(rvk_st)) {
+ return;
+ }
+
DEBUG_CAPOPS("%s ## revocation: commit phase\n", __FUNCTION__);
err = capsend_relations(&rvk_st->rawcap, revoke_commit__send,
&rvk_st->revoke_mc_st, &rvk_st->dests);