failure RETRY_THROUGH_MONITOR "There is a remote copy of the capability, monitor must be involved to perform a cross core agreement protocol",
failure TYPE_NOT_CREATABLE "Specified capability type is not creatable at runtime. Consider retyping it from another capability.",
+ // errors on the monitor-kernel interface
+ failure DELETE_LAST_OWNED "Tried to delete the last copy of a locally owned capability that may have remote copies",
+ failure CAP_LOCKED "The cap has already been locked",
+ success RAM_CAP_CREATED "A new RAM cap has been created",
+
// errors specific to page mapping
- failure VNODE_SLOT_INVALID "Destination slot exceeds size of page table",
- failure WRONG_MAPPING "Wrong source/destination mapping type",
- failure FRAME_OFFSET_INVALID "Specified offset exceeds size of frame",
- failure VNODE_SLOT_RESERVED "Destination slot is reserved",
- failure VNODE_SLOT_INUSE "Destination slot in use: unmap first",
+ failure VNODE_SLOT_INVALID "Destination slot exceeds size of page table",
+ failure WRONG_MAPPING "Wrong source/destination mapping type",
+ failure FRAME_OFFSET_INVALID "Specified offset exceeds size of frame",
+ failure VNODE_SLOT_RESERVED "Destination slot is reserved",
+ failure VNODE_SLOT_INUSE "Destination slot in use: unmap first",
+ failure VNODE_TYPE "Encountered non-VNode capability when manipulating page tables",
+ failure VNODE_LOOKUP_NEXT "Could not find next level page table",
+ failure VNODE_NOT_INSTALLED "VNode not in rooted page table tree",
+ failure VM_ALREADY_MAPPED "This cap copy is already mapped",
+ failure VM_MAP_SIZE "Mapping size too large",
+ failure VM_MAP_OFFSET "Mapping offset too large",
+ failure VM_RETRY_SINGLE "Mapping overlaps multiple leaf page tables, retry",
// errors related to IRQ table
failure IRQ_LOOKUP "Specified capability was not found while inserting in IRQ table",
// ID capability
failure ID_SPACE_EXHAUSTED "ID space exhausted",
+
+ // I2C driver
+ // XXX: should not be in kernel
+ failure I2C_UNINITIALIZED "Trying to use uninitialized i2c controller",
+ failure I2C_ZERO_LENGTH_MSG "Zero byte transfers not allowed",
+ failure I2C_WAIT_FOR_BUS "Wait for bus free timed out",
+ failure I2C_FAILURE "I2C subsystem failure",
+
};
-// errors generated by libcaps
+// errors generated by libmdb
errors libcaps CAPS_ERR_ {
// general errors
failure INVALID_ARGS "Invalid arguments",
message multihop_message(vci_t vci, uint8 direction, uint8 flags, uint32 ack, uint8 payload[size]);
message multihop_cap_send(vci_t vci, uint8 direction, capid_t capid, errval err, caprep cap, bool null_cap);
+ // cap operation messages
+ message capops_request_copy(coreid dest, caprep cap, capop_st st);
+ message capops_recv_copy(caprep cap, uint8 owner_relations, capop_st st);
+ message capops_recv_copy_result(errval status, capaddr_t cap, capbits_t bits, capslot_t slot, capop_st st);
+
+ message capops_move_request(caprep cap, uint8 relations, capop_st st);
+ message capops_move_result(errval status, capop_st st);
+ message capops_retrieve_request(caprep cap, capop_st st);
+ message capops_retrieve_result(errval status, uint8 relations, capop_st st);
+
+ message capops_delete_remote(caprep cap, capop_st st);
+ message capops_delete_remote_result(errval status, capop_st st);
+
+ message capops_revoke_mark(caprep cap, capop_st st);
+ message capops_revoke_ready(capop_st st);
+ message capops_revoke_commit(capop_st st);
+ message capops_revoke_done(capop_st st);
+
+ // XXX: uint32 for bits? -MN
+ message capops_request_retype(caprep src, int desttype, uint32 destbits, capop_st st);
+ message capops_retype_response(errval status, capop_st st);
+
+ // ownership / relation messages
+ message capops_update_owner(caprep cap, capop_st st);
+ message capops_owner_updated(capop_st st);
+
+ message capops_find_cap(caprep cap, capop_st st);
+ message capops_find_cap_result(errval status, capop_st st);
+
+ message capops_find_descendants(caprep cap, capop_st st);
+ message capops_find_descendants_result(errval status, capop_st st);
+
+ /* Tracing Framework */
+
+ // Notify a core that it should prepare the tracing state. The origin core
+ // is the one who initiated the preparation request.
+ message trace_prepare(coreid origin_core);
+
+ // Notify the core who initiated the preparation that it is finished.
+ message trace_prepare_finished();
+
+ // Notify core 0 that you want to make a time measurement (Network Time
+ // Protocol). The origin core is the one who initiated the trace preparation.
+ call trace_measure(coreid_t origin_core, uint64 t0);
+
+ // The response to a measurement call from a core.
+ response trace_measure_ack(coreid origin_core, uint64 t0, uint64 t1, uint64 t2);
+
+ /* bfscope - the tracing server */
+
+ // Forward a trace-flush command to the monitor on the core on which bfscope
+ // is running.
+ call bfscope_flush_send_forward(iref iref);
+
+ // This message is sent, once bfscope has finished flushing.
+ response bfscope_flush_ack_forward();
};
*/
interface monitor "The monitor to client Interface" {
- alias vci_t uint64;
- alias capid_t uint32;
-
- call alloc_iref_request(
- uintptr service_id);
-
- response alloc_iref_reply(
- uintptr service_id,
- iref iref,
- errval err);
-
- /* TODO: move to monitor_blocking as RPC? */
- call boot_core_request(uint8 id, int hwid, int type, string cmdline);
- response boot_core_reply(errval err);
-
- /* TODO: move to monitor_blocking as RPC? */
- call boot_initialize_request();
- response boot_initialize_reply();
-
- call new_monitor_binding_request(
- uintptr st);
-
- response new_monitor_binding_reply(
- errval err,
- cap ep,
- uintptr st);
-
- // a client sends this to the monitor to initiate a bind
- message bind_lmp_client_request(
- iref iref,
- uintptr conn_id,
- size buflen,
- cap ep);
-
- // Request multiboot caps from the monitor.
- // I imagine only a file system doing this.
- /* TODO: move to monitor_blocking as RPC? */
- message multiboot_cap_request(uint32 slot);
- message multiboot_cap_reply(cap cap, errval err);
-
- // the monitor sends this to a service to pass on a bind request
- message bind_lmp_service_request(
- uintptr service_id,
- uintptr mon_id,
- size buflen,
- cap ep);
-
-
- message bind_lmp_reply_monitor(
- errval err,
- uintptr mon_id,
- uintptr conn_id,
- cap ep);
-
- message bind_lmp_reply_client(
- errval err,
- uintptr mon_id,
- uintptr conn_id,
- cap ep);
-
- // a client sends this to the monitor to initiate a bind
- message bind_ump_client_request(
- iref iref,
- uintptr conn_id,
- cap frame,
- size channel_length_in,
- size channel_length_out,
- cap notify);
-
- // the monitor sends this to a service to pass on a bind request
- message bind_ump_service_request(
- uintptr service_id,
- uintptr mon_id,
- cap frame,
- size channel_length_in,
- size channel_length_out,
- cap notify);
-
- call bind_ump_reply_monitor(
- uintptr mon_id,
- uintptr conn_id,
- errval err,
- cap notify);
-
- response bind_ump_reply_client(
- uintptr mon_id,
- uintptr conn_id,
- errval err,
- cap notify);
-
- call ipi_alloc_notify_request(cap ep, uintptr state);
- response ipi_alloc_notify_reply(uintptr state, cap notify, errval err);
-
- call get_mem_iref_request();
- response get_mem_iref_reply(
- iref iref);
-
- call get_name_iref_request(uintptr st);
- response get_name_iref_reply(iref iref, uintptr st);
-
- call get_ramfs_iref_request(uintptr st);
- response get_ramfs_iref_reply(iref iref, uintptr st);
- call set_ramfs_iref_request(iref iref);
-
- call set_mem_iref_request(
- iref iref);
-
- call set_name_iref_request(
- iref iref);
- response set_name_iref_reply(
- errval err);
-
- call get_monitor_rpc_iref_request(uintptr st);
- response get_monitor_rpc_iref_reply(iref iref, uintptr st);
-
- /* for UMP/BMP cap tx */
- call cap_send_request(
- uintptr mon_id,
- cap cap,
- capid_t capid,
- uint8 give_away);
- response cap_receive_request(
- uintptr conn_id,
- errval err,
- cap cap,
- capid_t capid);
-
- call span_domain_request(
- uintptr domain_id,
- uint8 core_id,
- cap vroot,
- cap disp);
- response span_domain_reply(
- errval err,
- uintptr domain_id);
-
- /* TODO: move to monitor_blocking as RPC? */
- call num_cores_request();
- response num_cores_reply(uint8 num);
-
- /* Multi-hop interconnect driver */
-
- // Send routing table
- // First message, which describes the set of valid core IDs
- call multihop_routing_table_new(coreid max_coreid, coreid nentries);
- // Subsequent messages (repeated) which each contain a portion of the routing table from a single core
- call multihop_routing_table_set(coreid from, coreid to[len]);
-
- // Connection set-up between monitor and client
- call multihop_bind_client_request(iref iref, vci_t sender_vci);
- response multihop_bind_client_reply(vci_t receiver_vci, vci_t sender_vci, errval err);
-
- // Connection set-up between monitor and service
- call multihop_bind_service_request(uintptr service_id, vci_t sender_vci);
- response multihop_bind_service_reply(vci_t receiver_vci , vci_t sender_vci, errval err);
-
- // user message
- message multihop_message(vci_t vci, uint8 direction, uint8 flags, uint32 ack, uint8 payload[size]);
-
- // cap transfer
- call multihop_cap_send(vci_t vci, uint8 direction, errval err, cap cap, capid_t capid);
-
- /* Tracing Framework */
-
- // Notify a core that it should prepare the tracing state. The origin core
- // is the one who initiated the preparation request.
- message trace_prepare(coreid origin_core);
-
- // Notify the core who initiated the preparation that it is finished.
- message trace_prepare_finished();
-
- /* bfscope - the tracing server */
-
- // Send a message to bfscope, to notify that it should flush
- call bfscope_flush_send(iref iref);
-
- // Notify the initiatior of the flush request that it has been completed.
- response bfscope_flush_ack();
+ alias vci_t uint64;
+ alias capid_t uint32;
+
+ call alloc_iref_request(
+ uintptr service_id);
+
+ response alloc_iref_reply(
+ uintptr service_id,
+ iref iref,
+ errval err);
+
+ /* TODO: move to monitor_blocking as RPC? */
+ call boot_core_request(uint8 id, int hwid, int type, string cmdline);
+ response boot_core_reply(errval err);
+
+ /* TODO: move to monitor_blocking as RPC? */
+ call boot_initialize_request();
+ response boot_initialize_reply();
+
+ call new_monitor_binding_request(
+ uintptr st);
+
+ response new_monitor_binding_reply(
+ errval err,
+ cap ep,
+ uintptr st);
+
+ // a client sends this to the monitor to initiate a bind
+ message bind_lmp_client_request(
+ iref iref,
+ uintptr conn_id,
+ size buflen,
+ cap ep);
+
+ // Request multiboot caps from the monitor.
+ // I imagine only a file system doing this.
+ /* TODO: move to monitor_blocking as RPC? */
+ message multiboot_cap_request(uint32 slot);
+ message multiboot_cap_reply(cap cap, errval err);
+
+ // the monitor sends this to a service to pass on a bind request
+ message bind_lmp_service_request(
+ uintptr service_id,
+ uintptr mon_id,
+ size buflen,
+ cap ep);
+
+
+ message bind_lmp_reply_monitor(
+ errval err,
+ uintptr mon_id,
+ uintptr conn_id,
+ cap ep);
+
+ message bind_lmp_reply_client(
+ errval err,
+ uintptr mon_id,
+ uintptr conn_id,
+ cap ep);
+
+ // a client sends this to the monitor to initiate a bind
+ message bind_ump_client_request(
+ iref iref,
+ uintptr conn_id,
+ cap frame,
+ size channel_length_in,
+ size channel_length_out,
+ cap notify);
+
+ // the monitor sends this to a service to pass on a bind request
+ message bind_ump_service_request(
+ uintptr service_id,
+ uintptr mon_id,
+ cap frame,
+ size channel_length_in,
+ size channel_length_out,
+ cap notify);
+
+ call bind_ump_reply_monitor(
+ uintptr mon_id,
+ uintptr conn_id,
+ errval err,
+ cap notify);
+
+ response bind_ump_reply_client(
+ uintptr mon_id,
+ uintptr conn_id,
+ errval err,
+ cap notify);
+
+ call ipi_alloc_notify_request(cap ep, uintptr state);
+ response ipi_alloc_notify_reply(uintptr state, cap notify, errval err);
+
+ call get_mem_iref_request();
+ response get_mem_iref_reply(
+ iref iref);
+
+ call get_name_iref_request(uintptr st);
+ response get_name_iref_reply(iref iref, uintptr st);
+
+ call get_ramfs_iref_request(uintptr st);
+ response get_ramfs_iref_reply(iref iref, uintptr st);
+ call set_ramfs_iref_request(iref iref);
+
+ call set_mem_iref_request(
+ iref iref);
+
+ call set_name_iref_request(
+ iref iref);
+ response set_name_iref_reply(
+ errval err);
+
+ call get_monitor_rpc_iref_request(uintptr st);
+ response get_monitor_rpc_iref_reply(iref iref, uintptr st);
+
+ /* for UMP/BMP cap tx */
+ call cap_send_request(
+ uintptr mon_id,
+ cap cap,
+ capid_t capid);
+ call cap_move_request(
+ uintptr mon_id,
+ give_away_cap cap,
+ capid_t capid);
+ response cap_receive_request(
+ uintptr conn_id,
+ errval err,
+ give_away_cap cap,
+ capid_t capid);
+
+ call span_domain_request(
+ uintptr domain_id,
+ uint8 core_id,
+ cap vroot,
+ cap disp);
+ response span_domain_reply(
+ errval err,
+ uintptr domain_id);
+
+ /* TODO: move to monitor_blocking as RPC? */
+ call num_cores_request();
+ response num_cores_reply(uint8 num);
+
+ /* Multi-hop interconnect driver */
+
+ // Send routing table
+ // First message, which describes the set of valid core IDs
+ call multihop_routing_table_new(coreid max_coreid, coreid nentries);
+ // Subsequent messages (repeated) which each contain a portion of the routing table from a single core
+ call multihop_routing_table_set(coreid from, coreid to[len]);
+
+ // Connection set-up between monitor and client
+ call multihop_bind_client_request(iref iref, vci_t sender_vci);
+ response multihop_bind_client_reply(vci_t receiver_vci, vci_t sender_vci, errval err);
+
+ // Connection set-up between monitor and service
+ call multihop_bind_service_request(uintptr service_id, vci_t sender_vci);
+ response multihop_bind_service_reply(vci_t receiver_vci , vci_t sender_vci, errval err);
+
+ // user message
+ message multihop_message(vci_t vci, uint8 direction, uint8 flags, uint32 ack, uint8 payload[size]);
+
+ // cap transfer
+ call multihop_cap_send(vci_t vci, uint8 direction, errval err, cap cap, capid_t capid);
++
++ /* Tracing Framework */
++
++ // Notify a core that it should prepare the tracing state. The origin core
++ // is the one who initiated the preparation request.
++ message trace_prepare(coreid origin_core);
++
++ // Notify the core who initiated the preparation that it is finished.
++ message trace_prepare_finished();
++
++ /* bfscope - the tracing server */
++
++ // Send a message to bfscope, to notify that it should flush
++ call bfscope_flush_send(iref iref);
++
++    // Notify the initiator of the flush request that it has been completed.
++ response bfscope_flush_ack();
};
return cap_invoke3(root, CNodeCmd_Revoke, cap, bits).error;
}
+/**
+ * \brief Invoke the GetState command on a CNode to query the
+ *        distributed-capability state of the capability at \p cap.
+ *
+ * \param root CNode capability the invocation is performed on.
+ * \param cap  CSpace address of the capability to query.
+ * \param bits Number of valid bits in \p cap.
+ * \param ret  Out-parameter; receives the state on success, 0 on
+ *             failure. Must not be NULL.
+ *
+ * \return Error value of the invocation.
+ */
+static inline errval_t invoke_cnode_get_state(struct capref root, capaddr_t cap,
+                                              int bits, distcap_state_t *ret)
+{
+    struct sysret sysret = cap_invoke3(root, CNodeCmd_GetState, cap, bits);
+
+    assert(ret != NULL);
+    if (err_is_ok(sysret.error)) {
+        // Invocation succeeded: sysret.value carries the state word.
+        *ret = sysret.value;
+    }
+    else {
+        // Leave a well-defined value in *ret on the error path.
+        *ret = 0;
+    }
+    return sysret.error;
+}
+
- static inline errval_t invoke_vnode_unmap(struct capref cap, size_t entry)
+ static inline errval_t invoke_vnode_map(struct capref ptable, capaddr_t slot,
+ capaddr_t src, int frombits, size_t flags,
+ size_t offset, size_t pte_count)
{
- return cap_invoke2(cap, VNodeCmd_Unmap, entry).error;
+ return cap_invoke7(ptable, VNodeCmd_Map, slot, src, frombits, flags, offset, pte_count).error;
+ }
+
+ static inline errval_t invoke_vnode_unmap(struct capref cap, capaddr_t mapping_addr,
+ int bits, size_t entry, size_t num_pages)
+ {
+ return cap_invoke5(cap, VNodeCmd_Unmap, mapping_addr, bits, entry, num_pages).error;
}
/**
#ifndef INCLUDEBARRELFISH_CADDR_H
#define INCLUDEBARRELFISH_CADDR_H
+ #include <stdbool.h>
+ #include <sys/cdefs.h>
+
#include <barrelfish_kpi/types.h>
+#include <stdint.h>
+#include <stdbool.h>
+ __BEGIN_DECLS
+
/**
* \brief User-level representation of a CNode, its CSpace address and size
*/
#include <barrelfish_kpi/types.h>
#include <barrelfish_kpi/capabilities.h>
#include <barrelfish_kpi/dispatcher_shared.h>
+#include <barrelfish_kpi/distcaps.h>
#include <barrelfish/invocations_arch.h>
+ __BEGIN_DECLS
+
errval_t cnode_create(struct capref *ret_dest, struct cnoderef *cnoderef,
cslot_t slots, cslot_t *retslots);
errval_t cnode_create_raw(struct capref dest, struct cnoderef *cnoderef,
return err;
}
+/**
+ * \brief Query the distributed-capability state of \p cap via the
+ *        root CNode (wrapper around invoke_cnode_get_state()).
+ *
+ * \param cap   Capability to query.
+ * \param state Out-parameter for the returned state; forwarded to the
+ *              invocation, so it must not be NULL.
+ *
+ * \return Error value of the underlying invocation.
+ */
+static inline errval_t cap_get_state(struct capref cap, distcap_state_t *state)
+{
+    uint8_t vbits = get_cap_valid_bits(cap);
+    // Shift the full CSpace address down so only the valid bits remain.
+    capaddr_t caddr = get_cap_addr(cap) >> (CPTR_BITS - vbits);
+
+    return invoke_cnode_get_state(cap_root, caddr, vbits, state);
+}
+
+ __END_DECLS
+
#endif //INCLUDEBARRELFISH_CAPABILITIES_H
#ifndef BARRELFISH_DEBUG_H
#define BARRELFISH_DEBUG_H
+ #include <sys/cdefs.h>
+
#include <errors/errno.h>
#include <barrelfish/caddr.h>
+#include <stddef.h>
#include <barrelfish_kpi/registers_arch.h>
+ __BEGIN_DECLS
+
struct capability;
errval_t debug_cap_identify(struct capref cap, struct capability *ret);
+ errval_t debug_dump_hw_ptables(void);
void debug_cspace(struct capref root);
void debug_my_cspace(void);
void debug_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
#ifndef __ASSEMBLER__
+ #include <sys/cdefs.h>
#include <barrelfish_kpi/dispatcher_handle.h>
+#include <barrelfish_kpi/registers_arch.h>
+#include <barrelfish_kpi/types.h>
+ __BEGIN_DECLS
+
#ifdef NDEBUG
#define assert_disabled(e) ((void)sizeof(e))
#define warn_disabled(v,e) ((void)sizeof(e))
#ifndef BARRELFISH_EVENT_QUEUE_H
#define BARRELFISH_EVENT_QUEUE_H
+ #include <sys/cdefs.h>
+
#include <barrelfish/waitset.h>
-#include <barrelfish/threads.h>
+#include <barrelfish/thread_sync.h>
+ __BEGIN_DECLS
+
/// What mode does an event queue operate in?
enum event_queue_mode {
/// Run events continuously, as the waitset allows
#include <barrelfish/waitset.h>
#include <barrelfish/lmp_endpoints.h>
#include <barrelfish/idc.h>
+#include <assert.h>
+ __BEGIN_DECLS
+
struct lmp_chan;
struct event_queue_node;
#ifndef LIBBARRELFISH_LMP_ENDPOINTS_H
#define LIBBARRELFISH_LMP_ENDPOINTS_H
+ #include <sys/cdefs.h>
+
#include <barrelfish/waitset.h>
#include <barrelfish_kpi/lmp.h>
+#include <barrelfish/caddr.h>
+#include <barrelfish_kpi/dispatcher_handle.h>
+ __BEGIN_DECLS
+
/// In-endpoint size of a maximum-sized LMP message plus header
#define LMP_RECV_LENGTH (LMP_MSG_LENGTH + LMP_RECV_HEADER_LENGTH)
#ifndef LIBBARRELFISH_MEMOBJ_H
#define LIBBARRELFISH_MEMOBJ_H
+#include <barrelfish/slab.h>
+ #include <sys/cdefs.h>
+
+ __BEGIN_DECLS
// FIXME: these enum names need to be scoped (e.g. MEMOBJ_X rather than X) -AB
enum memobj_type {
#ifndef BARRELFISH_RAM_ALLOC_H
#define BARRELFISH_RAM_ALLOC_H
+#include <stdint.h>
+#include <errors/errno.h>
+ #include <sys/cdefs.h>
+
+ __BEGIN_DECLS
+struct capref;
+
typedef errval_t (* ram_alloc_func_t)(struct capref *ret, uint8_t size_bits,
uint64_t minbase, uint64_t maxlimit);
#ifndef LIBBARRELFISH_THREADS_H
#define LIBBARRELFISH_THREADS_H
+ #include <barrelfish/caddr.h> // for struct capref.
#include <barrelfish/thread_sync.h>
+#include <barrelfish/caddr.h>
#include <barrelfish_kpi/registers_arch.h>
#include <barrelfish_kpi/dispatcher_handle.h>
+ #include <errors/errno.h>
+ #include <sys/cdefs.h>
+
+ __BEGIN_DECLS
typedef int (*thread_func_t)(void *);
#ifndef LIBBARRELFISH_VREGION_H
#define LIBBARRELFISH_VREGION_H
+#include <barrelfish_kpi/types.h>
+ #include <sys/cdefs.h>
+
+ __BEGIN_DECLS
#define VREGION_FLAGS_READ 0x01 // Reading allowed
#define VREGION_FLAGS_WRITE 0x02 // Writing allowed
#ifndef LIBBARRELFISH_VSPACE_LAYOUT_H
#define LIBBARRELFISH_VSPACE_LAYOUT_H
+#include <assert.h>
+ #include <sys/cdefs.h>
+
+ __BEGIN_DECLS
struct vspace_layout;
struct vspace_layout_funcs {
#ifndef LIBBARRELFISH_VSPACE_MMU_AWARE_H
#define LIBBARRELFISH_VSPACE_MMU_AWARE_H
+#include <barrelfish/vregion.h>
+ #include <sys/cdefs.h>
+
+ __BEGIN_DECLS
struct vspace_mmu_vregion_list {
struct vregion vregion;
#ifndef BARRELFISH_WAITSET_H
#define BARRELFISH_WAITSET_H
+#include <barrelfish/types.h>
+#include <errors/errno.h>
+ #include <sys/cdefs.h>
+
+ __BEGIN_DECLS
struct waitset;
struct thread;
void remove_mapping(struct cte *cte);
errval_t mdb_get_copy(struct capability *cap, struct capability **ret);
bool mdb_is_sane(void);
-void set_cap_remote(struct cte *cte, bool is_remote);
-bool is_cap_remote(struct cte *cte);
+void mdb_set_relations(struct cte *cte, uint8_t relations, uint8_t mask);
+ __END_DECLS
+
#endif // LIBMDB_MDB_H
#include <syscall.h>
#include <barrelfish_kpi/syscalls.h>
#include <mdb/mdb.h>
+#include <mdb/mdb_tree.h>
#include <dispatch.h>
#include <paging_kernel_arch.h>
+ #include <paging_generic.h>
#include <exec.h>
#include <arch/x86/apic.h>
#include <arch/x86/global.h>
[CNodeCmd_Create] = handle_create,
[CNodeCmd_Delete] = handle_delete,
[CNodeCmd_Revoke] = handle_revoke,
+ [CNodeCmd_GetState] = handle_get_state,
},
[ObjType_VNode_x86_64_pml4] = {
+ [VNodeCmd_Map] = handle_map,
[VNodeCmd_Unmap] = handle_unmap,
},
[ObjType_VNode_x86_64_pdpt] = {
[KernelCmd_Setup_trace] = handle_trace_setup,
[KernelCmd_Register] = monitor_handle_register,
[KernelCmd_Domain_Id] = monitor_handle_domain_id,
- [MonitorCmd_Retype] = monitor_handle_retype,
- [MonitorCmd_Delete] = monitor_handle_delete,
- [MonitorCmd_Revoke] = monitor_handle_revoke,
+ [KernelCmd_Get_cap_owner] = monitor_get_cap_owner,
+ [KernelCmd_Set_cap_owner] = monitor_set_cap_owner,
+ [KernelCmd_Lock_cap] = monitor_lock_cap,
+ [KernelCmd_Unlock_cap] = monitor_unlock_cap,
+ [KernelCmd_Retype] = monitor_handle_retype,
+ [KernelCmd_Has_descendants] = monitor_handle_has_descendants,
+ [KernelCmd_Delete_last] = monitor_handle_delete_last,
+ [KernelCmd_Delete_foreigns] = monitor_handle_delete_foreigns,
+ [KernelCmd_Revoke_mark_target] = monitor_handle_revoke_mark_tgt,
+ [KernelCmd_Revoke_mark_relations] = monitor_handle_revoke_mark_rels,
+ [KernelCmd_Delete_step] = monitor_handle_delete_step,
+ [KernelCmd_Clear_step] = monitor_handle_clear_step,
[KernelCmd_Sync_timer] = monitor_handle_sync_timer,
[KernelCmd_IPI_Register] = kernel_ipi_register,
- [KernelCmd_IPI_Delete] = kernel_ipi_delete
+ [KernelCmd_IPI_Delete] = kernel_ipi_delete,
+ [KernelCmd_DumpPTables] = kernel_dump_ptables
},
[ObjType_IRQTable] = {
[IRQTableCmd_Set] = handle_irq_table_set,
/* Set the type specific fields and insert into #dest_caps */
switch(type) {
case ObjType_Frame:
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ // XXX: SCC hack, while we don't have a devframe allocator
+ if(lpaddr + ((lpaddr_t)1 << bits) < PADDR_SPACE_LIMIT) {
+ memset((void*)lvaddr, 0, (lvaddr_t)1 << bits);
+ } else {
+ printk(LOG_WARN, "Allocating RAM at 0x%" PRIxLPADDR
+ " uninitialized\n", lpaddr);
+ }
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
- src_cap.u.frame.base = genpaddr + i * ((genpaddr_t)1 << objbits);
+ src_cap.u.frame.base = genpaddr + dest_i * ((genpaddr_t)1 << objbits);
src_cap.u.frame.bits = objbits;
// Insert the capabilities
- err = set_cap(&dest_caps[i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
if (err_is_fail(err)) {
- return err;
+ break;
}
}
- return SYS_ERR_OK;
+ break;
case ObjType_PhysAddr:
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
- src_cap.u.physaddr.base = genpaddr + i * ((genpaddr_t)1 << objbits);
+ src_cap.u.physaddr.base = genpaddr + dest_i * ((genpaddr_t)1 << objbits);
src_cap.u.physaddr.bits = objbits;
// Insert the capabilities
- err = set_cap(&dest_caps[i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
if (err_is_fail(err)) {
- return err;
+ break;
}
}
- return SYS_ERR_OK;
+ break;
+
case ObjType_RAM:
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
- src_cap.u.ram.base = genpaddr + i * ((genpaddr_t)1 << objbits);
+ src_cap.u.ram.base = genpaddr + dest_i * ((genpaddr_t)1 << objbits);
src_cap.u.ram.bits = objbits;
// Insert the capabilities
- err = set_cap(&dest_caps[i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
if (err_is_fail(err)) {
- return err;
+ break;
}
}
- return SYS_ERR_OK;
+ break;
+
case ObjType_DevFrame:
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
- src_cap.u.devframe.base = genpaddr + i * ((genpaddr_t)1 << objbits);
+ src_cap.u.devframe.base = genpaddr + dest_i * ((genpaddr_t)1 << objbits);
src_cap.u.devframe.bits = objbits;
// Insert the capabilities
- err = set_cap(&dest_caps[i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
if (err_is_fail(err)) {
- return err;
+ break;
}
}
- return SYS_ERR_OK;
+ break;
case ObjType_CNode:
assert((1UL << OBJBITS_CTE) >= sizeof(struct cte));
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ memset((void*)lvaddr, 0, 1UL << bits);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.cnode.cnode =
- lpaddr + i * ((lpaddr_t)1 << (objbits + OBJBITS_CTE));
+ lpaddr + dest_i * ((lpaddr_t)1 << (objbits + OBJBITS_CTE));
src_cap.u.cnode.bits = objbits;
src_cap.u.cnode.guard = 0;
src_cap.u.cnode.guard_size = 0;
{
size_t objbits_vnode = vnode_objbits(type);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ memset((void*)lvaddr, 0, 1UL << bits);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.vnode_arm_l1.base =
- genpaddr + i * ((genpaddr_t)1 << objbits_vnode);
+ genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
#ifdef __arm__
// Insert kernel/mem mappings into new table.
{
size_t objbits_vnode = vnode_objbits(type);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ memset((void*)lvaddr, 0, 1UL << bits);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.vnode_arm_l2.base =
- genpaddr + i * ((genpaddr_t)1 << objbits_vnode);
+ genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
// Insert the capability
- err = set_cap(&dest_caps[i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
if (err_is_fail(err)) {
- return err;
+ break;
}
}
- return SYS_ERR_OK;
+ break;
}
case ObjType_VNode_x86_32_ptable:
{
size_t objbits_vnode = vnode_objbits(type);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ memset((void*)lvaddr, 0, 1UL << bits);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.vnode_x86_32_ptable.base =
- genpaddr + i * ((genpaddr_t)1 << objbits_vnode);
+ genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
// Insert the capability
- err = set_cap(&dest_caps[i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
if (err_is_fail(err)) {
- return err;
+ break;
}
}
- return SYS_ERR_OK;
+ break;
}
case ObjType_VNode_x86_32_pdir:
{
size_t objbits_vnode = vnode_objbits(type);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ memset((void*)lvaddr, 0, 1UL << bits);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.vnode_x86_32_pdir.base =
- genpaddr + i * ((genpaddr_t)1 << objbits_vnode);
+ genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
#if defined(__i386__) && !defined(CONFIG_PAE)
// Make it a good PDE by inserting kernel/mem VSpaces
{
size_t objbits_vnode = vnode_objbits(type);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ memset((void*)lvaddr, 0, 1UL << bits);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.vnode_x86_32_pdir.base =
- genpaddr + i * ((genpaddr_t)1 << objbits_vnode);
+ genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
#if defined(__i386__) && defined(CONFIG_PAE)
// Make it a good PDPTE by inserting kernel/mem VSpaces
{
size_t objbits_vnode = vnode_objbits(type);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ memset((void*)lvaddr, 0, 1UL << bits);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.vnode_x86_64_ptable.base =
- genpaddr + i * ((genpaddr_t)1 << objbits_vnode);
+ genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
// Insert the capability
- err = set_cap(&dest_caps[i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
if (err_is_fail(err)) {
- return err;
+ break;
}
}
- return SYS_ERR_OK;
+ break;
}
case ObjType_VNode_x86_64_pdir:
{
size_t objbits_vnode = vnode_objbits(type);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ memset((void*)lvaddr, 0, 1UL << bits);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.vnode_x86_64_pdir.base =
- genpaddr + i * ((genpaddr_t)1 << objbits_vnode);
+ genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
// Insert the capability
- err = set_cap(&dest_caps[i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
if (err_is_fail(err)) {
- return err;
+ break;
}
}
- return SYS_ERR_OK;
+ break;
}
case ObjType_VNode_x86_64_pdpt:
{
size_t objbits_vnode = vnode_objbits(type);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ memset((void*)lvaddr, 0, 1UL << bits);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.vnode_x86_64_pdpt.base =
- genpaddr + i * ((genpaddr_t)1 << objbits_vnode);
+ genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
// Insert the capability
- err = set_cap(&dest_caps[i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
if (err_is_fail(err)) {
- return err;
+ break;
}
}
- return SYS_ERR_OK;
+ break;
}
case ObjType_VNode_x86_64_pml4:
{
size_t objbits_vnode = vnode_objbits(type);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ memset((void*)lvaddr, 0, 1UL << bits);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
+
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.vnode_x86_64_pml4.base =
- genpaddr + i * ((genpaddr_t)1 << objbits_vnode);
+ genpaddr + dest_i * ((genpaddr_t)1 << objbits_vnode);
#ifdef __x86_64__
// Make it a good PML4 by inserting kernel/mem VSpaces
case ObjType_Dispatcher:
assert((1UL << OBJBITS_DISPATCHER) >= sizeof(struct dcb));
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 1);
+ memset((void*)lvaddr, 0, 1UL << bits);
+ trace_event(TRACE_SUBSYS_KERNEL, TRACE_EVENT_KERNEL_BZERO, 0);
- for(size_t i = 0; i < numobjs; i++) {
+ for(dest_i = 0; dest_i < numobjs; dest_i++) {
// Initialize type specific fields
src_cap.u.dispatcher.dcb = (struct dcb *)
- (lvaddr + i * (1UL << OBJBITS_DISPATCHER));
+ (lvaddr + dest_i * (1UL << OBJBITS_DISPATCHER));
// Insert the capability
- err = set_cap(&dest_caps[i].cap, &src_cap);
+ err = set_cap(&dest_caps[dest_i].cap, &src_cap);
if (err_is_fail(err)) {
- return err;
+ break;
}
}
- return SYS_ERR_OK;
+ break;
case ObjType_ID:
// ID type does not refer to a kernel object
#include <barrelfish_kpi/capabilities.h>
#include <mdb/mdb.h>
#include <offsets.h>
+#include <cap_predicates.h>
++#include <distcaps.h>
+ #include <paging_generic.h>
-STATIC_ASSERT((sizeof(struct capability) + sizeof(struct mdbnode) + sizeof(struct mapping_info)) <= (1UL << OBJBITS_CTE), "cap+mdbnode fit in cte");
+#if 0
+#define TRACE_PMEM_CAPS
+#define TRACE_PMEM_BEGIN 0x0
+#define TRACE_PMEM_SIZE (~(uint32_t)0)
+#endif
+
+struct cte;
+
+struct delete_list {
+ struct cte *next;
+ //cslot_t next_slot;
+};
+
+STATIC_ASSERT((sizeof(struct capability) + sizeof(struct mdbnode)
- + sizeof(struct delete_list)) <= (1UL << OBJBITS_CTE),
++ + sizeof(struct distcap_info) + sizeof(struct mapping_info))
++ <= (1UL << OBJBITS_CTE),
+ "cap+mdbnode fit in cte");
/**
* \brief A CTE (Capability Table Entry).
*/
struct cte {
struct capability cap; ///< The capability
- struct mdbnode mdbnode; ///< MDB node for the cap
- struct delete_list delete_node; ///< State for in-progress delete cascades
+ struct mdbnode mdbnode; ///< MDB "root" node for the cap
++ struct distcap_info distcap; ///< State for distributed cap operations
+ struct mapping_info mapping_info; ///< Mapping info for mapped pmem capabilities
/// Padding to fill the struct out to the size required by OBJBITS_CTE
char padding[(1UL << OBJBITS_CTE)
- sizeof(struct capability) - sizeof(struct mdbnode)
- - sizeof(struct delete_list)];
- - sizeof(struct mapping_info)];
++ - sizeof(struct distcap_info) - sizeof(struct mapping_info)];
};
static inline struct cte *caps_locate_slot(lpaddr_t cnode, cslot_t offset)
#include <syscall.h>
#include <barrelfish_kpi/syscalls.h>
#include <capabilities.h>
- //#include <mdb/mdb.h>
- //#include <mdb/mdb_tree.h>
- //#include <cap_predicates.h>
+ #include <cap_predicates.h>
+ #include <mdb/mdb.h>
++#include <mdb/mdb_tree.h>
++#include <cap_predicates.h>
#include <dispatch.h>
+#include <distcaps.h>
#include <wakeup.h>
- //#include <paging_kernel_helper.h>
- //#include <exec.h>
- //#include <irq.h>
+ #include <paging_kernel_helper.h>
+ #include <paging_kernel_arch.h>
+ #include <exec.h>
+ #include <irq.h>
#include <trace/trace.h>
-
- /// Keep track of all DCBs for tracing rundown
- /// XXX this is never garbage-collected at the moment
- struct dcb *dcbs_list = NULL;
+ #include <trace_definitions/trace_defs.h>
errval_t sys_print(const char *str, size_t length)
{
arch_dirs "scc" = [ arch_dir, "arch/x86" ]
arch_dirs _ = [ arch_dir ]
- common_srcs = [ "ram_alloc.c", "inter.c", "spawn.c", "invocations.c", "iref.c",
+ rcap_db = case Config.rcap_db of
+ Config.RCAP_DB_NULL -> "rcap_db_null.c"
+ Config.RCAP_DB_CENTRAL-> "rcap_db_central.c"
+ Config.RCAP_DB_TWOPC -> "rcap_db_twopc.c"
+ rcap_db_libs = case Config.rcap_db of
+ Config.RCAP_DB_NULL -> []
+ Config.RCAP_DB_CENTRAL-> [ "collections" ]
+ Config.RCAP_DB_TWOPC -> [ "collections", "cap_predicates" ]
+ common_srcs = [ "trace_support.c", "bfscope_support.c", "ram_alloc.c", "inter.c", "spawn.c", "invocations.c", "iref.c",
"main.c", "monitor_server.c", "monitor_rpc_server.c",
- "boot.c", "queue.c", "domain.c", "intermon_bindings.c",
- "rcap_db_common.c", "resource_ctrl.c", "timing.c", rcap_db ]
+ "boot.c", "queue.c", "domain.c", "intermon_bindings.c",
+ "resource_ctrl.c", "timing.c", "send_cap.c",
+ "capops/capsend.c", "capops/capqueue.c",
+ "capops/caplock.c", "capops/copy.c", "capops/move.c",
+ "capops/retrieve.c", "capops/delete.c", "capops/revoke.c",
+ "capops/retype.c", "capops/init.c", "capops/magic.c",
+ "capops/deletestep.c" ]
arch_srcs "x86_32" = [ "arch/x86/boot.c", "arch/x86/inter.c", "arch/x86/monitor_server.c", "arch/x86/notify_ipi.c" ]
arch_srcs "x86_64" = [ "arch/x86/boot.c", "arch/x86/inter.c", "arch/x86/monitor_server.c", "arch/x86/notify_ipi.c" ]
--- /dev/null
+/**
+ * \file
+ * \brief
+ */
+
+/*
+ * Copyright (c) 2007, 2008, 2009, 2010, 2011, ETH Zurich.
+ * All rights reserved.
+ *
+ * This file is distributed under the terms in the attached LICENSE file.
+ * If you do not find this file, copies can be found by writing to:
+ * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
+ */
+
+#ifndef MONITOR_H
+#define MONITOR_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <barrelfish/barrelfish.h>
+#include <spawndomain/spawndomain.h>
+#include <bench/bench_arch.h>
+#include <if/monitor_defs.h>
+#include <if/monitor_blocking_defs.h>
+#include <monitor_invocations_arch.h>
+#include <queue.h>
+#include <connection.h>
+
+// Change #URPC_SIZE if changing this
+#define MON_URPC_CHANNEL_LEN (32 * UMP_MSG_BYTES)
+#define MON_RAM_CHANNEL_LEN (2 * UMP_MSG_BYTES)
+
+// XXX: These should match the aliases in intermon.if
+typedef uint64_t state_id_t;
+typedef uint64_t mon_id_t;
+typedef uint64_t con_id_t;
+typedef uint32_t chanid_t;
+typedef uint8_t bool_t;
+
+// XXX: from old routing library, to be removed
+typedef uint32_t recordid_t;
+
+//XXX used to wait until all monitors are up and connected. asq
+extern int seen_connections;
+
+struct intermon_state {
+ struct msg_queue queue; ///< Queue of outgoing messages
+ struct intermon_binding *binding; ///< Back-pointer to binding
+ coreid_t core_id; ///< Core ID of monitor on other end
+ rsrcid_t rsrcid;
+ bool rsrcid_inflight;
+ bool capops_ready;
+ struct monitor_binding *originating_client;
+};
+
+struct monitor_state {
+ struct msg_queue queue;
+};
+
+extern iref_t mem_serv_iref;
+extern iref_t name_serv_iref;
+extern iref_t ramfs_serv_iref;
+extern iref_t monitor_rpc_iref;
+extern iref_t monitor_mem_iref;
+extern coreid_t my_core_id;
+extern bool bsp_monitor;
+extern struct capref trace_cap;
+extern struct bootinfo *bi;
+extern bool update_ram_alloc_binding;
+extern int num_monitors;
+
+union capability_caprep_u {
+ intermon_caprep_t caprep;
+ monitor_blocking_caprep_t caprepb; // XXX: identical to intermon_caprep_t
+ struct capability cap;
+};
+STATIC_ASSERT(sizeof(union capability_caprep_u) >= sizeof(struct capability), \
+ ASSERT_CONCAT("Size mismatch:", intermon_caprep_t));
+
+static inline void capability_to_caprep(struct capability *cap,
+ intermon_caprep_t *caprep)
+{
+ union capability_caprep_u u = { .cap = *cap };
+ *caprep = u.caprep;
+}
+
+static inline void caprep_to_capability(intermon_caprep_t *caprep,
+ struct capability *cap)
+{
+ union capability_caprep_u u = { .caprep = *caprep };
+ *cap = u.cap;
+}
+
+#include <ram_alloc.h>
+#include <spawn.h>
+#include <monitor_server.h>
+#include <monitor_invocations.h>
+
+/* monitor_server.c */
+errval_t monitor_server_arch_init(struct monitor_binding *b);
+void set_monitor_rpc_iref(iref_t iref);
+
+/* boot.c */
+void boot_core_request(struct monitor_binding *st, coreid_t id, int32_t hwid,
+ int32_t int_cpu_type, char *cmdline);
+void boot_initialize_request(struct monitor_binding *st);
+
+errval_t spawn_xcore_monitor(coreid_t id, int hwid, enum cpu_type cpu_type,
+ const char *cmdline,
+ struct intermon_binding **ret_binding);
+errval_t boot_arch_app_core(int argc, char *argv[],
+ coreid_t *ret_parent_coreid,
+ struct intermon_binding **ret_binding);
+
+/* main.c */
+errval_t request_trace_caps(struct intermon_binding *st);
+errval_t request_mem_serv_iref(struct intermon_binding *st);
+errval_t request_name_serv_iref(struct intermon_binding *st);
+errval_t request_ramfs_serv_iref(struct intermon_binding *st);
+
+/* inter.c */
+errval_t intermon_init(struct intermon_binding *b, coreid_t coreid);
+errval_t arch_intermon_init(struct intermon_binding *b);
+
+/* ump_support.c */
+errval_t ump_intermon_init(struct intermon_binding *ib);
+errval_t ump_monitor_init(struct monitor_binding *mb);
+
+/* multihop_support.c */
+errval_t multihop_intermon_init(struct intermon_binding *ib);
+errval_t multihop_monitor_init(struct monitor_binding *mb);
+errval_t multihop_request_routing_table(struct intermon_binding *b);
+
++/* trace_support.c */
++errval_t trace_intermon_init(struct intermon_binding *ib);
++errval_t trace_monitor_init(struct monitor_binding *mb);
++
++/* bfscope_support.c */
++errval_t bfscope_intermon_init(struct intermon_binding *ib);
++errval_t bfscope_monitor_init(struct monitor_binding *mb);
++
+/* rck_support.c */
+errval_t rck_intermon_init(struct intermon_binding *ib);
+errval_t rck_monitor_init(struct monitor_binding *mb);
+
+// Resource control
+errval_t rsrc_new(rsrcid_t *id);
+errval_t rsrc_join_satellite(rsrcid_t id, coreid_t coreid);
+errval_t rsrc_join(rsrcid_t id, struct capref dispcap,
+ struct monitor_blocking_binding *b);
+errval_t rsrc_submit_manifest(rsrcid_t id, char *manifest);
+errval_t rsrc_set_phase(rsrcid_t id, uintptr_t phase);
+errval_t rsrc_set_phase_inter(rsrcid_t id, uintptr_t phase, uint64_t timestamp);
+struct monitor_blocking_binding *rsrc_get_binding(rsrcid_t id);
+errval_t rsrc_set_phase_data(rsrcid_t id, uintptr_t active, void *data,
+ size_t len);
+
+// Time coordination
+errval_t timing_sync_timer(void);
+void timing_sync_timer_reply(errval_t err);
+void timing_sync_bench(void);
+
+/* domain.c */
+void domain_mgmt_init(void);
+
+/* intermon_bindings.c */
+errval_t intermon_binding_set(struct intermon_state *st);
+errval_t intermon_binding_get(coreid_t coreid, struct intermon_binding **ret);
+
+/* iref.c */
+errval_t iref_alloc(struct monitor_binding *binding, uintptr_t service_id,
+ iref_t *iref);
+errval_t iref_get_core_id(iref_t iref, coreid_t *core_id);
+errval_t iref_get_binding(iref_t iref, struct monitor_binding **binding);
+errval_t iref_get_service_id(iref_t iref, uintptr_t *service_id);
+
+#endif // MONITOR_H
*/
#include <inttypes.h>
-#include "monitor.h"
+#include <monitor.h>
#include <trace/trace.h>
+#include "send_cap.h"
+#include "capops.h"
+ #include <trace_definitions/trace_defs.h>
#define MIN(x,y) ((x<y) ? (x) : (y))
#define MAX(x,y) ((x>y) ? (x) : (y))
}
#endif
+ #if CONFIG_TRACE
+ err = trace_intermon_init(b);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "trace_intermon_init failed");
+ return err;
+ }
+
+ err = bfscope_intermon_init(b);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "bfscope_intermon_init failed");
+ return err;
+ }
+ #endif
+
err = arch_intermon_init(b);
if (err_is_fail(err)) {
- USER_PANIC_ERR(err, "arch_intermon_init failed");
+ USER_PANIC_ERR(err, "arch_intermon_init failed");
+ return err;
+ }
+
+ err = capops_init(b->waitset, b);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "capops_intermon_init failed");
return err;
}
#include "monitor.h"
#include <barrelfish/monitor_client.h>
+#include "capops.h"
+ // workaround inlining bug with gcc 4.4.1 shipped with ubuntu 9.10 and 4.4.3 in Debian
+ #if defined(__i386__) && defined(__GNUC__) \
+ && __GNUC__ == 4 && __GNUC_MINOR__ == 4 && __GNUC_PATCHLEVEL__ <= 3
+ #define SAFEINLINE __attribute__((noinline))
+ #else
+ #define SAFEINLINE
+ #endif
+
+ /*-------------------------- Internal structures ----------------------------*/
+
++#if 0
+ struct retype_st {
+ struct rcap_st rcap_st; // must always be first
+ struct monitor_blocking_binding *b;
+ struct capref croot;
+ capaddr_t src;
+ uint64_t new_type;
+ uint8_t size_bits;
+ capaddr_t to;
+ capaddr_t slot;
+ int dcn_vbits;
+ };
+
+ struct delete_st {
+ struct rcap_st rcap_st; // must always be first
+ struct monitor_blocking_binding *b;
+ struct capref croot;
+ capaddr_t src;
+ uint8_t vbits;
+ };
+
+ struct revoke_st {
+ struct rcap_st rcap_st; // must always be first
+ struct monitor_blocking_binding *b;
+ struct capref croot;
+ capaddr_t src;
+ uint8_t vbits;
+ };
++#endif
+
+ /*------------------------ Static global variables -------------------------*/
+
++#if 0
+ static struct retype_st static_retype_state;
+ static bool static_retype_state_used = false;
+
+ static struct delete_st static_delete_state;
+ static bool static_delete_state_used = false;
+
+ static struct revoke_st static_revoke_state;
+ static bool static_revoke_state_used = false;
++#endif
+
+ /*-------------------------- Helper Functions ------------------------------*/
+
++#if 0
+ static void remote_cap_retype_phase_2(void * st_arg);
+ static void remote_cap_delete_phase_2(void * st_arg);
+ static void remote_cap_revoke_phase_2(void * st_arg);
+
+ static SAFEINLINE struct retype_st *
+ alloc_retype_st(struct monitor_blocking_binding *b, struct capref croot,
+ capaddr_t src, uint64_t new_type, uint8_t size_bits,
+ capaddr_t to, capaddr_t slot, int dcn_vbits)
+ {
+ struct retype_st * st;
+ if (static_retype_state_used) {
+ st = malloc (sizeof(struct retype_st));
+ assert(st);
+ } else {
+ st = &static_retype_state;
+ static_retype_state_used = true;
+ }
+
+ st->rcap_st.free_at_ccast = false;
+ st->rcap_st.cb = remote_cap_retype_phase_2;
+ st->b = b;
+ st->croot = croot;
+ st->src = src;
+ st->new_type = new_type;
+ st->size_bits= size_bits;
+ st->to = to;
+ st->slot = slot;
+ st->dcn_vbits= dcn_vbits;
+
+ return st;
+ }
+
+ static void free_retype_st(struct retype_st * st)
+ {
+ cap_destroy(st->croot);
+ if (st == &static_retype_state) {
+ static_retype_state_used = false;
+ } else {
+ free(st);
+ }
+ }
+
+ static SAFEINLINE struct delete_st *
+ alloc_delete_st(struct monitor_blocking_binding *b, struct capref croot,
+ capaddr_t src, uint8_t vbits)
+ {
+ struct delete_st * st;
+ if (static_delete_state_used) {
+ st = malloc (sizeof(struct delete_st));
+ assert(st);
+ } else {
+ st = &static_delete_state;
+ static_delete_state_used = true;
+ }
+
+ st->rcap_st.free_at_ccast = false;
+ st->rcap_st.cb = remote_cap_delete_phase_2;
+ st->b = b;
+ st->croot = croot;
+ st->src = src;
+ st->vbits = vbits;
+
+ return st;
+ }
+
+ static void free_delete_st(struct delete_st * st)
+ {
+ cap_destroy(st->croot);
+ if (st == &static_delete_state) {
+ static_delete_state_used = false;
+ } else {
+ free(st);
+ }
+ }
+
+ // workaround inlining bug with gcc 4.4.1 shipped with ubuntu 9.10 and 4.4.3 in Debian
+ static SAFEINLINE struct revoke_st *
+ alloc_revoke_st(struct monitor_blocking_binding *b, struct capref croot,
+ capaddr_t src, uint8_t vbits)
+ {
+ struct revoke_st * st;
+ if (static_revoke_state_used) {
+ st = malloc (sizeof(struct revoke_st));
+ assert(st);
+ } else {
+ st = &static_revoke_state;
+ static_revoke_state_used = true;
+ }
+
+ st->rcap_st.free_at_ccast = false;
+ st->rcap_st.cb = remote_cap_revoke_phase_2;
+ st->b = b;
+ st->croot = croot;
+ st->src = src;
+ st->vbits = vbits;
+
+ return st;
+ }
+
+ static void free_revoke_st(struct revoke_st * st)
+ {
+ cap_destroy(st->croot);
+ if (st == &static_revoke_state) {
+ static_revoke_state_used = false;
+ } else {
+ free(st);
+ }
+ }
+
+
+ /*---------------------------- Handler functions ----------------------------*/
+
+
+
+ static void remote_cap_retype(struct monitor_blocking_binding *b,
+ struct capref croot, capaddr_t src,
+ uint64_t new_type, uint8_t size_bits,
+ capaddr_t to, capaddr_t slot, int32_t dcn_vbits)
+ {
+ errval_t err;
+ bool has_descendants;
+ coremask_t on_cores;
+
+ /* Save state for stackripped reply */
+ struct retype_st * st = alloc_retype_st(b, croot, src, new_type, size_bits,
+ to, slot, dcn_vbits);
+
+
+ /* Get the raw cap from the kernel */
+ err = monitor_domains_cap_identify(croot, src, CPTR_BITS,
+ &(st->rcap_st.capability));
+ if (err_is_fail(err)) {
+ err_push(err, MON_ERR_CAP_REMOTE);
+ goto reply;
+ }
+
+ /* Check if cap is retyped, if it is there is no point continuing,
+ This will be checked again once we succeed in locking cap */
+ err = rcap_db_get_info(&st->rcap_st.capability, &has_descendants, &on_cores);
+ assert(err_is_ok(err));
+ if (has_descendants) {
+ err = MON_ERR_REMOTE_CAP_NEED_REVOKE;
+ goto reply;
+ }
+
+ /* request lock */
+ err = rcap_db_acquire_lock(&st->rcap_st.capability, (struct rcap_st*)st);
+ if (err_is_fail(err)) {
+ goto reply;
+ }
+ return; // continues in remote_cap_retype_phase_2
+
+ reply:
+ free_retype_st(st);
+ err = b->tx_vtbl.remote_cap_retype_response(b, NOP_CONT, err);
+ assert(err_is_ok(err));
+ }
+
+
+ static void remote_cap_retype_phase_2(void * st_arg)
+ {
+ errval_t err, reply_err;
+ bool has_descendants;
+ coremask_t on_cores;
+ struct retype_st * st = (struct retype_st *) st_arg;
+ struct monitor_blocking_binding *b = st->b;
+
+
+ reply_err = st->rcap_st.err;
+
+ err = rcap_db_get_info(&st->rcap_st.capability, &has_descendants, &on_cores);
+
+ assert(err_is_ok(err));
+ if (has_descendants) {
+ reply_err = MON_ERR_REMOTE_CAP_NEED_REVOKE;
+ }
+
+ if (err_is_fail(reply_err)) {
+ // lock failed or cap already retyped, unlock any cores we locked
+ err = rcap_db_release_lock(&(st->rcap_st.capability), st->rcap_st.cores_locked);
+ assert (err_is_ok(err));
+ } else {
+ // all good, do retype on domains behalf
+ reply_err = monitor_retype_remote_cap(st->croot, st->src, st->new_type,
+ st->size_bits, st->to, st->slot,
+ st->dcn_vbits);
+
+ // signal if retype was a success to remote cores
+ err = rcap_db_retype(&(st->rcap_st.capability), err_is_ok(reply_err));
+ assert (err_is_ok(err));
+ }
+
+ free_retype_st(st);
+ err = b->tx_vtbl.remote_cap_retype_response(b, NOP_CONT, reply_err);
+ assert (err_is_ok(err));
+ }
+
+
+ static void remote_cap_delete(struct monitor_blocking_binding *b,
+ struct capref croot, capaddr_t src, uint8_t vbits)
+ {
+ errval_t err;
+
+ /* Save state for stackripped reply */
+ struct delete_st * st = alloc_delete_st(b, croot, src, vbits);
+
+ /* Get the raw cap from the kernel */
+ err = monitor_domains_cap_identify(croot, src, vbits,
+ &(st->rcap_st.capability));
+ if (err_is_fail(err)) {
+ err_push(err, MON_ERR_CAP_REMOTE);
+ goto reply;
+ }
+
+ /* request lock */
+ err = rcap_db_acquire_lock(&(st->rcap_st.capability), (struct rcap_st*)st);
+ if (err_is_fail(err)) {
+ goto reply;
+ }
+ return; // continues in remote_cap_delete_phase_2
+
+ reply:
+ free_delete_st(st);
+ err = b->tx_vtbl.remote_cap_delete_response(b, NOP_CONT, err);
+ assert(err_is_ok(err));
+ }
+
+ static void remote_cap_delete_phase_2(void * st_arg)
+ {
+ errval_t err, reply_err;
+ struct delete_st * st = (struct delete_st *) st_arg;
+ struct monitor_blocking_binding *b = st->b;
+
+ reply_err = st->rcap_st.err;
+ if (err_is_fail(reply_err)) {
+ // lock failed, unlock any cores we locked
+ err = rcap_db_release_lock(&(st->rcap_st.capability),
+ st->rcap_st.cores_locked);
+ assert (err_is_ok(err));
+ } else {
+ // all good, do delete on domains behalf
+ reply_err = monitor_delete_remote_cap(st->croot, st->src, st->vbits);
+ if (err_is_fail(reply_err)) {
+ DEBUG_ERR(reply_err, "delete cap error");
+ }
+
+ if (err_is_ok(reply_err)) {
+ // signal delete to other cores
+ err = rcap_db_delete(&st->rcap_st.capability);
+ assert(err_is_ok(err));
+ }
+ }
+
+ free_delete_st(st);
+ err = b->tx_vtbl.remote_cap_delete_response(b, NOP_CONT, reply_err);
+ assert (err_is_ok(err));
+ }
+
+
+ static void remote_cap_revoke(struct monitor_blocking_binding *b,
+ struct capref croot, capaddr_t src, uint8_t vbits)
+ {
+ errval_t err;
+ /* Save state for stackripped reply */
+ struct revoke_st * st = alloc_revoke_st(b, croot, src, vbits);
+
+ /* Get the raw cap from the kernel */
+ err = monitor_domains_cap_identify(croot, src, vbits,
+ &(st->rcap_st.capability));
+ if (err_is_fail(err)) {
+ err_push(err, MON_ERR_CAP_REMOTE);
+ goto reply;
+ }
+
+ /* request recursive lock on the cap and all of its descendants */
+ err = rcap_db_acquire_recursive_lock(&(st->rcap_st.capability),
+ (struct rcap_st*)st);
+ if (err_is_fail(err)) {
+ goto reply;
+ }
+ return; // continues in remote_cap_revoke_phase_2
+
+ reply:
+ free_revoke_st(st);
+ err = b->tx_vtbl.remote_cap_revoke_response(b, NOP_CONT, err);
+ assert(err_is_ok(err));
+ }
+
+ static void remote_cap_revoke_phase_2(void * st_arg)
+ {
+ errval_t err, reply_err;
+ struct revoke_st * st = (struct revoke_st *) st_arg;
+ struct monitor_blocking_binding *b = st->b;
+
+ reply_err = st->rcap_st.err;
+ if (err_is_fail(reply_err)) {
+ // recursive lock failed, unlock any cores we locked
+ err = rcap_db_release_recursive_lock(&(st->rcap_st.capability),
+ st->rcap_st.cores_locked);
+ assert (err_is_ok(err));
+ } else {
+ // all good, do revoke on domain's behalf
+ reply_err = monitor_revoke_remote_cap(st->croot, st->src, st->vbits);
+ if (err_is_fail(reply_err)) {
+ DEBUG_ERR(reply_err, "revoke cap error");
+ }
+
+ if (err_is_ok(reply_err)) {
+ // signal revoke to other cores
+ err = rcap_db_revoke(&st->rcap_st.capability);
+ assert(err_is_ok(err));
+ }
+ }
+
+ free_revoke_st(st);
+ err = b->tx_vtbl.remote_cap_revoke_response(b, NOP_CONT, reply_err);
+ assert (err_is_ok(err));
+ }
++#endif
++
+static void retype_reply_status(errval_t status, void *st)
+{
+ struct monitor_blocking_binding *b = (struct monitor_blocking_binding*)st;
+ errval_t err = b->tx_vtbl.remote_cap_retype_response(b, NOP_CONT, status);
+ assert(err_is_ok(err));
+}
+
+static void remote_cap_retype(struct monitor_blocking_binding *b,
+ struct capref croot, capaddr_t src,
+ uint64_t new_type, uint8_t size_bits,
+ capaddr_t to, capaddr_t slot, int32_t to_vbits)
+{
+ capops_retype(new_type, size_bits, croot, to, to_vbits, slot, src,
+ CPTR_BITS, retype_reply_status, (void*)b);
+}
+
+static void delete_reply_status(errval_t status, void *st)
+{
+ struct monitor_blocking_binding *b = (struct monitor_blocking_binding*)st;
+ errval_t err = b->tx_vtbl.remote_cap_delete_response(b, NOP_CONT, status);
+ assert(err_is_ok(err));
+}
+
+static void remote_cap_delete(struct monitor_blocking_binding *b,
+ struct capref croot, capaddr_t src, uint8_t vbits)
+{
+ struct domcapref cap = { .croot = croot, .cptr = src, .bits = vbits };
+ capops_delete(cap, delete_reply_status, (void*)b);
+}
+
+static void revoke_reply_status(errval_t status, void *st)
+{
+ struct monitor_blocking_binding *b = (struct monitor_blocking_binding*)st;
+ errval_t err = b->tx_vtbl.remote_cap_revoke_response(b, NOP_CONT, status);
+ assert(err_is_ok(err));
+}
+
+static void remote_cap_revoke(struct monitor_blocking_binding *b,
+ struct capref croot, capaddr_t src, uint8_t vbits)
+{
+ struct domcapref cap = { .croot = croot, .cptr = src, .bits = vbits };
+ capops_revoke(cap, revoke_reply_status, (void*)b);
+}
static void rsrc_manifest(struct monitor_blocking_binding *b,
struct capref dispcap, char *str)
#include <barrelfish/debug.h> // XXX: To set the cap_identify_reply handler
#include <barrelfish/sys_debug.h> // XXX: for sys_debug_send_ipi
#include <trace/trace.h>
+ #include <trace_definitions/trace_defs.h>
#include <if/mem_defs.h>
#include <barrelfish/monitor_client.h>
+#include <barrelfish_kpi/distcaps.h>
#include <if/monitor_loopback_defs.h>
+#include "capops.h"
+#include "caplock.h"
+#include "send_cap.h"
// the monitor's loopback binding to itself
static struct monitor_binding monitor_self_binding;