/* base page size for now so we can map the kcb in boot driver */
size_bits { kcb_size };
};
+
+cap IPI is_always_copy {};
/* Retrieve local arch-specific core ID (e.g. APIC ID on x86)*/
rpc get_arch_core_id(out uintptr id);
- /* Scary */
- rpc get_kernel_cap(out cap cap);
+ /* Retrieve a capability that can be used to send IPIs */
+ rpc get_ipi_cap(out cap cap);
rpc forward_kcb_request(in coreid destination, in cap kcb, out errval err);
return sysret.error;
}
-static inline errval_t invoke_send_init_ipi(struct capref kernel_cap, coreid_t core_id)
+static inline errval_t invoke_send_init_ipi(struct capref ipi_cap, coreid_t core_id)
{
- uint8_t invoke_bits = get_cap_valid_bits(kernel_cap);
- capaddr_t invoke_cptr = get_cap_addr(kernel_cap) >> (CPTR_BITS - invoke_bits);
+ uint8_t invoke_bits = get_cap_valid_bits(ipi_cap);
+ capaddr_t invoke_cptr = get_cap_addr(ipi_cap) >> (CPTR_BITS - invoke_bits);
return
- syscall3((invoke_bits << 16) | (KernelCmd_Init_IPI_Send << 8) | SYSCALL_INVOKE,
+ syscall3((invoke_bits << 16) | (IPICmd_Send_Init << 8) | SYSCALL_INVOKE,
invoke_cptr, (uintptr_t) core_id).error;
}
-static inline errval_t invoke_send_start_ipi(struct capref kernel_cap, coreid_t core_id, forvaddr_t entry)
+static inline errval_t invoke_send_start_ipi(struct capref ipi_cap, coreid_t core_id, forvaddr_t entry)
{
- uint8_t invoke_bits = get_cap_valid_bits(kernel_cap);
- capaddr_t invoke_cptr = get_cap_addr(kernel_cap) >> (CPTR_BITS - invoke_bits);
+ uint8_t invoke_bits = get_cap_valid_bits(ipi_cap);
+ capaddr_t invoke_cptr = get_cap_addr(ipi_cap) >> (CPTR_BITS - invoke_bits);
return
- syscall4((invoke_bits << 16) | (KernelCmd_Start_IPI_Send << 8) | SYSCALL_INVOKE,
+ syscall4((invoke_bits << 16) | (IPICmd_Send_Start << 8) | SYSCALL_INVOKE,
invoke_cptr, (uintptr_t) core_id, (uintptr_t) entry).error;
}
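For reference, both wrappers above pack the invocation word identically: the capability's valid-bit count in bits 16 and up, the command ordinal in bits 8-15, and SYSCALL_INVOKE in the low byte. A minimal sketch of a hypothetical helper that builds the same word (illustration only, not part of this patch):

/* Hypothetical helper, illustration only: packs an invocation word the same
 * way invoke_send_init_ipi()/invoke_send_start_ipi() do above. */
static inline uintptr_t ipi_invoke_word(uint8_t invoke_bits, uint8_t cmd)
{
    return ((uintptr_t)invoke_bits << 16) | ((uintptr_t)cmd << 8) | SYSCALL_INVOKE;
}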
}
return sr.error;
-}
\ No newline at end of file
+}
return sysret.error;
}
-static inline errval_t invoke_send_init_ipi(struct capref kernel_cap, coreid_t core_id)
+static inline errval_t invoke_send_init_ipi(struct capref ipi_cap, coreid_t core_id)
{
- return cap_invoke2(kernel_cap, KernelCmd_Init_IPI_Send,
+ return cap_invoke2(ipi_cap, IPICmd_Send_Init,
core_id).error;
}
-static inline errval_t invoke_send_start_ipi(struct capref kernel_cap, coreid_t core_id, forvaddr_t entry)
+static inline errval_t invoke_send_start_ipi(struct capref ipi_cap, coreid_t core_id, forvaddr_t entry)
{
- return cap_invoke3(kernel_cap, KernelCmd_Start_IPI_Send,
+ return cap_invoke3(ipi_cap, IPICmd_Send_Start,
core_id, entry).error;
}
static inline bool type_is_vnode(enum objtype type)
{
- STATIC_ASSERT(26 == ObjType_Num, "Check VNode definitions");
+ STATIC_ASSERT(27 == ObjType_Num, "Check VNode definitions");
return (type == ObjType_VNode_x86_64_pml4 ||
type == ObjType_VNode_x86_64_pdpt ||
static inline size_t vnode_objbits(enum objtype type)
{
// This function should be emitted by hamlet or somesuch.
- STATIC_ASSERT(26 == ObjType_Num, "Check VNode definitions");
+ STATIC_ASSERT(27 == ObjType_Num, "Check VNode definitions");
if (type == ObjType_VNode_x86_64_pml4 ||
type == ObjType_VNode_x86_64_pdpt ||
*/
static inline size_t vnode_entry_bits(enum objtype type) {
// This function should be emitted by hamlet or somesuch.
- STATIC_ASSERT(26 == ObjType_Num, "Check VNode definitions");
+ STATIC_ASSERT(27 == ObjType_Num, "Check VNode definitions");
if (type == ObjType_VNode_x86_64_pml4 ||
type == ObjType_VNode_x86_64_pdpt ||
KernelCmd_Spawn_SCC_Core,
KernelCmd_IPI_Register,
KernelCmd_IPI_Delete,
- KernelCmd_Start_IPI_Send, ///< Send Startup IPI to a destination core
- KernelCmd_Init_IPI_Send, ///< Send Init IPI to a destination core
- KernelCmd_GetGlobalPhys, ///< Get physical address of kernel variable struct global;
+ KernelCmd_GetGlobalPhys, ///< Get physical address of kernel variable struct global
KernelCmd_Add_kcb, ///< add extra kcb to be scheduled
KernelCmd_Remove_kcb, ///< remove kcb from scheduling ring
KernelCmd_Suspend_kcb_sched, ///< suspend/resume kcb scheduler
};
/**
+ * IPI capability commands
+ */
+
+enum ipi_cmd {
+ IPICmd_Send_Start, ///< Send Startup IPI to a destination core
+ IPICmd_Send_Init, ///< Send Init IPI to a destination core
+};
+/**
* Maximum command ordinal.
*/
#define CAP_MAX_CMD KernelCmd_Count
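IPICmd_Send_Start and IPICmd_Send_Init index the per-capability-type invocation tables below; assuming those tables are dimensioned by CAP_MAX_CMD (i.e. KernelCmd_Count), as the definition above suggests, a compile-time guard along these lines could be added (illustrative sketch, not part of this patch):

/* Illustrative guard only: IPI command ordinals must fit the invocation
 * dispatch tables, which are assumed to be sized by CAP_MAX_CMD. */
STATIC_ASSERT(IPICmd_Send_Init < KernelCmd_Count,
              "IPI command ordinals must fit the invocation dispatch tables");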
[KernelCmd_IPI_Register] = kernel_ipi_register,
[KernelCmd_IPI_Delete] = kernel_ipi_delete,
#endif
- [KernelCmd_Start_IPI_Send] = kernel_send_start_ipi,
- [KernelCmd_Init_IPI_Send] = kernel_send_init_ipi,
[KernelCmd_GetGlobalPhys] = kernel_get_global_phys,
[KernelCmd_Add_kcb] = kernel_add_kcb,
[KernelCmd_Remove_kcb] = kernel_remove_kcb,
[KernelCmd_Suspend_kcb_sched] = kernel_suspend_kcb_sched
},
+ [ObjType_IPI] = {
+ [IPICmd_Send_Start] = kernel_send_start_ipi,
+ [IPICmd_Send_Init] = kernel_send_init_ipi,
+ },
[ObjType_IRQTable] = {
[IRQTableCmd_Alloc] = handle_irq_table_alloc,
[IRQTableCmd_Set] = handle_irq_table_set,
[KernelCmd_Sync_timer] = monitor_handle_sync_timer,
[KernelCmd_IPI_Register] = kernel_ipi_register,
[KernelCmd_IPI_Delete] = kernel_ipi_delete,
- [KernelCmd_Start_IPI_Send] = kernel_send_start_ipi,
- [KernelCmd_Init_IPI_Send] = kernel_send_init_ipi,
[KernelCmd_GetGlobalPhys] = kernel_get_global_phys,
[KernelCmd_Add_kcb] = kernel_add_kcb,
[KernelCmd_Remove_kcb] = kernel_remove_kcb,
[KernelCmd_Suspend_kcb_sched] = kernel_suspend_kcb_sched,
},
+ [ObjType_IPI] = {
+ [IPICmd_Send_Start] = kernel_send_start_ipi,
+ [IPICmd_Send_Init] = kernel_send_init_ipi,
+ },
[ObjType_IRQTable] = {
[IRQTableCmd_Alloc] = handle_irq_table_alloc,
[IRQTableCmd_Set] = handle_irq_table_set,
// If you create more capability types you need to deal with them
// in the table below.
-STATIC_ASSERT(26 == ObjType_Num, "Knowledge of all cap types");
+STATIC_ASSERT(27 == ObjType_Num, "Knowledge of all cap types");
static size_t caps_numobjs(enum objtype type, uint8_t bits, uint8_t objbits)
{
case ObjType_Notify_RCK:
case ObjType_Notify_IPI:
case ObjType_PerfMon:
+ case ObjType_IPI:
return 1;
default:
*/
// If you create more capability types you need to deal with them
// in the table below.
-STATIC_ASSERT(26 == ObjType_Num, "Knowledge of all cap types");
+STATIC_ASSERT(27 == ObjType_Num, "Knowledge of all cap types");
static errval_t caps_create(enum objtype type, lpaddr_t lpaddr, uint8_t bits,
uint8_t objbits, size_t numobjs,
/* fall through */
case ObjType_Kernel:
+ case ObjType_IPI:
case ObjType_IRQTable:
case ObjType_EndPoint:
case ObjType_Notify_RCK:
#include "coreboot.h"
coreid_t my_arch_id;
-struct capref kernel_cap;
+struct capref ipi_cap;
bool done = false;
st->rx_vtbl.boot_core_reply = boot_core_reply;
}
-static void load_kernel_cap(void)
+static void load_ipi_cap(void)
{
struct monitor_blocking_rpc_client *mc = get_monitor_blocking_rpc_client();
- errval_t err = mc->vtbl.get_kernel_cap(mc, &kernel_cap);
+ errval_t err = mc->vtbl.get_ipi_cap(mc, &ipi_cap);
if (err_is_fail(err)) {
- USER_PANIC_ERR(err, "get_kernel_cap failed.");
+ USER_PANIC_ERR(err, "get_ipi_cap failed.");
}
}
setup_monitor_messaging();
load_arch_id();
- load_kernel_cap();
+ load_ipi_cap();
}
volatile uint64_t *ap_dispatch;
extern coreid_t my_arch_id;
-extern struct capref kernel_cap;
+extern struct capref ipi_cap;
extern uint64_t end;
errval_t get_core_info(coreid_t core_id, archid_t* apic_id, enum cpu_type* cpu_type)
barrelfish_usleep(10*1000);
#endif
- err = invoke_send_init_ipi(kernel_cap, core_id);
+ err = invoke_send_init_ipi(ipi_cap, core_id);
if (err_is_fail(err)) {
DEBUG_ERR(err, "invoke send init ipi");
return err;
#endif
// x86 protocol actually would like us to do this twice
- err = invoke_send_start_ipi(kernel_cap, core_id, entry);
+ err = invoke_send_start_ipi(ipi_cap, core_id, entry);
if (err_is_fail(err)) {
DEBUG_ERR(err, "invoke sipi");
return err;
*ap_wait = AP_STARTING_UP;
end = bench_tsc();
- err = invoke_send_init_ipi(kernel_cap, core_id);
+ err = invoke_send_init_ipi(ipi_cap, core_id);
if (err_is_fail(err)) {
DEBUG_ERR(err, "invoke send init ipi");
return err;
}
- err = invoke_send_start_ipi(kernel_cap, core_id, entry);
+ err = invoke_send_start_ipi(ipi_cap, core_id, entry);
if (err_is_fail(err)) {
DEBUG_ERR(err, "invoke sipi");
return err;
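Taken together, the two call sites above implement the standard x86 INIT/SIPI startup handshake, now driven through the IPI capability rather than the kernel cap. A condensed, illustrative sketch of the sequence (delays, AP status polling and benchmarking elided):

/* Condensed, illustrative sketch of the INIT/SIPI sequence using the new
 * IPI cap; the real code above interleaves delays and status checks. */
static errval_t start_core_sketch(coreid_t core_id, forvaddr_t entry)
{
    errval_t err = invoke_send_init_ipi(ipi_cap, core_id);
    if (err_is_fail(err)) {
        return err;
    }
    /* The x86 protocol expects the Startup IPI to be sent twice. */
    for (int i = 0; i < 2; i++) {
        err = invoke_send_start_ipi(ipi_cap, core_id, entry);
        if (err_is_fail(err)) {
            return err;
        }
    }
    return SYS_ERR_OK;
}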
/* ----------------------- BOOTINFO REQUEST CODE END ----------------------- */
-// TODO(gz): HACK remove before coreboot goes public.
-static void get_kernel_cap(struct monitor_blocking_binding *b)
+static void get_ipi_cap(struct monitor_blocking_binding *b)
{
errval_t err;
- err = b->tx_vtbl.get_kernel_cap_response(b, NOP_CONT, cap_kernel);
+ // XXX: We should not just hand out this cap to everyone
+ // who requests it. There is currently no way to determine
+ // if the client is a valid recipient.
+
+ // get slot for ipi cap
+ struct capref ipi;
+ err = slot_alloc(&ipi);
if (err_is_fail(err)) {
- USER_PANIC_ERR(err, "sending kernel_cap failed.");
+ USER_PANIC_ERR(err, "slot_alloc for IPI cap failed.");
}
+
+ // TODO: do this in a slightly saner way!
+ // fabricate an IPI cap
+ struct capability cap;
+ memset(&cap, 0, sizeof(cap));
+ cap.type = ObjType_IPI;
+ cap.rights = CAPRIGHTS_ALLRIGHTS;
+
+ // put it in the slot
+ capaddr_t caddr = get_cnode_addr(ipi);
+ uint8_t vbits = get_cnode_valid_bits(ipi);
+ size_t slot = ipi.slot;
+ err = invoke_monitor_create_cap((uint64_t*)&cap, caddr, vbits, slot);
+ assert(err_is_ok(err));
+
+ err = b->tx_vtbl.get_ipi_cap_response(b, NOP_CONT, ipi);
+ assert(err_is_ok(err));
}
static void forward_kcb_request(struct monitor_blocking_binding *b,
.get_arch_core_id_call = get_arch_core_id,
.cap_set_remote_call = cap_set_remote,
- .get_kernel_cap_call = get_kernel_cap,
+ .get_ipi_cap_call = get_ipi_cap,
.forward_kcb_request_call = forward_kcb_request,