failure ELF_MAP "Failure in spawn_elf_map",
failure SET_CAPS "Failure in set_special_caps",
+ failure MONEP_SLOT_ALLOC "Failure allocating a slot for monitor EP",
failure MONITOR_CLIENT "Failure in monitor_client_setup",
failure FREE "Failure in spawn_free",
/* Is destination empty */
if (recv_cte->cap.type != ObjType_Null) {
+ printk(LOG_NOTE, "%s: dest slot occupied\n", __FUNCTION__);
return SYS_ERR_LMP_CAPTRANSFER_DST_SLOT_OCCUPIED;
}
assert(retcap != NULL);
assert(retsize != NULL);
uint32_t s;
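+ // allocate a slot to receive the VBE BIOS cap before issuing the RPC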
+ err = slot_alloc(retcap);
+ if (err_is_fail(err)) {
+ return err;
+ }
err = rpc_client->vtbl.get_vbe_bios_cap(rpc_client, &msgerr, retcap, &s);
*retsize = s;
return err_is_fail(err) ? err : msgerr;
errval_t err, msgerr;
struct monitor_blocking_rpc_client *r = get_monitor_blocking_rpc_client();
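+ // get_irq_dest_cap fills retcap, so its slot must be allocated up front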
+ err = slot_alloc(retcap);
+ if (err_is_fail(err)) {
+ return err;
+ }
err = r->vtbl.get_irq_dest_cap(r, retcap, &msgerr);
if (err_is_fail(err)){
return err;
// the default value
ram_set_affinity(0, 0);
do {
- struct capref cap;
- err = slot_alloc(&cap);
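+ // allocate directly into the caller's ret slot instead of a scratch slot that is freed again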
+ err = slot_alloc(ret);
if (err_is_fail(err)) {
err = err_push(err, LIB_ERR_SLOT_ALLOC);
break;
}
- err = slot_free(cap);
- if (err_is_fail(err)) {
- err = err_push(err, LIB_ERR_SLOT_FREE);
- break;
- }
} while (0);
ram_set_affinity(minbase, maxlimit);
if (err_is_fail(err)) {
return err;
}
+ } else {
+ err = slot_alloc(ret);
+ if (err_is_fail(err)) {
+ return err;
+ }
}
assert(ret != NULL);
*/
errval_t oct_get_capability(const char *key, struct capref *retcap)
{
- errval_t reterr;
+ errval_t err, reterr;
struct octopus_thc_client_binding_t *cl = oct_get_thc_client();
- errval_t err = cl->call_seq.get_cap(cl, key, retcap, &reterr);
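+ // get_cap transfers a capability into retcap, which needs a pre-allocated slot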
+ err = slot_alloc(retcap);
+ if (err_is_fail(err)) {
+ return err;
+ }
+ err = cl->call_seq.get_cap(cl, key, retcap, &reterr);
if(err_is_fail(err)) {
return err;
}
}
// Receive bulk transport cap from bcached
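+ // reserve a slot for the incoming cache-memory cap before issuing the RPC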
+ err = slot_alloc(&client->cache_memory);
+ if(err_is_fail(err)) {
+ USER_PANIC_ERR(err, "slot_alloc for new_client");
+ }
err = client->rpc.vtbl.new_client(&client->rpc, &client->cache_memory);
if(err_is_fail(err)) {
USER_PANIC_ERR(err, "new_client");
C.SBlank,
C.SComment "allocate a new receive slot if needed",
- C.If (C.Unary C.Not $ C.Call "capref_is_null" [C.Variable "cap"])
+ C.If (need_slot_alloc "cap")
[C.Ex $ C.Assignment errvar $
C.Call "lmp_chan_alloc_recv_slot" [chanaddr],
C.If (C.Call "err_is_fail" [errvar])
rx_msgfrag_field = C.DerefField bindvar "rx_msg_fragment"
binding_incoming_token = C.DerefField bindvar "incoming_token"
+ capref_is_null c = C.Call "capref_is_null" [C.Variable c]
+ in_rpc = (C.Call "thread_get_rpc_in_progress" [])
+ need_slot_alloc c = C.Binary C.And (C.Unary C.Not (capref_is_null c))
+ (C.Unary C.Not in_rpc)
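+ -- need_slot_alloc "cap" emits roughly:
+ --   !capref_is_null(cap) && !thread_get_rpc_in_progress()
+ -- i.e. a fresh recv slot is only allocated when a cap arrived outside an RPC;
+ -- the RPC wrapper supplies the caller's slot instead (see below)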
+
call_cases = [C.Case (C.Variable $ msg_enum_elem_name ifn mn) (call_msgnum_case msgdef msg)
| (msgdef, msg@(LMPMsgSpec mn _)) <- zip msgdefs msgs]
C.StmtList $ concat [store_arg_frags arch ifn mn msgwords word 0 afl
| (afl, word) <- zip wl [0..]],
case cap of
- Just (CapFieldTransfer _ af) -> C.Ex $ C.Assignment (argfield_expr RX mn af) (C.Variable "cap")
+ Just (CapFieldTransfer _ af) -> C.StmtList [
+ C.SComment "Updating recv slot: alloc if provided capref null, set otherwise",
+ C.If (C.Binary C.And in_rpc
+ (C.Unary C.Not (C.Call "capref_is_null" [(argfield_expr RX mn af)])))
+ [
+ C.Ex $ C.Call "lmp_chan_set_recv_slot"
+ [chanaddr, (argfield_expr RX mn af)]
+ ] [
+ C.Ex $ C.Call "lmp_chan_rpc_without_allocated_slot" []
+ ],
+ C.SComment "Store received cap in provided capref",
+ C.Ex $ C.Assignment (argfield_expr RX mn af) (C.Variable "cap")
+ ]
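+ -- sketch of the C this emits (names in <> are placeholders for the generated
+ -- channel/argument expressions):
+ --   if (thread_get_rpc_in_progress() && !capref_is_null(<rx cap arg>)) {
+ --       lmp_chan_set_recv_slot(<chan>, <rx cap arg>);
+ --   } else {
+ --       lmp_chan_rpc_without_allocated_slot();
+ --   }
+ --   <rx cap arg> = cap;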
Nothing -> C.StmtList [],
C.SBlank,
C.Ex $ C.Call "assert" [C.Binary C.Equals async_err_var (C.Variable "SYS_ERR_OK")],
C.Ex $ C.Call "thread_set_rpc_in_progress" [C.Variable "true"],
C.SBlank,
+ C.SComment "set provided caprefs on underlying binding",
+ binding_save_rx_slots,
+ C.SBlank,
C.SComment "call send function",
C.Ex $ C.Assignment binding_error (C.Variable "SYS_ERR_OK"),
C.Ex $ C.Call "thread_set_outgoing_token" [C.Call "thread_set_token" [message_chanstate]],
mkargs (Arg _ (StringArray an _)) = [an]
mkargs (Arg _ (DynamicArray an al _)) = [an, al]
(txargs, rxargs) = partition_rpc_args args
+ is_cap_arg (Arg (Builtin t) _) = t == Cap || t == GiveAwayCap
+ is_cap_arg (Arg _ _) = False
+ rx_cap_args = filter is_cap_arg rxargs
+ binding_save_rx_slot (Arg tr (Name an)) = C.Ex $
+ C.Assignment (rpc_rx_union_elem n an) (C.DerefPtr $ C.Variable an)
+ binding_save_rx_slots = C.StmtList [ binding_save_rx_slot c | c <- rx_cap_args ]
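+ -- binding_save_rx_slots copies the caller-supplied capref of every cap-typed
+ -- RX argument into the binding's rx union, so the generated receive code can
+ -- use it as the recv slot instead of allocating a new one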
token_name = "token"
outgoing_token = bindvar `C.DerefField` "outgoing_token"
receiving_chanstate = C.CallInd (bindvar `C.DerefField` "get_receiving_chanstate") [bindvar]
size_t bootinfo_size;
struct bootinfo *bootinfo;
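+ // get_bootinfo hands back a frame cap; allocate a slot for it before the call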
+ err = slot_alloc(&bootinfo_frame);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "slot_alloc for monitor->get_bootinfo");
+ }
msgerr = cl->vtbl.get_bootinfo(cl, &err, &bootinfo_frame, &bootinfo_size);
if (err_is_fail(msgerr) || err_is_fail(err)) {
USER_PANIC_ERR(err_is_fail(msgerr) ? msgerr : err, "failed in get_bootinfo");
errval_t error_code;
struct capref requested_caps;
+ err = slot_alloc(&requested_caps);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "slot_alloc for monitor->get_phyaddr_cap");
+ }
err = cl->vtbl.get_phyaddr_cap(cl, &requested_caps, &error_code);
assert(err_is_ok(err) && err_is_ok(error_code));
physical_caps = requested_caps;
size_t bootinfo_size;
struct bootinfo *bootinfo;
+ err = slot_alloc(&bootinfo_frame);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "slot_alloc for monitor->get_bootinfo");
+ }
msgerr = cl->vtbl.get_bootinfo(cl, &err, &bootinfo_frame, &bootinfo_size);
if (err_is_fail(msgerr) || err_is_fail(err)) {
USER_PANIC_ERR(err_is_fail(msgerr) ? msgerr : err, "failed in get_bootinfo");
// Request I/O Cap
struct capref requested_caps;
errval_t error_code;
+ err = slot_alloc(&requested_caps);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "slot_alloc for monitor->get_io_cap");
+ }
err = cl->vtbl.get_io_cap(cl, &requested_caps, &error_code);
assert(err_is_ok(err) && err_is_ok(error_code));
// Copy into correct slot
// Here we get a cnode cap, so we need to put it somewhere in the root cnode
// As we already have a reserved slot for a phyaddr caps cnode, we put it there
+ err = slot_alloc(&requested_caps);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "slot_alloc for monitor->get_phyaddr_cap");
+ }
err = cl->vtbl.get_phyaddr_cap(cl, &requested_caps, &error_code);
assert(err_is_ok(err) && err_is_ok(error_code));
physical_caps = requested_caps;
static void load_ipi_cap(void)
{
+ errval_t err;
struct monitor_blocking_rpc_client *mc = get_monitor_blocking_rpc_client();
- errval_t err = mc->vtbl.get_ipi_cap(mc, &ipi_cap);
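+ // the monitor fills ipi_cap, so it needs a slot before the RPC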
+ err = slot_alloc(&ipi_cap);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "slot_alloc for monitor->get_ipi_cap failed");
+ }
+ err = mc->vtbl.get_ipi_cap(mc, &ipi_cap);
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "get_ipi_cap failed.");
}
#else
struct acpi_rpc_client* acl = get_acpi_rpc_client();
+ err = slot_alloc(&bootcap);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "slot_alloc for mm_realloc_range_proxy");
+ }
errval_t error_code;
err = acl->vtbl.mm_realloc_range_proxy(acl, 16, 0x0,
- &bootcap, &error_code);
+ &bootcap, &error_code);
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "mm_alloc_range_proxy failed.");
}
struct capref bootcap;
struct acpi_rpc_client* acl = get_acpi_rpc_client();
- errval_t error_code;
- errval_t err = acl->vtbl.mm_realloc_range_proxy(acl, 16, 0x0,
+ errval_t err, error_code;
+
+ err = slot_alloc(&bootcap);
+ if (err_is_fail(err)) {
+ USER_PANIC_ERR(err, "slot_alloc for mm_alloc_range_proxy");
+ }
+ err = acl->vtbl.mm_realloc_range_proxy(acl, 16, 0x0,
&bootcap, &error_code);
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "mm_alloc_range_proxy failed.");
assert(cl != NULL);
struct capref requested_cap;
+ err = slot_alloc(&requested_cap);
+ assert(err_is_ok(err));
err = cl->vtbl.get_io_cap(cl, &requested_cap, &error_code);
assert(err_is_ok(err) && err_is_ok(error_code));
/*err = mm_alloc_range(&pci_mm_physaddr, bits, base + i * framesize,
base + (i + 1) * framesize, &c->phys_cap[i], NULL);*/
errval_t error_code;
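+ // each phys_cap entry receives a cap from acpi; allocate its slot before the proxy call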
+ err = slot_alloc(&c->phys_cap[i]);
+ assert(err_is_ok(err));
err = acl->vtbl.mm_alloc_range_proxy(acl, bits, base + i * framesize,
base + (i + 1) * framesize,
&c->phys_cap[i], &error_code);
struct capref pcie_cap;
struct acpi_rpc_client* acl = get_acpi_rpc_client();
errval_t error_code;
+ err = slot_alloc(&pcie_cap);
+ if (err_is_fail(err)) {
+ return err;
+ }
err = acl->vtbl.mm_alloc_range_proxy(acl, region_bits, address,
address + (1UL << region_bits), &pcie_cap, &error_code);
if (err_is_fail(err)) {
// Request I/O Cap
struct capref requested_caps;
errval_t error_code;
+ err = slot_alloc(&requested_caps);
+ assert(err_is_ok(err));
err = cl->vtbl.get_io_cap(cl, &requested_caps, &error_code);
assert(err_is_ok(err) && err_is_ok(error_code));
struct capref bootinfo_frame;
size_t bootinfo_size;
+ err = slot_alloc(&bootinfo_frame);
+ if (err_is_fail(err)) {
+ return err;
+ }
msgerr = cl->vtbl.get_bootinfo(cl, &err, &bootinfo_frame, &bootinfo_size);
if (err_is_fail(msgerr)) {
err = msgerr;
/* request connection from monitor */
struct monitor_blocking_rpc_client *mrpc = get_monitor_blocking_rpc_client();
struct capref monep;
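+ // reserve a slot for the monitor endpoint cap; failure is reported as SPAWN_ERR_MONEP_SLOT_ALLOC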
+ err = slot_alloc(&monep);
+ if (err_is_fail(err)) {
+ return err_push(err, SPAWN_ERR_MONEP_SLOT_ALLOC);
+ }
err = mrpc->vtbl.alloc_monitor_ep(mrpc, &msgerr, &monep);
if (err_is_fail(err)) {
return err_push(err, SPAWN_ERR_MONITOR_CLIENT);