(Flounder) Fix token passing on 32-bit architectures.
Signed-off-by: Adam Turowski <adam.turowski@inf.ethz.ch>
errval_t skb_set_memory_affinity(void);
#define ELEMENT_NAME_BUF_SIZE 80
-#define SKB_REPLY_BUF_SIZE (128*1024)
struct list_parser_status {
char *s;
struct octopus_rpc_client *rpc_client = NULL;
char *attributes = NULL;
size_t attributes_len = 0;
- char *record = NULL;
octopus_trigger_id_t tid;
rpc_client = get_octopus_rpc_client();
/* Store record at octopus. */
err = rpc_client->vtbl.set_with_idcap(rpc_client, *session_id, attributes,
SET_DEFAULT, NOP_TRIGGER, false,
- &record, &tid, &error);
+ NULL, &tid, &error);
if (err_is_fail(err)) {
goto out;
}
errval_t err;
struct morecore_state *state = get_morecore_state();
+ struct ram_alloc_state *ram_alloc_state = get_ram_alloc_state();
+ if(ram_alloc_state->ram_alloc_func != ram_alloc_fixed) {
+ if (bytes < LARGE_PAGE_SIZE) {
+ bytes = LARGE_PAGE_SIZE;
+ }
+
+ bytes = ROUND_UP(bytes, LARGE_PAGE_SIZE);
+ }
+
void *buf = NULL;
size_t mapped = 0;
size_t step = bytes;
return LIB_ERR_NAMESERVICE_NOT_BOUND;
}
- char* record = NULL;
- octopus_trigger_id_t tid;
- errval_t error_code;
- err = r->vtbl.get(r, iface, NOP_TRIGGER, &record, &tid, &error_code);
+
+ struct octopus_get_names_response__rx_args reply;
+ err = r->vtbl.get(r, iface, NOP_TRIGGER, reply.output, &reply.tid,
+ &reply.error_code);
if (err_is_fail(err)) {
goto out;
}
- err = error_code;
+ err = reply.error_code;
if (err_is_fail(err)) {
if (err_no(err) == OCT_ERR_NO_RECORD) {
err = err_push(err, LIB_ERR_NAMESERVICE_UNKNOWN_NAME);
}
uint64_t iref_number = 0;
- err = oct_read(record, "_ { iref: %d }", &iref_number);
+ err = oct_read(reply.output, "_ { iref: %d }", &iref_number);
if (err_is_fail(err) || iref_number == 0) {
err = err_push(err, LIB_ERR_NAMESERVICE_INVALID_NAME);
goto out;
}
out:
- free(record);
return err;
}
return LIB_ERR_NAMESERVICE_NOT_BOUND;
}
- char* record = NULL;
- errval_t error_code;
- err = r->vtbl.wait_for(r, iface, &record, &error_code);
+ struct octopus_wait_for_response__rx_args reply;
+ err = r->vtbl.wait_for(r, iface, reply.record, &reply.error_code);
if (err_is_fail(err)) {
goto out;
}
- err = error_code;
+ err = reply.error_code;
if (err_is_fail(err)) {
if (err_no(err) == OCT_ERR_NO_RECORD) {
err = err_push(err, LIB_ERR_NAMESERVICE_UNKNOWN_NAME);
}
uint64_t iref_number = 0;
- err = oct_read(record, "_ { iref: %d }", &iref_number);
+ err = oct_read(reply.record, "_ { iref: %d }", &iref_number);
if (err_is_fail(err)) {
err = err_push(err, LIB_ERR_NAMESERVICE_INVALID_NAME);
goto out;
}
out:
- free(record);
return err;
}
}
snprintf(record, len+1, format, iface, iref);
- char* ret = NULL;
octopus_trigger_id_t tid;
errval_t error_code;
- err = r->vtbl.set(r, record, 0, NOP_TRIGGER, 0, &ret, &tid, &error_code);
+ err = r->vtbl.set(r, record, 0, NOP_TRIGGER, 0, NULL, &tid, &error_code);
if (err_is_fail(err)) {
goto out;
}
}
// FIXME: world's most (kinda less now) broken implementation...
-
- char* buffer = NULL;
- errval_t error_code;
- octopus_trigger_id_t tid;
-
char** names = NULL;
size_t count = 0;
-
+
static char* spawnds = "r'spawn.[0-9]+' { iref: _ }";
- err = r->vtbl.get_names(r, spawnds, NOP_TRIGGER, &buffer, &tid, &error_code);
- if (err_is_fail(err) || err_is_fail(error_code)) {
+ struct octopus_get_names_response__rx_args reply;
+ err = r->vtbl.get_names(r, spawnds, NOP_TRIGGER, reply.output, &reply.tid,
+ &reply.error_code);
+ if (err_is_fail(err) || err_is_fail(reply.error_code)) {
err = err_push(err, SPAWN_ERR_FIND_SPAWNDS);
goto out;
}
- err = oct_parse_names(buffer, &names, &count);
+ err = oct_parse_names(reply.output, &names, &count);
if (err_is_fail(err)) {
goto out;
}
}
out:
- free(buffer);
oct_free_names(names, count);
return err;
}
}
assert(cl != NULL);
- err = cl->vtbl.get_domainlist(cl, domains, len);
+ struct spawn_get_domainlist_response__rx_args reply;
+ err = cl->vtbl.get_domainlist(cl, reply.domains, len);
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "get_domainlist");
}
+ *domains = memdup(reply.domains, *len);
return SYS_ERR_OK;
}
}
assert(cl != NULL);
- err = cl->vtbl.status(cl, domain, (spawn_ps_entry_t *)pse, argbuf, arglen,
- reterr);
+ struct spawn_status_response__rx_args reply;
+ err = cl->vtbl.status(cl, domain, (spawn_ps_entry_t *)pse, reply.argv,
+ arglen, reterr);
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "status");
}
+ *argbuf = memdup(reply.argv, *arglen);
return SYS_ERR_OK;
}
{
errval_t err;
errval_t exist_err;
- char* record = NULL;
char** names = NULL;
uint64_t mode = 0;
- uint64_t state = 0;
- uint64_t fn = 0;
octopus_trigger_id_t tid;
size_t current_barriers = 0;
octopus_trigger_t t = oct_mktrigger(OCT_ERR_NO_RECORD, octopus_BINDING_RPC,
err = oct_set_get(SET_SEQUENTIAL, barrier_record,
"%s_ { barrier: '%s' }", name, name);
+ *barrier_record = strdup(*barrier_record);
+ if (*barrier_record == NULL) {
+ return LIB_ERR_MALLOC_FAIL;
+ }
err = oct_get_names(&names, ¤t_barriers, "_ { barrier: '%s' }",
name);
oct_free_names(names, current_barriers);
}
if (err_no(err) == OCT_ERR_NO_RECORD) {
// Wait until barrier record is created
- err = cl->recv.trigger(cl, &tid, &fn, &mode, &record, &state);
- free(record);
+ err = cl->recv.trigger(cl, NULL, NULL, &mode, NULL, NULL);
assert(mode & OCT_REMOVED);
err = SYS_ERR_OK;
errval_t err;
char* rec_name = NULL;
char* barrier_name = NULL;
- char* record = NULL;
char** names = NULL;
size_t remaining_barriers = 0;
uint64_t mode = 0;
- uint64_t state = 0;
- uint64_t fn = 0;
octopus_trigger_id_t tid;
octopus_trigger_t t = oct_mktrigger(SYS_ERR_OK, octopus_BINDING_RPC,
OCT_ON_DEL, NULL, NULL);
if (err_is_ok(err)) {
// Wait until everyone has left the barrier
- err = cl->recv.trigger(cl, &tid, &fn, &mode, &record, &state);
+ err = cl->recv.trigger(cl, NULL, NULL, &mode, NULL, NULL);
assert(mode & OCT_REMOVED);
}
else if (err_no(err) == OCT_ERR_NO_RECORD) {
}
out:
- free(record);
free(rec_name);
free(barrier_name);
return err;
errval_t err = SYS_ERR_OK;
va_list args;
- char* data = NULL;
char* buf = NULL;
*len = 0;
struct octopus_thc_client_binding_t* cl = oct_get_thc_client();
- errval_t error_code;
- octopus_trigger_id_t tid;
- err = cl->call_seq.get_names(cl, buf, NOP_TRIGGER, &data,
- &tid, &error_code);
+ struct octopus_get_names_response__rx_args reply;
+ err = cl->call_seq.get_names(cl, buf, NOP_TRIGGER, reply.output,
+ &reply.tid, &reply.error_code);
if (err_is_ok(err)) {
- err = error_code;
+ err = reply.error_code;
}
if (err_is_ok(err)) {
- err = oct_parse_names(data, names, len);
+ err = oct_parse_names(reply.output, names, len);
//qsort(*names, *len, sizeof(char*), cmpstringp);
}
free(buf);
- free(data);
return err;
}
errval_t oct_get(char** data, const char* query, ...)
{
assert(query != NULL);
- errval_t error_code;
errval_t err = SYS_ERR_OK;
- octopus_trigger_id_t tid;
va_list args;
char* buf = NULL;
struct octopus_thc_client_binding_t* cl = oct_get_thc_client();
assert(cl != NULL);
- err = cl->call_seq.get(cl, buf, NOP_TRIGGER, data,
- &tid, &error_code);
+
+ struct octopus_get_response__rx_args reply;
+ err = cl->call_seq.get(cl, buf, NOP_TRIGGER, reply.output,
+ &reply.tid, &reply.error_code);
if (err_is_ok(err)) {
- err = error_code;
+ err = reply.error_code;
}
free(buf);
+
+ if (err_is_fail(err)) {
+ return err;
+ }
+
+ if (data) {
+ *data = strdup(reply.output);
+ }
+
+
return err;
}
// Send to Server
struct octopus_thc_client_binding_t* cl = oct_get_thc_client();
- char* record = NULL;
errval_t error_code;
- octopus_trigger_id_t tid;
- err = cl->call_seq.set(cl, buf, SET_DEFAULT, NOP_TRIGGER, false,
- &record, &tid, &error_code);
- assert(record == NULL);
+ err = cl->call_seq.set(cl, buf, SET_DEFAULT, NOP_TRIGGER, false, NULL, NULL,
+ &error_code);
if (err_is_ok(err)) {
err = error_code;
// Send to Server
struct octopus_thc_client_binding_t* cl = oct_get_thc_client();
- char* record = NULL;
errval_t error_code;
- octopus_trigger_id_t tid;
- err = cl->call_seq.set(cl, buf, mode, NOP_TRIGGER, false,
- &record, &tid, &error_code);
- assert(record == NULL);
+ err = cl->call_seq.set(cl, buf, mode, NOP_TRIGGER, false, NULL, NULL,
+ &error_code);
if (err_is_ok(err)) {
err = error_code;
// Send to Server
struct octopus_thc_client_binding_t* cl = oct_get_thc_client();
- errval_t error_code;
- octopus_trigger_id_t tid;
- err = cl->call_seq.set(cl, buf, mode, NOP_TRIGGER, true, record,
- &tid, &error_code);
+ struct octopus_set_response__rx_args reply;
+ err = cl->call_seq.set(cl, buf, mode, NOP_TRIGGER, true, reply.record,
+ &reply.tid, &reply.error_code);
if (err_is_ok(err)) {
- err = error_code;
+ err = reply.error_code;
}
free(buf);
+
+ if (err_is_fail(err)) {
+ return err;
+ }
+
+ if (record) {
+ *record = strdup(reply.record);
+ }
+
return err;
}
errval_t oct_get_with_idcap(char **data, struct capref idcap)
{
assert(!capref_is_null(idcap));
- errval_t error_code;
errval_t err = SYS_ERR_OK;
- octopus_trigger_id_t tid;
struct octopus_thc_client_binding_t *cl = oct_get_thc_client();
assert(cl != NULL);
- err = cl->call_seq.get_with_idcap(cl, idcap, NOP_TRIGGER, data, &tid,
- &error_code);
+ struct octopus_get_with_idcap_response__rx_args reply;
+ err = cl->call_seq.get_with_idcap(cl, idcap, NOP_TRIGGER, reply.output,
+ &reply.tid, &reply.error_code);
if (err_is_ok(err)) {
- err = error_code;
+ err = reply.error_code;
+ }
+
+ if (err_is_fail(err)) {
+ return err;
+ }
+
+ if (data) {
+ *data = strdup(reply.output);
}
return err;
// Send to Server
struct octopus_thc_client_binding_t *cl = oct_get_thc_client();
- char *record = NULL;
errval_t error_code;
- octopus_trigger_id_t tid;
err = cl->call_seq.set_with_idcap(cl, idcap, buf, SET_DEFAULT, NOP_TRIGGER,
- false, &record, &tid, &error_code);
- assert(record == NULL);
+ false, NULL, NULL, &error_code);
if (err_is_ok(err)) {
err = error_code;
struct octopus_thc_client_binding_t* cl = oct_get_thc_client();
errval_t error_code;
- octopus_trigger_id_t tid;
- err = cl->call_seq.del(cl, buf, NOP_TRIGGER, &tid, &error_code);
+ err = cl->call_seq.del(cl, buf, NOP_TRIGGER, NULL, &error_code);
if (err_is_ok(err)) {
err = error_code;
}
struct octopus_thc_client_binding_t* cl = oct_get_thc_client();
errval_t error_code;
- octopus_trigger_id_t tid;
- err = cl->call_seq.exists(cl, buf, NOP_TRIGGER, &tid, &error_code);
+ err = cl->call_seq.exists(cl, buf, NOP_TRIGGER, NULL, &error_code);
if (err_is_ok(err)) {
err = error_code;
}
struct octopus_thc_client_binding_t* cl = oct_get_thc_client();
- errval_t error_code;
- err = cl->call_seq.wait_for(cl, buf, record, &error_code);
+ struct octopus_wait_for_response__rx_args reply;
+ err = cl->call_seq.wait_for(cl, buf, reply.record, &reply.error_code);
+ if (err_is_fail(err)) {
+ goto out;
+ }
+ err = reply.error_code;
+
if (err_is_fail(err)) {
goto out;
}
- err = error_code;
+
+ if (record) {
+ *record = strdup(reply.record);
+ }
out:
free(buf);
errval_t err = SYS_ERR_OK;
errval_t exist_err;
char** names = NULL;
- char* record = NULL;
char* name = NULL;
size_t len = 0;
size_t i = 0;
bool found = false;
uint64_t mode = 0;
- uint64_t state = 0;
- uint64_t fn = 0;
octopus_trigger_id_t tid;
octopus_trigger_t t = oct_mktrigger(SYS_ERR_OK, octopus_BINDING_RPC,
OCT_ON_DEL, NULL, NULL);
if (err_is_fail(err)) {
goto out;
}
+ /// XXX why is there a strdup ?
+ *lock_record = strdup(*lock_record);
err = oct_read(*lock_record, "%s", &name);
if (err_is_fail(err)) {
goto out;
}
if (err_is_ok(exist_err)) {
- err = cl->recv.trigger(cl, &tid, &fn, &mode, &record, &state);
+ err = cl->recv.trigger(cl, &tid, NULL, &mode, NULL, NULL);
assert(err_is_ok(err));
- free(record);
assert(mode & OCT_REMOVED);
}
else if (err_no(exist_err) != OCT_ERR_NO_RECORD) {
else {
fprintf(stderr, "Incoming subscription(%"PRIu64") for %s with unset handler function.",
id, record);
- free(record);
}
}
errval_t oct_sem_wait(uint32_t id)
{
errval_t err = SYS_ERR_OK;
- char* result = NULL;
- octopus_trigger_id_t tid;
octopus_trigger_t t = oct_mktrigger(OCT_ERR_NO_RECORD,
octopus_BINDING_RPC, OCT_ON_SET, NULL, NULL);
struct octopus_thc_client_binding_t* cl = oct_get_thc_client();
// XXX: The current implementation suffers from a herd effect,
// may be worth it to use locks for this critical section
while (1) {
- cl->call_seq.get(cl, query, t, &result, &tid, &err);
+ struct octopus_get_response__rx_args reply;
+ cl->call_seq.get(cl, query, t, reply.output, &reply.tid, &err);
if (err_is_ok(err)) {
- errval_t del_err = oct_del(result);
- free(result);
- result = NULL;
+ errval_t del_err = oct_del(reply.output);
if (err_is_ok(del_err)) {
break; // Decreased successfully
}
else if (err_no(err) == OCT_ERR_NO_RECORD) {
// No record found, wait until one is posted
- char* trigger_result = NULL;
- uint64_t fn, mode, state;
- cl->recv.trigger(cl, &tid, &fn, &mode, &trigger_result, &state);
- free(trigger_result);
+ cl->recv.trigger(cl, NULL, NULL, NULL, NULL, NULL);
}
else {
break; // Unexpected error
}
}
- free(result);
return err;
}
else {
fprintf(stderr, "Incoming trigger(%"PRIu64") for %s with unset handler function.",
id, record);
- free(record);
}
}
trigger_handler_fn event_handler, void* state,
octopus_trigger_id_t* tid)
{
- errval_t error_code;
char** names = NULL;
- char* output = NULL;
char* record = NULL; // freed by cpu_change_event
size_t len = 0;
octopus_trigger_t t = oct_mktrigger(0, octopus_BINDING_EVENT,
// Get current cores registered in system
struct octopus_thc_client_binding_t* rpc = oct_get_thc_client();
+
+ struct octopus_get_names_response__rx_args reply;
errval_t err = rpc->call_seq.get_names(rpc, query,
- t, &output, tid, &error_code);
+ t, reply.output, &reply.tid, &reply.error_code);
if (err_is_fail(err)) {
goto out;
}
- err = error_code;
+ err = reply.error_code;
switch(err_no(err)) {
case SYS_ERR_OK:
- err = oct_parse_names(output, &names, &len);
+ err = oct_parse_names(reply.output, &names, &len);
if (err_is_fail(err)) {
goto out;
}
break;
case OCT_ERR_NO_RECORD:
- assert(record == NULL);
break;
default:
DEBUG_ERR(err, "Unable to retrieve core record for %s", names[i]);
- assert(record == NULL);
break;
}
}
out:
oct_free_names(names, len);
- free(output);
return err;
}
ns->cap = cap;
ns->error = reterr;
ns->reply(b, ns);
-
- free(key);
}
static void put_cap_reply(struct octopus_binding *b,
reterr = OCT_ERR_CAP_OVERWRITE;
err = cap_delete(cap);
assert(err_is_ok(err));
- free(key);
} else {
+ /* we need to make our own copy of the key */
+ key = strdup(key);
int r = capdb->d.put_capability(&capdb->d, key, cap);
assert(r == 0);
}
assert(err_is_ok(err));
ns->error = reterr;
ns->reply(b, ns);
-
- free(key);
}
errval_t init_capstorage(void)
drs->reply(b, drs);
free_ast(ast);
- free(query);
}
static void get_names_reply(struct octopus_binding* b,
drs->reply(b, drs);
free_ast(ast);
- free(query);
}
static void set_reply(struct octopus_binding* b, struct oct_reply_state* drs)
drs->reply(b, drs);
free_ast(ast);
- free(query);
}
static errval_t build_query_with_idcap(char **query_p, struct capref idcap,
drs->reply(b, drs);
free_ast(ast);
- free(attributes);
if (query != NULL) {
free(query);
}
drs->reply(b, drs);
free_ast(ast);
- free(query);
}
static void exists_reply(struct octopus_binding* b, struct oct_reply_state* drs)
drs->reply(b, drs);
free_ast(ast);
- free(query);
}
static void wait_for_reply(struct octopus_binding* b, struct oct_reply_state* drs)
if (err_is_ok(err)) {
err = get_record(ast, &drs->query_state);
if (err_no(err) == OCT_ERR_NO_RECORD) {
- debug_printf("waiting for: %s\n", query);
uint64_t wid;
set_watch_err = set_watch(b, ast, OCT_ON_SET, drs, &wid);
}
}
free_ast(ast);
- free(query);
}
static void subscribe_reply(struct octopus_binding* b,
drs->reply(b, drs);
free_ast(ast);
- free(query);
}
static void unsubscribe_reply(struct octopus_binding* b,
out2:
free_ast(ast);
out1:
- free(record);
+ return;
}
void get_identifier(struct octopus_binding* b)
interrupt_handler_fn reloc_handler,
void *reloc_handler_arg)
{
- pci_caps_per_bar_t *caps_per_bar = NULL;
+ pci_caps_per_bar_t caps_per_bar;
uint8_t nbars;
errval_t err, msgerr;
err = pci_client->vtbl.
init_pci_device(pci_client, class, subclass, prog_if, vendor,
device, bus, dev, fun, &msgerr,
- &nbars, &caps_per_bar);
+ &nbars, caps_per_bar, caps_per_bar + 1, caps_per_bar + 2,
+ caps_per_bar + 3, caps_per_bar + 4, caps_per_bar + 5);
if (err_is_fail(err)) {
return err;
} else if (err_is_fail(msgerr)) {
- free(caps_per_bar);
return msgerr;
}
for (int nb = 0; nb < nbars; nb++) {
struct device_mem *bar = &bars[nb];
- int ncaps = (*caps_per_bar)[nb];
+ int ncaps = (caps_per_bar)[nb];
if (ncaps != 0) {
bar->nr_caps = ncaps;
bar->frame_cap = malloc(ncaps * sizeof(struct capref)); // FIXME: leak
err = SYS_ERR_OK;
out:
- free(caps_per_bar);
return err;
}
}
strncpy(dir->dirent.d_name, name, sizeof(dir->dirent.d_name));
- free(name);
dir->dirent.d_name[sizeof(dir->dirent.d_name) - 1] = '\0';
return &dir->dirent;
}
static errval_t allocate_unique_number(uint32_t *np)
{
errval_t err;
- char *record;
- octopus_trigger_id_t tid;
struct octopus_rpc_client *oc = get_octopus_rpc_client();
+ struct octopus_set_response__rx_args reply;
+
/* request a system-wide unique number at octopus */
char *query = PTY_PTS_OCTOPUS_PREFIX;
- oc->vtbl.set(oc, query, SET_SEQUENTIAL, NOP_TRIGGER, true, &record, &tid,
- &err);
+ oc->vtbl.set(oc, query, SET_SEQUENTIAL, NOP_TRIGGER, true, reply.record,
+ &reply.tid, &err);
if (err_is_fail(err)) {
goto finish;
}
* Octpus returns the record in the form 'ptypts0 {}'. Extract unique
* number.
*/
- int ret = sscanf(record, PTY_PTS_OCTOPUS_PREFIX "%" PRIu32, np);
+ int ret = sscanf(reply.record, PTY_PTS_OCTOPUS_PREFIX "%" PRIu32, np);
assert(ret == 1);
finish:
- free(record);
return err;
}
}
/* ------------------------- evaluate ------------------------------ */
-errval_t skb_evaluate(char *query, char **result, char **str_error, int32_t *int_error)
+errval_t skb_evaluate(char *query, char **ret_result, char **ret_str_error, int32_t *int_error)
{
errval_t err;
struct skb_state *skb_state = get_skb_state();
- err = skb_state->skb->vtbl.run(skb_state->skb, query, result, str_error,
- int_error);
+ // allocate memory for holding the response data
+ char *result = NULL;
+ if (ret_result) {
+ result = malloc(skb__run_response_output_MAX_ARGUMENT_SIZE);
+ if (result == NULL) {
+ return LIB_ERR_MALLOC_FAIL;
+ }
+ }
+ char *str_error = NULL;
+ if (ret_str_error) {
+ str_error = malloc(skb__run_response_str_error_MAX_ARGUMENT_SIZE);
+ if (str_error == NULL) {
+ if (result) {
+ free(result);
+ }
+ return LIB_ERR_MALLOC_FAIL;
+ }
+ }
+ err = skb_state->skb->vtbl.run(skb_state->skb, query, result,
+ str_error, int_error);
if (err_is_fail(err)) {
+ if (result) {
+ free(result);
+ }
+ if (str_error) {
+ free(str_error);
+ }
return err_push(err, SKB_ERR_RUN);
}
+
+ if (ret_result) {
+ *ret_result = result;
+ }
+ if (ret_str_error) {
+ *ret_str_error = str_error;
+ }
+
return SYS_ERR_OK;
}
#include <string.h>
#include <barrelfish/barrelfish.h>
#include <skb/skb.h>
+#include <if/skb_rpcclient_defs.h>
+#include <barrelfish/core_state_arch.h>
#include "skb_debug.h"
-#define BUFFER_SIZE SKB_REPLY_BUF_SIZE
-#define OUTPUT_SIZE SKB_REPLY_BUF_SIZE
+#define BUFFER_SIZE skb__run_call_input_MAX_ARGUMENT_SIZE
+#define OUTPUT_SIZE skb__run_response_output_MAX_ARGUMENT_SIZE
/* XXX: The following static chars make the skb connection not thread
safe and we probably don't want to put them in the per dispatcher
corestate as they are so big. */
-static char buffer[BUFFER_SIZE];
-static char output[OUTPUT_SIZE];
-static char error_output[OUTPUT_SIZE];
+static char buffer[skb__run_call_input_MAX_ARGUMENT_SIZE + 1];
+static char output[skb__run_response_output_MAX_ARGUMENT_SIZE + 1];
+static char error_output[skb__run_response_str_error_MAX_ARGUMENT_SIZE + 1];
static int error_code;
int skb_read_error_code(void)
errval_t skb_execute(char *goal)
{
- int32_t error;
- char *result, *error_out;
- errval_t err = skb_evaluate(goal, &result, &error_out, &error);
+ errval_t err;
+ struct skb_state *skb_state = get_skb_state();
+
+ err = skb_state->skb->vtbl.run(skb_state->skb, goal, output,
+ error_output, &error_code);
if (err_is_fail(err)) {
- return err_push(err, SKB_ERR_EVALUATE);
+ return err_push(err, SKB_ERR_RUN);
}
- error_code = error;
- strncpy(output, result, OUTPUT_SIZE);
- strncpy(error_output, error_out, OUTPUT_SIZE);
- free(result);
- free(error_out);
- if (error != 0) {
+
+ if (error_code != 0) {
return err_push(err, SKB_ERR_EXECUTION);
}
+
return err;
}
}
}
- char* record = NULL;
- octopus_trigger_id_t tid;
- errval_t error_code;
+ struct octopus_get_response__rx_args reply;
err = r->vtbl.get(r, omp_entry, NOP_TRIGGER,
- &record, &tid, &error_code);
+ reply.output, &reply.tid, &reply.error_code);
if (err_is_fail(err)) {
goto out;
}
- err = error_code;
+ err = reply.error_code;
if (err_is_fail(err)) {
if (err_no(err) == OCT_ERR_NO_RECORD) {
err = err_push(err, LIB_ERR_NAMESERVICE_UNKNOWN_NAME);
uint64_t addr = 0;
char *symname = NULL;
- err = oct_read(record, "_ { sym: %s, addr: %d }", &symname, &addr);
+ err = oct_read(reply.output, "_ { sym: %s, addr: %d }", &symname, &addr);
if (err_is_fail(err) || symname == NULL) {
err = err_push(err, LIB_ERR_NAMESERVICE_INVALID_NAME);
goto out;
*ret_name = strdup(symname);
}
- out: free(record);
+ out:
free(omp_entry);
return err;
}
}
}
- char* ret = NULL;
octopus_trigger_id_t tid;
errval_t error_code;
err = r->vtbl.set(r, record, 0, NOP_TRIGGER,
- 0, &ret, &tid, &error_code);
+ 0, NULL, &tid, &error_code);
if (err_is_fail(err)) {
goto out;
}
err = error_code;
out:
- free(record);
-
+ free(record);
return err;
}
}
/* Make a copy of characters, since the output filters might modify them. */
- outdata = malloc(length);
- assert(outdata != NULL);
- memcpy(outdata, data, length);
+ outdata = memdup(data, length);
/* tell user how much we've written (before applying filters) */
*written = length;
term_filter_id_t id = term_client_add_input_filter(client, term_filter_cr2lf);
client->cr2lf_id = id;
}
-
+
return SYS_ERR_OK;
}
break;
}
-errval_t term_client_blocking_tcgetattr(struct term_client *client,
+errval_t term_client_blocking_tcgetattr(struct term_client *client,
struct termios* t)
{
if (client->cr2lf_id > 0) {
return SYS_ERR_OK;
}
-errval_t term_client_blocking_tcsetattr(struct term_client *client,
+errval_t term_client_blocking_tcsetattr(struct term_client *client,
const struct termios* t)
{
errval_t err = term_client_blocking_config(client, TerminalConfig_ECHO, (t->c_lflag & ECHO) > 0);
struct octopus_rpc_client *r = get_octopus_rpc_client();
assert(r != NULL);
- char *record;
- errval_t error_code;
- octopus_trigger_id_t tid;
- err = r->vtbl.get_with_idcap(r, session_id, NOP_TRIGGER, &record, &tid,
- &error_code);
+ struct octopus_get_with_idcap_response__rx_args reply;
+
+ err = r->vtbl.get_with_idcap(r, session_id, NOP_TRIGGER, reply.output, &reply.tid,
+ &reply.error_code);
if (err_is_fail(err)) {
err_push(err, TERM_ERR_LOOKUP_SESSION_RECORD);
goto out;
}
- err = error_code;
+ err = reply.error_code;
if (err_is_fail(err)) {
err_push(err, TERM_ERR_LOOKUP_SESSION_RECORD);
goto out;
int64_t conf_oct;
// oct_read can only parse 64-bit values, we need to parse the irefs as 64bit
// then cast to 32bit
- err = oct_read(record, "_ { session_iref: %d, in_iref: %d, out_iref: %d, "
+ err = oct_read(reply.output, "_ { session_iref: %d, in_iref: %d, out_iref: %d, "
"conf_iref: %d }", &session_oct, &in_oct, &out_oct,
&conf_oct);
//iref_t session_iref = (iref_t)session_oct;
"\n", *in_iref, *out_iref, *conf_iref);
out:
- free(record);
return err;
}
* Make a copy of the data, since the echo filters might modify it and the
* modification should not be seen by the application.
*/
- echodata = malloc(length);
- assert(echodata != NULL);
- memcpy(echodata, data, length);
+ echodata = memdup(data, length);
/* apply echo filters */
term_filter_apply(client->echo_filters, &echodata, &length);
{
struct term_client *client = b->st;
+
+ char *my_data = memdup(data, length);
+
if (client->non_blocking_read) {
assert(client->chars_cb != NULL);
/* handle triggers */
- handle_triggers(client, data, length);
+ handle_triggers(client, my_data, length);
/* filter input */
- term_filter_apply(client->input_filters, &data, &length);
+ term_filter_apply(client->input_filters, &my_data, &length);
/* call user supplied chars_cb */
- client->chars_cb(client->st, data, length);
+ client->chars_cb(client->st, my_data, length);
} else {
assert(client->readbuf == NULL);
- client->readbuf = data;
- client->readbuf_pos = data;
+ client->readbuf = my_data;
+ client->readbuf_pos = my_data;
client->readbuf_len = length;
}
}
uint32_t ret_status;
- uint8_t *tmp;
+ uint8_t tmp[2048];
size_t length;
/* connect with the USB Manager */
err = usb_manager.vtbl.connect(&usb_manager, usb_driver_iref, init_config,
- &ret_status, &tmp, &length);
+ &ret_status, tmp, &length);
if (((usb_error_t) ret_status) != USB_ERR_OK) {
debug_printf("libusb: ERROR connecting to the USB manager\n");
/*
* initialize the devices with the descriptors
- * Do not free the tmp, since the data is still used by the descriptors
*/
usb_device_init(tmp);
{\r
errval_t err;\r
uint32_t ret_status = 0;\r
- uint8_t *data = NULL;\r
+ uint8_t data[2048];\r
size_t length = 0;\r
usb_error_t ret;\r
\r
USB_DEBUG_IDC("libusb: usb_do_request_read()\n");\r
\r
err = usb_manager.vtbl.request_read(&usb_manager, (uint8_t*) req,\r
- sizeof(*req), (uint8_t **) &data, &length, &ret_status);\r
+ sizeof(*req), data, &length, &ret_status);\r
\r
*ret_length = length;\r
\r
\r
USB_DEBUG_IDC("libusb: usb_do_request_read() got data (len=%i)\n", *ret_length);\r
\r
- *ret_data = (void *) data;\r
+ *ret_data = memdup(data, sizeof(data));\r
\r
return (ret);\r
}\r
ahci_mgmt_bound = true;
- // populate list
- uint8_t *port_ids;
- size_t len;
-
- err = ahci_mgmt_rpc.vtbl.list(&ahci_mgmt_rpc, &port_ids, &len);
+ struct ahci_mgmt_list_response__rx_args reply;
+ err = ahci_mgmt_rpc.vtbl.list(&ahci_mgmt_rpc, reply.port_ids, &reply.len);
assert(err_is_ok(err));
- for (size_t i = 0; i < len; i++) {
- uint8_t *data;
- size_t identifylen = 0;
+ for (size_t i = 0; i < reply.len; i++) {
+ struct ahci_mgmt_identify_response__rx_args identify_reply;
err = ahci_mgmt_rpc.vtbl.identify(&ahci_mgmt_rpc,
- port_ids[i], &data, &identifylen);
+ reply.port_ids[i], identify_reply.identify_data,
+ &identify_reply.data_len);
assert(err_is_ok(err));
- assert(identifylen == 512);
+ assert(identify_reply.data_len == 512);
ata_identify_t identify;
- ata_identify_initialize(&identify, (void *)data);
+ ata_identify_initialize(&identify, (void *)identify_reply.identify_data );
//char buf[8192];
//ata_identify_pr(buf, 8192, &identify);
port_ids[i], sectors, sector_size);
struct ahci_handle *handle = calloc(1, sizeof(struct ahci_handle));
- handle->port_num = port_ids[i];
+ handle->port_num = reply.port_ids[i];
struct blockdev_entry *newentry = calloc(1, sizeof(struct blockdev_entry));
newentry->open = false;
//VFS_BLK_DEBUG("bdfs_ahci: read begin: %zu -> %zu\n", bytes, aligned_bytes);
- uint8_t *data;
err = h->ata_rw28_rpc.vtbl.read_dma(&h->ata_rw28_rpc,
- aligned_bytes, blockpos, &data, bytes_read);
- memcpy(buffer, data, *bytes_read);
- free(data);
+ aligned_bytes, blockpos, buffer, bytes_read);
return err;
}
ahci_mgmt_bound = true;
// populate list
- uint8_t *port_ids;
- size_t len;
-
- err = ahci_mgmt_rpc.vtbl.list(&ahci_mgmt_rpc, &port_ids, &len);
+ struct ahci_mgmt_list_response__rx_args reply;
+ err = ahci_mgmt_rpc.vtbl.list(&ahci_mgmt_rpc, reply.port_ids, &reply.len);
assert(err_is_ok(err));
- for (size_t i = 0; i < len; i++) {
+ for (size_t i = 0; i < reply.len; i++) {
if (i > 9) {
break;
}
- uint8_t *data;
- size_t identifylen = 0;
- err = ahci_mgmt_rpc.vtbl.identify(&ahci_mgmt_rpc,
- port_ids[i], &data, &identifylen);
+ struct ahci_mgmt_identify_response__rx_args identify_reply;
+ err = ahci_mgmt_rpc.vtbl.identify(&ahci_mgmt_rpc, reply.port_ids[i],
+ identify_reply.identify_data,
+ &identify_reply.data_len);
assert(err_is_ok(err));
- assert(identifylen == 512);
+ assert(identify_reply.data_len == 512);
ata_identify_t identify;
- ata_identify_initialize(&identify, (void *)data);
+ ata_identify_initialize(&identify, (void *)identify_reply.identify_data);
//char buf[8192];
//ata_identify_pr(buf, 8192, &identify);
port_ids[i], sectors, sector_size);
struct ata_handle *handle = calloc(1, sizeof(struct ata_handle));
- handle->port_num = port_ids[i];
+ handle->port_num = reply.port_ids[i];
struct blockdev_entry *newentry = calloc(1, sizeof(struct blockdev_entry));
newentry->open = false;
}
else if (err == FS_CACHE_NOTPRESENT) {
size_t read_size;
+ data_ = malloc(size);
err = mount->ata_rw28_rpc.vtbl.read_dma(&mount->ata_rw28_rpc,
- size, block, &data_, &read_size);
+ size, block, data_, &read_size);
if (err_is_fail(err)) {
return err;
}
FAT_DEBUG("ata_rw28 initialized.\n");
#endif
-
- // read data from fat boot sector
- uint8_t *data;
size_t size;
+ // read data from fat boot sector
+ uint8_t *data = malloc(ata_rw28__read_dma_block_response_buffer_MAX_ARGUMENT_SIZE);
err = mount->ata_rw28_rpc.vtbl.read_dma_block(&mount->ata_rw28_rpc,
- mount->startblock, &data, &size);
+ mount->startblock, mount->bootsec_data, &size);
if (err_is_fail(err)) {
goto bootsec_read_failed;
}
}
FAT_DEBUG("end sector 0 dump");
#endif
- memcpy(mount->bootsec_data, data, size);
- free(data);
+
data = NULL;
if (memcmp(mount->bootsec_data+0x1FE, "\x55\xAA", 2) != 0) {
mount->block_count);
goto fs_check_failed;
}
+ struct ata_rw28_read_dma_block_response__rx_args reply;
mount->ata_rw28_rpc.vtbl.read_dma_block(&mount->ata_rw28_rpc,
- mount->startblock + fs_info_sector, &data, &size);
- if (memcmp(data+0, "RRaA", 4) != 0 ||
- memcmp(data+0x1e4, "rrAa", 4) != 0)
+ mount->startblock + fs_info_sector, reply.buffer ,
+ &reply.buffer_size);
+ if (memcmp(reply.buffer+0, "RRaA", 4) != 0 ||
+ memcmp(reply.buffer+0x1e4, "rrAa", 4) != 0)
{
FAT_DEBUG_F("File System Information Sector signatures do not match,"
- " %"PRIx32", %"PRIx32, *(uint32_t*)(data+0),
- *(uint32_t*)(data+0x1e4));
+ " %"PRIx32", %"PRIx32, *(uint32_t*)(reply.buffer+0),
+ *(uint32_t*)(reply.buffer+0x1e4));
goto fs_check_failed;
}
- if (memcmp(data+0x1fe, "\x55\xAA", 2) != 0) {
+ if (memcmp(reply.buffer+0x1fe, "\x55\xAA", 2) != 0) {
FAT_DEBUG("File System Information Sector check bytes do not match");
goto fs_check_failed;
}
#ifdef FAT_DEBUG_ENABLED
FAT_DEBUG("dumping FSIS");
- printf("nr of free clusters: %"PRIu32"\n", *(uint32_t*)(data+0x1e8));
+ printf("nr of free clusters: %"PRIu32"\n", *(uint32_t*)(reply.buffer+0x1e8));
printf("most recently allocated cluster: %"PRIu32"\n",
- *(uint32_t*)(data+0x1ec));
+ *(uint32_t*)(reply.buffer+0x1ec));
printf("----------------\n");
#endif
- free(data);
data = NULL;
}
assert(!h->isdir);
- uint8_t *mybuf = NULL;
-
restart:
err = cl->rpc.vtbl.read(&cl->rpc, h->fh, h->pos, bytes,
- &msgerr, &mybuf, bytes_read);
+ &msgerr, buffer, bytes_read);
if (err_is_fail(err)) {
DEBUG_ERR(err, "transport error in read");
return err;
} else if (err_is_fail(msgerr)) {
- assert(mybuf == NULL);
if (err_no(msgerr) == FS_ERR_INVALID_FH && !restarts++) {
// revalidate handle and try again
msgerr = resolve_path(cl, h->path, &h->fh, NULL, NULL);
}
h->pos += *bytes_read;
- memcpy(buffer, mybuf, *bytes_read);
- free(mybuf);
if (*bytes_read < bytes) { // XXX: this can only mean EOF for ramfs
return VFS_ERR_EOF;
{
struct ramfs_handle *h = inhandle;
struct ramfs_client *cl = st;
- char *name;
- trivfs_fsize_t size;
- bool isdir;
- errval_t err, msgerr;
+
+ errval_t err;
int restarts = 0;
assert(h->isdir);
+ struct trivfs_readdir_response__rx_args reply;
restart:
err = cl->rpc.vtbl.readdir(&cl->rpc, h->fh, h->pos,
- &msgerr, &name, &isdir, &size);
+ &reply.err, reply.name, &reply.isdir, &reply.size);
if (err_is_fail(err)) {
DEBUG_ERR(err, "transport error in readdir");
return err;
- } else if (err_is_fail(msgerr)) {
- assert(name == NULL);
- if (err_no(msgerr) == FS_ERR_INVALID_FH && !restarts++) {
+ } else if (err_is_fail(reply.err)) {
+ if (err_no(reply.err) == FS_ERR_INVALID_FH && !restarts++) {
// revalidate handle and try again
if (h->fh == cl->rootfh) { // XXX: revalidate root
err = cl->rpc.vtbl.getroot(&cl->rpc, &cl->rootfh);
h->fh = cl->rootfh;
goto restart;
} else {
- msgerr = resolve_path(cl, h->path, &h->fh, NULL, NULL);
- if (err_is_ok(msgerr)) {
+ reply.err = resolve_path(cl, h->path, &h->fh, NULL, NULL);
+ if (err_is_ok(reply.err)) {
goto restart;
}
}
}
- if (err_no(msgerr) != FS_ERR_INDEX_BOUNDS) {
- DEBUG_ERR(msgerr, "server error in readdir");
+ if (err_no(reply.err) != FS_ERR_INDEX_BOUNDS) {
+ DEBUG_ERR(reply.err, "server error in readdir");
}
- return msgerr;
+ return reply.err;
}
h->pos++;
if (retname != NULL) {
- *retname = name;
+ *retname = strdup(reply.name);
}
if (info != NULL) {
- info->type = isdir ? VFS_DIRECTORY : VFS_FILE;
- info->size = size;
+ info->type = reply.isdir ? VFS_DIRECTORY : VFS_FILE;
+ info->size = reply.size;
}
return SYS_ERR_OK;
return LIB_ERR_NAMESERVICE_NOT_BOUND;
}
- char* record = NULL;
- octopus_trigger_id_t tid;
- errval_t error_code;
- err = r->vtbl.get(r, iface, NOP_TRIGGER, &record, &tid, &error_code);
+ struct octopus_get_response__rx_args reply;
+ err = r->vtbl.get(r, iface, NOP_TRIGGER, reply.output, &reply.tid, &reply.error_code);
if (err_is_fail(err)) {
goto out;
}
- err = error_code;
+ err = reply.error_code;
if (err_is_fail(err)) {
if (err_no(err) == OCT_ERR_NO_RECORD) {
err = err_push(err, XEON_PHI_ERR_CLIENT_DOMAIN_VOID);
}
xphi_dom_id_t domid = 0;
- err = oct_read(record, "_ { domid: %d }", &domid);
+ err = oct_read(reply.output, "_ { domid: %d }", &domid);
if (err_is_fail(err) || domid == 0) {
err = err_push(err, XEON_PHI_ERR_CLIENT_DOMAIN_VOID);
goto out;
*retdomid = domid;
}
- out: free(record);
-
+ out:
return err;
#endif
}
return LIB_ERR_NAMESERVICE_NOT_BOUND;
}
- char* record = NULL;
- errval_t error_code;
- err = r->vtbl.wait_for(r, iface, &record, &error_code);
+ struct octopus_wait_for_response__rx_args reply;
+ err = r->vtbl.wait_for(r, iface, reply.record, &reply.error_code);
if (err_is_fail(err)) {
goto out;
}
- err = error_code;
+ err = reply.error_code;
if (err_is_fail(err)) {
if (err_no(err) == OCT_ERR_NO_RECORD) {
err = err_push(err, XEON_PHI_ERR_CLIENT_DOMAIN_VOID);
}
xphi_dom_id_t domid = 0;
- err = oct_read(record, "_ { domid: %d }", &domid);
+ err = oct_read(reply.record, "_ { domid: %d }", &domid);
if (err_is_fail(err)) {
err = err_push(err, XEON_PHI_ERR_CLIENT_DOMAIN_VOID);
goto out;
}
out:
- free(record);
return err;
#endif
}
}
snprintf(record, len+1, format, iface, domid);
- char* ret = NULL;
octopus_trigger_id_t tid;
errval_t error_code;
- err = r->vtbl.set(r, record, 0, NOP_TRIGGER, 0, &ret, &tid, &error_code);
+ err = r->vtbl.set(r, record, 0, NOP_TRIGGER, 0, NULL, &tid, &error_code);
if (err_is_fail(err)) {
goto out;
}
C.SComment "unmarshall message number from first word, set fragment to 0",
C.Ex $ C.Assignment rx_msgnum_field $
C.Binary C.BitwiseAnd (C.SubscriptOf msgwords $ C.NumConstant 0) msgnum_mask,
- C.Ex $ C.Assignment rx_msgfrag_field (C.NumConstant 0),
- C.Ex $ C.Assignment binding_incoming_token (C.Binary C.BitwiseAnd (C.Binary C.RightShift (C.SubscriptOf msgwords $ C.NumConstant 0) (C.NumConstant (toInteger msgnum_bits))) (C.HexConstant 0xffffffff))
+ C.Ex $ C.Assignment rx_msgfrag_field (C.NumConstant 0)
] [],
C.SBlank,
-- a (possibly larger) message argument, by type, qualified name and bit offset
data ArgFieldFragment = ArgFieldFragment TypeBuiltin ArgField Int
| MsgCode -- implicit argument, occurs once per message
- | Token
deriving (Show, Eq)
-- an argument field names the lowest-level field of an argument
-- each entry in the list is a field name and (optional) array index
-- eg. foo[3].bar is [NamedField "foo", ArrayField 3, NamedField "bar"]
type ArgField = [ArgFieldElt]
-data ArgFieldElt = NamedField String | ArrayField Integer
+data ArgFieldElt = NamedField String | ArrayField Integer | TokenField
deriving (Show, Eq)
-- modes of transfering a cap
msg_code_type :: TypeBuiltin
msg_code_type = UInt16
-msg_code_token :: TypeBuiltin
-msg_code_token = UInt32
build_msg_spec :: Arch -> Int -> Bool -> [TypeDef] -> MessageDef -> MsgSpec
build_msg_spec arch words_per_frag contains_msgcode types (Message _ mn args _)
-- build an LMP message spec by merging in the caps from a UMP spec
build_lmp_msg_spec :: Arch -> [TypeDef] -> MessageDef -> LMPMsgSpec
-build_lmp_msg_spec arch types msgdef = LMPMsgSpec mn (merge_caps frags caps)
+build_lmp_msg_spec arch types (Message msgt msgn args msgm) = LMPMsgSpec mn (merge_caps frags caps)
where
- MsgSpec mn frags caps = build_msg_spec arch (lmp_words arch) True types msgdef
+ MsgSpec mn frags caps = build_msg_spec arch (lmp_words arch) True types (Message msgt msgn (Arg (Builtin UInt32) Token:args) msgm)
-- XXX: ensure that we never put a cap together with an overflow fragment
-- even though this could work at the transport-level, the current
where
-- does the first fragment need to contain the message code?
first_frag
- | contains_msgcode = MsgFragment [[MsgCode, Token]]
+ | contains_msgcode = MsgFragment [[MsgCode]]
| otherwise = MsgFragment []
group_frags :: [FieldFragment] -> MsgFragment -> [MsgFragment]
| t `elem` [UInt8, Int8, Char]
= [OverflowField $ BufferFragment t [NamedField n] [NamedField l]]
| otherwise = error "dynamic arrays of types other than char/int8/uint8 are not yet supported"
+ arg_fragments (Arg (Builtin b) Token) = fragment_builtin [TokenField] b
arg_fragments (Arg (Builtin b) v) = fragment_builtin [NamedField (varname v)] b
arg_fragments (Arg (TypeVar t) v) =
fragment_typedef [NamedField (varname v)] (lookup_type_name types t)
= min (wordsize a) (bitsizeof_builtin a t)
bitsizeof_argfieldfrag a MsgCode
= bitsizeof_builtin a msg_code_type
-bitsizeof_argfieldfrag a Token
- = bitsizeof_builtin a msg_code_token
bitsizeof_builtin :: Arch -> TypeBuiltin -> Int
bitsizeof_builtin _ UInt8 = 8
argfield_expr :: Direction -> String -> ArgField -> C.Expr
argfield_expr TX mn [NamedField n] = tx_union_elem mn n
argfield_expr RX mn [NamedField n] = rx_union_elem mn n
+argfield_expr TX mn [TokenField] = C.DerefField bindvar "outgoing_token"
+argfield_expr RX mn [TokenField] = C.DerefField bindvar "incoming_token"
argfield_expr _ _ [ArrayField n] = error "invalid; top-level array"
argfield_expr dir mn ((NamedField n):rest)
= C.FieldOf (argfield_expr dir mn rest) n
mkfieldexpr :: ArgFieldFragment -> C.Expr
mkfieldexpr MsgCode = C.Variable $ msg_enum_elem_name ifn mn
- mkfieldexpr Token = C.DerefField bindvar "outgoing_token"
mkfieldexpr (ArgFieldFragment t af 0) = fieldaccessor t af
mkfieldexpr (ArgFieldFragment t af off) =
C.Binary C.RightShift (fieldaccessor t af) (C.NumConstant $ toInteger off)
store_arg_frags _ _ _ _ _ _ [] = []
store_arg_frags arch ifn mn msgdata_ex word bitoff (MsgCode:rest)
= store_arg_frags arch ifn mn msgdata_ex word (bitoff + bitsizeof_argfieldfrag arch MsgCode) rest
-store_arg_frags arch ifn mn msgdata_ex word bitoff (Token:rest)
- = store_arg_frags arch ifn mn msgdata_ex word (bitoff + bitsizeof_argfieldfrag arch Token) rest
store_arg_frags _ _ _ _ _ _ ((ArgFieldFragment String _ _):_)
= error "strings are not handled here"
store_arg_frags arch ifn mn msgdata_ex word bitoff (aff@(ArgFieldFragment t af argoff):rest)
mkfieldexpr :: ArgFieldFragment -> C.Expr
mkfieldexpr MsgCode = C.Variable $ msg_enum_elem_name ifn mn
- mkfieldexpr Token = C.DerefField bindvar "outgoing_token"
mkfieldexpr (ArgFieldFragment t af 0) = fieldaccessor t af
mkfieldexpr (ArgFieldFragment t af off) =
C.Binary C.RightShift (fieldaccessor t af) (C.NumConstant $ toInteger off)
> data Variable = Name String
> | StringArray String Integer
> | DynamicArray String String Integer
+> | Token
> deriving (Show)
>
> arg, (.@.) :: TypeRef -> String -> MessageArgument
ACPI_ERR_INVALID_PATH_NAME, NULL);
assert(err_is_ok(err));
}
-
- free(pathname);
}
static void set_device_irq(struct acpi_binding *b, char* device, uint32_t irq)
reply:
err = b->tx_vtbl.set_device_irq_response(b, NOP_CONT, err);
assert(err_is_ok(err));
-
- free(device);
}
static void reset_handler(struct acpi_binding *b)
kcb_id, barrelfish_id, cap_key);
free(cap_key);
- free(record);
}
if (len == 0) {
DEBUG("%s:%s:%d: No KCB found?\n",
printf("CPU %"PRIu64": APIC_ID=%"PRIu64" APIC_PROCESSOR_ID=%"PRIu64" ENABLED=%"PRIu64"\n",
barrelfish_id, apic_id, processor_id, enabled);
-
- free(record);
}
if (len == 0) {
DEBUG("%s:%s:%d: No cpus found?\n",
*apic_id = (archid_t) apic;
*cpu_type = (enum cpu_type) type;
out:
- free(record);
return err;
#endif
}
static void output_handler(struct serial_binding *b, char *c, size_t len)
{
serial_write(c, len);
- free(c);
}
static void associate_stdin_handler(struct serial_binding *b)
static void characters_handler(void *st, char *buffer, size_t length)
{
serial_write(buffer, length);
- free(buffer);
}
static void configuration_handler(void *st, terminal_config_option_t opt,
interphi_domain_wait_reply(ws->node, err, ws->usr_state, domid);
free(state);
- free(record);
}
/**
return LIB_ERR_NAMESERVICE_NOT_BOUND;
}
- char* record = NULL;
- octopus_trigger_id_t tid;
- errval_t error_code;
- err = r->vtbl.get(r, iface, NOP_TRIGGER, &record, &tid, &error_code);
+ struct octopus_get_response__rx_args reply;
+ err = r->vtbl.get(r, iface, NOP_TRIGGER, reply.output, &reply.tid,
+ &reply.error_code);
if (err_is_fail(err)) {
goto out;
}
- err = error_code;
+ err = reply.error_code;
if (err_is_fail(err)) {
if (err_no(err) == OCT_ERR_NO_RECORD) {
err = err_push(err, XEON_PHI_ERR_CLIENT_DOMAIN_VOID);
}
xphi_dom_id_t domid = 0;
- err = oct_read(record, "_ { domid: %d }", &domid);
+ err = oct_read(reply.output, "_ { domid: %d }", &domid);
if (err_is_fail(err) || domid == 0) {
err = err_push(err, XEON_PHI_ERR_CLIENT_DOMAIN_VOID);
goto out;
*retdomid = domid;
}
- out: free(record);
+ out:
return err;
}
OCT_ERR_NO_RECORD, octopus_BINDING_EVENT, m,
domain_wait_trigger_handler, ws);
- char* record = NULL;
- errval_t error_code;
- err = c->call_seq.get(c, iface, iface_set_trigger, &record, &ws->tid,
- &error_code);
+ struct octopus_get_response__rx_args reply;
+
+ assert(!"FIXME");
+ err = c->call_seq.get(c, iface, iface_set_trigger, NULL, &ws->tid,
+ &reply.error_code);
if (err_is_fail(err)) {
- free(record);
return err;
}
- if (err_is_fail(error_code)) {
- free(record);
- return error_code;
+ if (err_is_fail(reply.error_code)) {
+ return reply.error_code;
}
free(ws);
xphi_dom_id_t domid = 0;
- err = oct_read(record, "_ { domid: %d }", &domid);
+ err = oct_read(reply.output, "_ { domid: %d }", &domid);
if (err_is_fail(err) || domid == 0) {
err = err_push(err, XEON_PHI_ERR_CLIENT_DOMAIN_VOID);
- free(record);
return err;
}
}
snprintf(record, len + 1, format, iface, domid);
- char* ret = NULL;
octopus_trigger_id_t tid;
errval_t error_code;
- err = r->vtbl.set(r, record, 0, NOP_TRIGGER, 0, &ret, &tid, &error_code);
+ err = r->vtbl.set(r, record, 0, NOP_TRIGGER, 0, NULL, &tid, &error_code);
if (err_is_fail(err)) {
goto out;
}
static void rx_msg_string(struct xmplmsg_binding *b, char *str)
{
printf("server: received msg_string:\n\t%s\n", str);
- free(str);
+ // no need to free the string
}
static struct xmplmsg_rx_vtbl rx_vtbl = {
{
debug_printf("client: rx_myrpc_response called: %s\n", s);
- free(s);
+ // no need to free s
}
static void run_client(struct xmplthc_thc_client_binding_t *cl)
{
int i = 42;
- char *s = NULL;
+
+ char s[512];
// regular message
cl->send.mymsg(cl, i);
// call/response
cl->send.mycall(cl, i);
debug_printf("client: sent mycall: %d\n", i);
- cl->recv.myresponse(cl, &s);
+ cl->recv.myresponse(cl, s);
debug_printf("client: received myresponse: '%s'\n", s);
- free(s);
// rpc as call/response
cl->send.myrpc(cl, i);
debug_printf("client: sent myrpc call msg: %d\n", i);
- cl->recv.myrpc(cl, &s);
+ cl->recv.myrpc(cl, s);
debug_printf("client: received myrpc response msg: '%s'\n", s);
- free(s);
// rpc
- cl->call_seq.myrpc(cl, i, &s);
+ cl->call_seq.myrpc(cl, i, s);
debug_printf("client: returned from myrpc(%d, '%s')\n", i, s);
- free(s);
debug_printf("finished client\n");
}
}
static int nproc(int argc, char* argv[]) {
- errval_t err, error_code;
- octopus_trigger_id_t tid;
+ errval_t err;
size_t count = 0;
char** names = NULL;
- char* buffer;
static char* spawnds = "r'spawn.[0-9]+' { iref: _ }";
oct_init();
+ struct octopus_get_names_response__rx_args reply;
struct octopus_rpc_client *r = get_octopus_rpc_client();
- err = r->vtbl.get_names(r, spawnds, NOP_TRIGGER, &buffer, &tid, &error_code);
- if (err_is_fail(err) || err_is_fail(error_code)) {
+ err = r->vtbl.get_names(r, spawnds, NOP_TRIGGER, reply.output,
+ &reply.tid, &reply.error_code);
+ if (err_is_fail(err) || err_is_fail(reply.error_code)) {
DEBUG_ERR(err, "get_names failed");
goto out;
}
- err = oct_parse_names(buffer, &names, &count);
+ err = oct_parse_names(reply.output, &names, &count);
if (err_is_fail(err)) {
DEBUG_ERR(err, "parse_names failed.");
goto out;
}
out:
- free(buffer);
oct_free_names(names, count);
printf("%zx\n", count);
out:
assert(!(mode & OCT_REMOVED));
- free(record);
}
static char* local_apics = "r'hw\\.processor\\.[0-9]+' { processor_id: _, "
KALUGA_DEBUG("Waiting for acpi");
char* record = NULL;
errval_t err = oct_wait_for(&record, "acpi { iref: _ }");
- free(record);
if (err_is_fail(err)) {
return err_push(err, KALUGA_ERR_WAITING_FOR_ACPI);
}
{
// Check if the core we're spawning on is already up...
struct octopus_thc_client_binding_t* cl = oct_get_thc_client();
- char* iref_record = NULL;
- octopus_trigger_id_t tid;
errval_t error_code;
octopus_trigger_t t = oct_mktrigger(OCT_ERR_NO_RECORD,
octopus_BINDING_EVENT, OCT_ON_SET, spawnd_up_event, state);
char* query = malloc(length+1);
snprintf(query, length+1, format, core);
- errval_t err = cl->call_seq.get(cl, query, t, &iref_record, &tid, &error_code);
+ errval_t err = cl->call_seq.get(cl, query, t, NULL, NULL, &error_code);
free(query);
- free(iref_record);
if (err_is_fail(err)) {
return err;
if (err_is_fail(err)) {
USER_PANIC_ERR(err, "Got malformed device record?");
}
+
+ /* duplicate device record as we may need it for later */
+ device_record = strdup(device_record);
+ assert(device_record);
+
// Ask the SKB which binary and where to start it...
static char* query = "find_pci_driver(pci_card(%"PRIu64", %"PRIu64", _, _, _), Driver),"
struct module_info* mi = find_module("pci");
if (mi == NULL) {
KALUGA_DEBUG("PCI driver not found or not declared as auto.");
- goto out;
+ return;
}
// XXX: always spawn on my_core_id; otherwise we need to check that
break;
}
}
-
-out:
- free(bridge_record);
}
errval_t watch_for_pci_root_bridge(void)
struct ipi_alloc_notify_reply_state {
struct monitor_msg_queue_elem elem;
- struct monitor_ipi_alloc_notify_reply__args args;
+ struct monitor_ipi_alloc_notify_reply__tx_args args;
};
static void
struct ipi_alloc_notify_reply_state {
struct monitor_msg_queue_elem elem;
- struct monitor_ipi_alloc_notify_reply__args args;
+ struct monitor_ipi_alloc_notify_reply__tx_args args;
};
static void
struct ipi_alloc_notify_reply_state {
struct monitor_msg_queue_elem elem;
- struct monitor_ipi_alloc_notify_reply__args args;
+ struct monitor_ipi_alloc_notify_reply__tx_args args;
};
static void
}
assert(r != NULL);
- char* buffer = NULL;
- errval_t error_code;
- octopus_trigger_id_t tid;
-
char** names = NULL;
size_t count = 0;
static char* spawnds = "r'spawn.[0-9]+' { iref: _ }";
- err = r->vtbl.get_names(r, spawnds, NOP_TRIGGER, &buffer, &tid, &error_code);
- if (err_is_fail(err) || err_is_fail(error_code)) {
+ struct octopus_get_names_response__rx_args reply;
+ err = r->vtbl.get_names(r, spawnds, NOP_TRIGGER, reply.output,
+ &reply.tid, &reply.error_code);
+ if (err_is_fail(err) || err_is_fail(reply.error_code)) {
err = err_push(err, SPAWN_ERR_FIND_SPAWNDS);
goto out;
}
- err = oct_parse_names(buffer, &names, &count);
+ err = oct_parse_names(reply.output, &names, &count);
if (err_is_fail(err)) {
goto out;
}
out:
- free(buffer);
oct_free_names(names, count);
if (err_is_fail(err)) {
DEBUG_ERR(err, "num_spawnds_online");
struct boot_core_reply_state {
struct monitor_msg_queue_elem elem;
- struct monitor_boot_core_reply__args args;
+ struct monitor_boot_core_reply__tx_args args;
};
static void
struct cap_receive_request_state {
struct monitor_msg_queue_elem elem;
- struct monitor_cap_receive_request__args args;
+ struct monitor_cap_receive_request__tx_args args;
uintptr_t your_mon_id;
struct intermon_binding *b;
};
struct alloc_iref_reply_state {
struct monitor_msg_queue_elem elem;
- struct monitor_alloc_iref_reply__args args;
+ struct monitor_alloc_iref_reply__tx_args args;
struct monitor_binding *b;
};
struct bind_lmp_client_request_error_state {
struct monitor_msg_queue_elem elem;
- struct monitor_bind_lmp_reply_client__args args;
+ struct monitor_bind_lmp_reply_client__tx_args args;
struct monitor_binding *serv_binding;
struct capref ep;
};
struct bind_lmp_service_request_state {
struct monitor_msg_queue_elem elem;
- struct monitor_bind_lmp_service_request__args args;
+ struct monitor_bind_lmp_service_request__tx_args args;
struct monitor_binding *b;
uintptr_t domain_id;
};
struct bind_lmp_reply_client_state {
struct monitor_msg_queue_elem elem;
- struct monitor_bind_lmp_reply_client__args args;
+ struct monitor_bind_lmp_reply_client__tx_args args;
struct monitor_binding *b;
};
struct new_monitor_binding_reply_state {
struct monitor_msg_queue_elem elem;
- struct monitor_new_monitor_binding_reply__args args;
+ struct monitor_new_monitor_binding_reply__tx_args args;
};
static void
assert(from <= routing_table_max_coreid);
assert(routing_table[from] == NULL);
assert(len == routing_table_max_coreid + 1);
- routing_table[from] = to;
+ routing_table[from] = memdup(to, len * sizeof(coreid_t));
if (--routing_table_nentries == 0) {
// we have received the complete table!
assert(len == max_coreid + 1);
assert(source_coreid <= max_coreid);
- routing_table[source_coreid] = to;
+ routing_table[source_coreid] = memdup(to, len * sizeof(coreid_t));
} else {
- assert(to == NULL);
-
if (err_no(err) != MON_ERR_INCOMPLETE_ROUTE) {
DEBUG_ERR(err, "unexpected error retrieving routing table");
}
}
}
}
-
- free(destinations);
}
// return the next hop (based on the routing table)
struct multihop_intermon_bind_reply_state {
struct intermon_msg_queue_elem elem;
- struct intermon_bind_multihop_intermon_reply__args args;
+ struct intermon_bind_multihop_intermon_reply__tx_args args;
};
// called when channel is no longer busy
struct multihop_monitor_bind_reply_state {
struct monitor_msg_queue_elem elem;
- struct monitor_multihop_bind_client_reply__args args;
+ struct monitor_multihop_bind_client_reply__tx_args args;
};
// continue function to forward a message to a dispatcher
// monitor message forwarding state
struct monitor_multihop_message_forwarding_state {
struct monitor_msg_queue_elem elem;
- struct monitor_multihop_message__args args;
+ struct monitor_multihop_message__rx_args args;
};
// inter-monitor forwarding state
struct intermon_message_forwarding_state {
struct intermon_msg_queue_elem elem;
- struct intermon_multihop_message__args args;
+ struct intermon_multihop_message__rx_args args;
};
/**
me->args.direction = direction;
me->args.flags = flags;
me->args.ack = ack;
- me->args.payload = payload;
+ memcpy(me->args.payload, payload, size);
+
me->args.size = size;
me->elem.cont = multihop_message_intermon_forward_cont;
errval_t err;
// try to forward message
- err = b->tx_vtbl.multihop_message(b, MKCONT(free, payload), vci, direction,
+ err = b->tx_vtbl.multihop_message(b, NOP_CONT, vci, direction,
flags, ack, payload, size);
if (err_is_fail(err)) {
me->args.direction = direction;
me->args.flags = flags;
me->args.ack = ack;
- me->args.payload = payload;
+ memcpy(me->args.payload, payload, size);
+
me->args.size = size;
me->elem.cont = multihop_message_intermon_forward_cont;
me->args.direction = direction;
me->args.flags = flags;
me->args.ack = ack;
- me->args.payload = payload;
+ memcpy(me->args.payload, payload, size);
+
me->args.size = size;
me->elem.cont = multihop_message_forward_continue;
me->args.direction = direction;
me->args.flags = flags;
me->args.ack = ack;
- me->args.payload = payload;
+ memcpy(me->args.payload, payload, size);
+
me->args.size = size;
me->elem.cont = multihop_message_intermon_forward_cont;
errval_t err;
// try to forward message
- err = b->tx_vtbl.multihop_message(b, MKCONT(free, payload), vci, direction,
+ err = b->tx_vtbl.multihop_message(b, NOP_CONT, vci, direction,
flags, ack, payload, size);
if (err_is_fail(err)) {
me->args.direction = direction;
me->args.flags = flags;
me->args.ack = ack;
- me->args.payload = payload;
+ memcpy(me->args.payload, payload, size);
+
me->args.size = size;
me->elem.cont = multihop_message_forward_continue;
// intermonitor capability forwarding state
struct multihop_intermon_capability_forwarding_state {
struct intermon_msg_queue_elem elem;
- struct intermon_multihop_cap_send__args args;
+ struct intermon_multihop_cap_send__tx_args args;
};
// monitor capability forwarding state
struct multihop_capability_forwarding_state {
struct monitor_msg_queue_elem elem;
- struct monitor_multihop_cap_send__args args;
+ struct monitor_multihop_cap_send__tx_args args;
};
/**
/**
* \brief Request for some memory (over the memory allocation channel)
*/
-static void mem_alloc_handler(struct monitor_mem_binding *b,
+// static void mem_alloc_handler(struct monitor_mem_binding *b,
+// uint8_t size_bits, genpaddr_t minbase,
+// genpaddr_t maxlimit, coreid_t from)
+static errval_t mem_alloc_handler(struct monitor_mem_binding *b,
uint8_t size_bits, genpaddr_t minbase,
- genpaddr_t maxlimit, coreid_t from)
+ genpaddr_t maxlimit, coreid_t from,
+ errval_t *out_err, monitor_mem_caprep_t *out_caprep)
{
struct capref *cap = NULL;
monitor_mem_caprep_t caprep = {0,0,0,0};
out:
// RPC protocol, this can never fail with TX_BUSY
- err = b->tx_vtbl.alloc_response(b, NOP_CONT, reterr, caprep);
- if (err_is_fail(err)) {
- DEBUG_ERR(err, "reply failed");
- }
+ *out_err = reterr;
+ *out_caprep = caprep;
+ // err = b->tx_vtbl.alloc_response(b, NOP_CONT, reterr, caprep);
+ // if (err_is_fail(err)) {
+ // DEBUG_ERR(err, "reply failed");
+ // }
if (cap) {
capops_delete(get_cap_domref(*cap), mem_alloc_delete_result_handler, cap);
}
+ return SYS_ERR_OK;
}
-static void mem_free_handler(struct monitor_mem_binding *b,
+static errval_t mem_free_handler(struct monitor_mem_binding *b,
monitor_mem_caprep_t caprep,
- genpaddr_t base, uint8_t bits)
+ genpaddr_t base, uint8_t bits, errval_t *result)
{
- errval_t err, result;
+ debug_printf("%s:%d\n", __func__, __LINE__);
+ errval_t err;
// this should only run on the bsp monitor
assert(bsp_monitor);
DEBUG_CAPOPS("%s\n", __FUNCTION__);
struct capref cap;
err = slot_alloc(&cap);
if (err_is_fail(err)) {
- result = err_push(err, LIB_ERR_SLOT_ALLOC);
+ *result = err_push(err, LIB_ERR_SLOT_ALLOC);
goto out;
}
err = monitor_cap_create(cap, &cap_raw, my_core_id);
if (err_is_fail(err)) {
- result = err_push(err, MON_ERR_CAP_CREATE);
+ *result = err_push(err, MON_ERR_CAP_CREATE);
goto out;
}
DEBUG_CAPOPS("%s: created local copy, sending to memserv\n", __FUNCTION__);
struct mem_rpc_client *mb = get_mem_client();
assert(mb);
- err = mb->vtbl.free_monitor(mb, cap, base, bits, &result);
+ err = mb->vtbl.free_monitor(mb, cap, base, bits, result);
if (err_is_fail(err)) {
- result = err;
+ *result = err;
}
out:
- DEBUG_CAPOPS("%s: sending reply: %s\n", __FUNCTION__, err_getstring(result));
- err = b->tx_vtbl.free_response(b, NOP_CONT, result);
- assert(err_is_ok(err));
+ DEBUG_CAPOPS("%s: sending reply: %s\n", __FUNCTION__, err_getstring(*result));
+ debug_printf("%s:%d response\n", __func__, __LINE__);
+// err = b->tx_vtbl.free_response(b, NOP_CONT, result);
+// assert(err_is_ok(err));
+ return SYS_ERR_OK;
}
static errval_t mon_ram_alloc(struct capref *ret, uint8_t size_bits,
return reterr;
}
-static struct monitor_mem_rx_vtbl the_monitor_mem_vtable = {
+static struct monitor_mem_rpc_rx_vtbl the_monitor_mem_rpc_vtable = {
.alloc_call = mem_alloc_handler,
.free_call = mem_free_handler,
};
static errval_t monitor_mem_connected(void *st, struct monitor_mem_binding *b)
{
- b->rx_vtbl = the_monitor_mem_vtable;
+ b->rpc_rx_vtbl = the_monitor_mem_rpc_vtable;
return SYS_ERR_OK;
}
struct send_phase_data_state {
struct intermon_msg_queue_elem elem;
- struct intermon_rsrc_phase_data__args args;
+ struct intermon_rsrc_phase_data__tx_args args;
};
static void send_phase_data_handler(struct intermon_binding *b,
struct rsrc_phase_state {
struct intermon_msg_queue_elem elem;
- struct intermon_rsrc_phase__args args;
+ struct intermon_rsrc_phase__tx_args args;
};
static void rsrc_phase_handler(struct intermon_binding *b,
assert(len == sizeof(d->phase));
memcpy(&d->phase, data, len);
- // Free copy
- free(data);
-
return SYS_ERR_OK;
}
struct bind_ump_request_state {
struct intermon_msg_queue_elem elem;
- struct intermon_bind_ump_request__args args;
+ struct intermon_bind_ump_request__tx_args args;
struct frame_identity frameid;
struct capability capability;
struct monitor_binding *mb;
struct bind_ump_reply_state {
struct intermon_msg_queue_elem elem;
- struct intermon_bind_ump_reply__args args;
+ struct intermon_bind_ump_reply__tx_args args;
struct capability capability;
};
struct bind_ump_service_request_state {
struct monitor_msg_queue_elem elem;
- struct monitor_bind_ump_service_request__args args;
+ struct monitor_bind_ump_service_request__tx_args args;
struct intermon_binding *binding;
uintptr_t your_mon_id;
};
struct bind_ump_reply_client_state {
struct monitor_msg_queue_elem elem;
- struct monitor_bind_ump_reply_client__args args;
+ struct monitor_bind_ump_reply_client__tx_args args;
};
static void bind_ump_reply_client_cont(struct monitor_binding *domain_binding,
pci_hdr1_initialize(&bhdr, addr);
//ACPI_HANDLE child;
- char* child = NULL;
+ char* child = malloc(acpi__read_irq_table_response_child_MAX_ARGUMENT_SIZE);
+ assert(child);
errval_t error_code;
PCI_DEBUG("get irq table for (%hhu,%hhu,%hhu)\n", (*busnum) + 2,
addr.device, addr.function);
.function = addr.function,
};
errval_t err = cl->vtbl.read_irq_table(cl, handle, xaddr, (*busnum) + 2,
- &error_code, &child);
+ &error_code, child);
if (err_is_ok(err) && error_code == ACPI_ERR_NO_CHILD_BRIDGE){
PCI_DEBUG("No corresponding ACPI entry for bridge found\n");
} else if (err_is_fail(err) || err_is_fail(error_code)) {
pci_hdr0_int_pin_rd(&devhdr) - 1);
// octopus start
- char* record = NULL;
static char* device_fmt = "hw.pci.device. { "
"bus: %u, device: %u, function: %u, "
"vendor: %u, device_id: %u, class: %u, "
classcode.subclss, classcode.prog_if);
assert(err_is_ok(err));
- free(record);
// end octopus
query_bars(devhdr, addr, false);
}
free(handle);
+
}
#if 0
pcie_enable();
pci_add_root(addr, maxbus, acpi_node);
pcie_disable();
-
- free(record);
}
out: oct_free_names(names, len);
if (err_is_fail(err)) {
err = b->tx_vtbl.init_pci_device_response(b, NOP_CONT, err, 0,
- cc->nr_caps_bar);
+ cc->nr_caps_bar[0],
+ cc->nr_caps_bar[1],
+ cc->nr_caps_bar[2],
+ cc->nr_caps_bar[3],
+ cc->nr_caps_bar[4],
+ cc->nr_caps_bar[5]);
} else {
err = b->tx_vtbl.init_pci_device_response(b, NOP_CONT, err,
cc->nr_allocated_bars,
- cc->nr_caps_bar);
+ cc->nr_caps_bar[0],
+ cc->nr_caps_bar[1],
+ cc->nr_caps_bar[2],
+ cc->nr_caps_bar[3],
+ cc->nr_caps_bar[4],
+ cc->nr_caps_bar[5]);
+
}
assert(err_is_ok(err));
}
if(err_is_fail(e)) {
if(err_no(e) == FLOUNDER_ERR_TX_BUSY) {
struct client_state *st = b->st;
- struct pci_get_bar_cap_response__args *me = malloc(sizeof(*me));
+ struct pci_get_bar_cap_response__tx_args *me = malloc(sizeof(*me));
assert(me != NULL);
me->err = err;
me->cap = cap;
{
struct pci_binding *b = arg;
struct client_state *st = b->st;
- struct pci_get_bar_cap_response__args *a = st->cont_st;
+ struct pci_get_bar_cap_response__tx_args *a = st->cont_st;
get_bar_cap_response_cont(b, a->err, a->cap, a->type, a->bar_nr);
free(a);
}
struct msgq_elem {
enum trivfs_msg_enum msgnum;
- union trivfs_arg_union a;
+ union trivfs_rx_arg_union a;
struct dirent *dirent;
struct msgq_elem *next;
};
assert(q != NULL);
q->msgnum = trivfs_readdir_response__msgnum;
q->a.readdir_response.err = reterr;
- q->a.readdir_response.name = (char *)name;
+ strncpy(q->a.readdir_response.name, name, sizeof(q->a.readdir_response.name));
q->a.readdir_response.isdir = isdir;
q->a.readdir_response.size = size;
q->dirent = err_is_ok(reterr) ? e : NULL;
msg_enqueue(st, b, q);
-
+
}
static void lookup(struct trivfs_binding *b, trivfs_fh_t dir, char *name)
isdir = ramfs_isdir(e);
reply:
- free(name);
if (queue_is_empty(st)) {
err = b->tx_vtbl.lookup_response(b, NOP_CONT, reterr, retfh, isdir);
if (err_is_ok(err)) {
assert(q != NULL);
q->msgnum = trivfs_read_response__msgnum;
q->a.read_response.err = reterr;
- q->a.read_response.data = buf;
+ memcpy(q->a.read_response.data, buf, len);
q->a.read_response.retlen = len;
q->dirent = err_is_ok(reterr) ? f : NULL;
msg_enqueue(st, b, q);
memcpy(buf, data, len);
reply:
- free(data);
if (queue_is_empty(st)) {
err = b->tx_vtbl.write_response(b, NOP_CONT, reterr);
if (err_is_ok(err)) {
goto reply;
}
+
+ name = strdup(name);
+ if (name == NULL) {
+ reterr = LIB_ERR_MALLOC_FAIL;
+ goto reply;
+ }
struct dirent *newf;
err = ramfs_create(d, name, &newf);
if (err_is_fail(err)) {
+ free(name);
reterr = err;
goto reply;
}
fh = fh_set(st, newf);
reply:
- if (err_is_fail(reterr)) {
- free(name);
- }
-
if (queue_is_empty(st)) {
err = b->tx_vtbl.create_response(b, NOP_CONT, reterr, fh);
if (err_is_ok(err)) {
goto reply;
}
+ name = strdup(name);
+ if (name == NULL) {
+ reterr = LIB_ERR_MALLOC_FAIL;
+ goto reply;
+ }
+
struct dirent *newd;
err = ramfs_mkdir(d, name, &newd);
if (err_is_fail(err)) {
+ free(name);
reterr = err;
goto reply;
}
fh = fh_set(st, newd);
reply:
- if (err_is_fail(reterr)) {
- free(name);
- }
-
if (queue_is_empty(st)) {
err = b->tx_vtbl.mkdir_response(b, NOP_CONT, reterr, fh);
if (err_is_ok(err)) {
void post_and_execute_string(void);
-#define BUFFER_SIZE SKB_REPLY_BUF_SIZE
-
-
struct skb_query_state {
- char output_buffer[BUFFER_SIZE];
- char error_buffer[BUFFER_SIZE];
+ char output_buffer[skb__run_response_output_MAX_ARGUMENT_SIZE];
+ char error_buffer[skb__run_response_str_error_MAX_ARGUMENT_SIZE];
int output_length;
int error_output_length;
int exec_res;
vfs_mkdir("/tmp");
chdir(ECLIPSE_DIR);
- // make sure, that dlsym has the right table to the statically compiled-in
- // shared libraries...
+ // make sure, that dlsym has the right table to the statically compiled-in
+ // shared libraries...
dlopen_set_params(funcs, sizeof(funcs) / sizeof(*funcs));
- // now set the right values for the eclipse-clp engine
+ // now set the right values for the eclipse-clp engine
ec_set_option_int(EC_OPTION_IO, MEMORY_IO);
ec_set_option_ptr(EC_OPTION_ECLIPSEDIR, ECLIPSE_DIR);
ec_set_option_long(EC_OPTION_GLOBALSIZE, MEMORY_SIZE);
struct skb_query_state* sqs = malloc(sizeof(struct skb_query_state));
- // ec_.m.vm_flags |= 8;
+ // ec_.m.vm_flags |= 8;
SKB_DEBUG("before ec init\n");
int n = ec_init();
if (n != 0) {
errval_t new_reply_state(struct skb_reply_state** srs, rpc_reply_handler_fn reply_handler)
{
- assert(*srs == NULL);
- *srs = malloc(sizeof(struct skb_reply_state));
- if(*srs == NULL) {
- return LIB_ERR_MALLOC_FAIL;
- }
- memset(*srs, 0, sizeof(struct skb_reply_state));
+ assert(*srs == NULL);
+ *srs = malloc(sizeof(struct skb_reply_state));
+ if(*srs == NULL) {
+ return LIB_ERR_MALLOC_FAIL;
+ }
+ memset(*srs, 0, sizeof(struct skb_reply_state));
- (*srs)->rpc_reply = reply_handler;
- (*srs)->next = NULL;
+ (*srs)->rpc_reply = reply_handler;
+ (*srs)->next = NULL;
- return SYS_ERR_OK;
+ return SYS_ERR_OK;
}
void free_reply_state(void* arg) {
- if(arg != NULL) {
- struct skb_reply_state* srt = (struct skb_reply_state*) arg;
- free(srt);
- }
- else {
- assert(!"free_reply_state with NULL argument?");
- }
+ if(arg != NULL) {
+ struct skb_reply_state* srt = (struct skb_reply_state*) arg;
+ free(srt);
+ }
+ else {
+ assert(!"free_reply_state with NULL argument?");
+ }
}
errval_t execute_query(char* query, struct skb_query_state* st)
{
SKB_DEBUG("Executing query: %s\n", query);
- assert(query != NULL);
+ assert(query != NULL);
assert(st != NULL);
- int res;
+ int res;
ec_ref Start = ec_ref_create_newvar();
- st->exec_res = PFLUSHIO;
+ st->exec_res = PFLUSHIO;
st->output_length = 0;
st->error_output_length = 0;
res = 0;
do {
res = ec_queue_read(1, st->output_buffer + st->output_length,
- BUFFER_SIZE - res);
+ sizeof(st->output_buffer) - st->output_length - 1);
st->output_length += res;
- } while ((res != 0) && (st->output_length < BUFFER_SIZE));
+ } while ((res != 0) && (st->output_length < (int)sizeof(st->output_buffer) - 1));
st->output_buffer[st->output_length] = 0;
res = 0;
do {
res = ec_queue_read(2, st->error_buffer + st->error_output_length,
- BUFFER_SIZE - res);
+ sizeof(st->error_buffer) - st->error_output_length - 1);
st->error_output_length += res;
} while ((res != 0) &&
- (st->error_output_length < BUFFER_SIZE));
+ (st->error_output_length < (int)sizeof(st->error_buffer) - 1));
st->error_buffer[st->error_output_length] = 0;
}
static void run_reply(struct skb_binding* b, struct skb_reply_state* srt) {
errval_t err;
err = b->tx_vtbl.run_response(b, MKCONT(free_reply_state, srt),
- srt->skb.output_buffer,
- srt->skb.error_buffer,
- srt->skb.exec_res);
+ srt->skb.output_buffer,
+ srt->skb.error_buffer,
+ srt->skb.exec_res);
if (err_is_fail(err)) {
if(err_no(err) == FLOUNDER_ERR_TX_BUSY) {
- enqueue_reply_state(b, srt);
- return;
+ enqueue_reply_state(b, srt);
+ return;
}
USER_PANIC_ERR(err, "SKB sending %s failed!", __FUNCTION__);
}
static void run(struct skb_binding *b, char *query)
{
- struct skb_reply_state* srt = NULL;
- errval_t err = new_reply_state(&srt, run_reply);
- assert(err_is_ok(err)); // TODO
+ struct skb_reply_state* srt = NULL;
+ errval_t err = new_reply_state(&srt, run_reply);
+ assert(err_is_ok(err)); // TODO
- err = execute_query(query, &srt->skb);
- assert(err_is_ok(err));
+ err = execute_query(query, &srt->skb);
+ assert(err_is_ok(err));
run_reply(b, srt);
- free(query);
}
static errval_t connect_cb(void *st, struct skb_binding *b)
{
- // Set up continuation queue
+ // Set up continuation queue
b->st = NULL;
// copy my message receive handler vtable to the binding
assert(pe != NULL);
memset(pe, 0, sizeof(struct ps_entry));
memcpy(pe->argv, argv, MAX_CMDLINE_ARGS*sizeof(*argv));
- pe->argbuf = argbuf;
+ pe->argbuf = memdup(argbuf, argbytes);
pe->argbytes = argbytes;
/*
* NB: It's important to keep a copy of the DCB *and* the root
domainid_t domainid;
};
-static void retry_spawn_domain_response(void *a)
-{
- errval_t err;
-
- struct pending_spawn_response *r = (struct pending_spawn_response*)a;
- struct spawn_binding *b = r->b;
-
- err = b->tx_vtbl.spawn_domain_response(b, NOP_CONT, r->err, r->domainid);
-
- if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
- // try again
- err = b->register_send(b, get_default_waitset(),
- MKCONT(retry_spawn_domain_response,a));
- }
- if (err_is_fail(err)) {
- DEBUG_ERR(err, "error sending spawn_domain reply\n");
- }
-
- free(a);
-}
-
-
-static errval_t spawn_reply(struct spawn_binding *b, errval_t rerr,
- domainid_t domainid)
-{
- errval_t err;
-
- err = b->tx_vtbl.spawn_domain_response(b, NOP_CONT, rerr, domainid);
-
- if (err_is_fail(err)) {
- DEBUG_ERR(err, "error sending spawn_domain reply\n");
-
- if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
- // this will be freed in the retry handler
- struct pending_spawn_response *sr =
- malloc(sizeof(struct pending_spawn_response));
- if (sr == NULL) {
- return LIB_ERR_MALLOC_FAIL;
- }
- sr->b = b;
- sr->err = rerr;
- sr->domainid = domainid;
- err = b->register_send(b, get_default_waitset(),
- MKCONT(retry_spawn_domain_response, sr));
- if (err_is_fail(err)) {
- // note that only one continuation may be registered at a time
- free(sr);
- DEBUG_ERR(err, "register_send failed!");
- return err;
- }
- }
- }
-
- return SYS_ERR_OK;
-}
-
-static void retry_spawn_domain_w_caps_response(void *a)
-{
- errval_t err;
-
- struct pending_spawn_response *r = (struct pending_spawn_response*)a;
- struct spawn_binding *b = r->b;
-
- err = b->tx_vtbl.spawn_domain_with_caps_response(b, NOP_CONT, r->err, r->domainid);
-
- if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
- // try again
- err = b->register_send(b, get_default_waitset(),
- MKCONT(retry_spawn_domain_response,a));
- }
- if (err_is_fail(err)) {
- DEBUG_ERR(err, "error sending spawn_domain reply\n");
- }
-
- free(a);
-}
-
-
-static errval_t spawn_with_caps_reply(struct spawn_binding *b, errval_t rerr,
- domainid_t domainid)
-{
- errval_t err;
-
- err = b->tx_vtbl.spawn_domain_with_caps_response(b, NOP_CONT, rerr, domainid);
-
- if (err_is_fail(err)) {
- DEBUG_ERR(err, "error sending spawn_domain reply\n");
-
- if (err_no(err) == FLOUNDER_ERR_TX_BUSY) {
- // this will be freed in the retry handler
- struct pending_spawn_response *sr =
- malloc(sizeof(struct pending_spawn_response));
- if (sr == NULL) {
- return LIB_ERR_MALLOC_FAIL;
- }
- sr->b = b;
- sr->err = rerr;
- sr->domainid = domainid;
- err = b->register_send(b, get_default_waitset(),
- MKCONT(retry_spawn_domain_w_caps_response, sr));
- if (err_is_fail(err)) {
- // note that only one continuation may be registered at a time
- free(sr);
- DEBUG_ERR(err, "register_send failed!");
- return err;
- }
- }
- }
-
- return SYS_ERR_OK;
-}
-
-
static errval_t spawn_with_caps_common(char *path, char *argbuf, size_t argbytes,
char *envbuf, size_t envbytes,
struct capref inheritcn_cap,
finish:
if(err_is_fail(err)) {
- free(argbuf);
DEBUG_ERR(err, "spawn");
}
- free(envbuf);
- free(path);
-
return err;
}
-static void spawn_with_caps_handler(struct spawn_binding *b, char *path,
- char *argbuf, size_t argbytes,
- char *envbuf, size_t envbytes,
- struct capref inheritcn_cap,
- struct capref argcn_cap,
- uint8_t flags)
+static errval_t spawn_with_caps_handler(struct spawn_binding *b, char *path,
+ char *argvbuf, size_t argvbytes, char *envbuf, size_t envbytes,
+ struct capref inheritcn_cap, struct capref argcn_cap, uint8_t flags,
+ errval_t *err, spawn_domainid_t *domain_id)
{
- errval_t err;
- domainid_t newdomid;
- err = spawn_with_caps_common(path, argbuf, argbytes, envbuf, envbytes,
- inheritcn_cap, argcn_cap, flags, &newdomid);
-
- err = spawn_with_caps_reply(b, err, newdomid);
-
- if (err_is_fail(err)) {
- DEBUG_ERR(err, "while sending reply in spawn_with_caps_handler");
- }
+ *err = spawn_with_caps_common(path, argvbuf, argvbytes, envbuf, envbytes,
+ inheritcn_cap, argcn_cap, flags, domain_id);
+ return SYS_ERR_OK;
}
-static void spawn_handler(struct spawn_binding *b, char *path, char *argbuf,
- size_t argbytes, char *envbuf, size_t envbytes,
- uint8_t flags)
+static errval_t spawn_handler(struct spawn_binding *b, char *path,
+ char *argvbuf, size_t argvbytes, char *envbuf, size_t envbytes,
+ uint8_t flags, errval_t *err, spawn_domainid_t *domain_id)
{
- errval_t err;
- domainid_t newdomid;
- err = spawn_with_caps_common(path, argbuf, argbytes, envbuf, envbytes,
- NULL_CAP, NULL_CAP, flags, &newdomid);
-
- err = spawn_reply(b, err, newdomid);
-
- if (err_is_fail(err)) {
- // not much we can do about this
- DEBUG_ERR(err, "while sending reply in spawn_handler");
- }
+ *err = spawn_with_caps_common(path, argvbuf, argvbytes, envbuf, envbytes,
+ NULL_CAP, NULL_CAP, flags, domain_id);
+ return SYS_ERR_OK;
}
/**
}
static struct spawn_rx_vtbl rx_vtbl = {
- .spawn_domain_call = spawn_handler,
- .spawn_domain_with_caps_call = spawn_with_caps_handler,
+ // .spawn_domain_call = spawn_handler,
+ // .spawn_domain_with_caps_call = spawn_with_caps_handler,
.use_local_memserv_call = use_local_memserv_handler,
.kill_call = kill_handler,
.exit_call = exit_handler,
.dump_capabilities_call = dump_capabilities_handler
};
+static struct spawn_rpc_rx_vtbl rpc_rx_vtbl = {
+ .spawn_domain_call = spawn_handler,
+ .spawn_domain_with_caps_call = spawn_with_caps_handler,
+ // .use_local_memserv_call = use_local_memserv_handler,
+ // .kill_call = kill_handler,
+ // .exit_call = exit_handler,
+ // .wait_call = wait_handler,
+ // .get_domainlist_call = get_domainlist_handler,
+ // .status_call = status_handler,
+ // .dump_capabilities_call = dump_capabilities_handler
+};
+
static void export_cb(void *st, errval_t err, iref_t iref)
{
if (err_is_fail(err)) {
{
// copy my message receive handler vtable to the binding
b->rx_vtbl = rx_vtbl;
+ b->rpc_rx_vtbl = rpc_rx_vtbl;
return SYS_ERR_OK;
}
free(buf);
printf("reading data\n");
- size_t bytes_read;
- err = ata_rw28_rpc.vtbl.read_dma(&ata_rw28_rpc, bytes, start_lba, &buf, &bytes_read);
+ struct ata_rw28_read_dma_response__rx_args reply;
+ err = ata_rw28_rpc.vtbl.read_dma(&ata_rw28_rpc, bytes, start_lba, reply.buffer, &reply.buffer_size);
if (err_is_fail(err))
USER_PANIC_ERR(err, "read_dma rpc");
- if (!buf)
- USER_PANIC("read_dma -> !buf");
- if (bytes_read != bytes)
+ if (reply.buffer_size != bytes)
USER_PANIC("read_dma -> read_size != size");
+ buf = reply.buffer;
printf("checking data\n");
for (size_t i = 0; i < count; ++i)
{
i*step, val, pat);
}
}
- free(buf);
printf("write_and_check_32 completed\n");
}