Rename variables, fix event_mutex locking, and fix thread_unblock_one to re-enable the dispatcher.
author Adam Turowski <adam.turowski@inf.ethz.ch>
Mon, 25 Jul 2016 13:12:13 +0000 (15:12 +0200)
committer Adam Turowski <adam.turowski@inf.ethz.ch>
Mon, 25 Jul 2016 13:12:13 +0000 (15:12 +0200)
Signed-off-by: Adam Turowski <adam.turowski@inf.ethz.ch>

lib/barrelfish/deferred.c
lib/barrelfish/domain.c
lib/barrelfish/event_mutex.c
lib/barrelfish/threads.c

index 3a536b2..65bfc30 100644 (file)
@@ -149,11 +149,11 @@ errval_t barrelfish_usleep(delayus_t delay)
 errval_t deferred_event_cancel(struct deferred_event *event)
 {
     enum ws_chanstate chanstate = event->waitset_state.state;
-    dispatcher_handle_t dh = disp_disable();
-    errval_t err = waitset_chan_deregister_disabled(&event->waitset_state);
+    dispatcher_handle_t handle = disp_disable();
+    errval_t err = waitset_chan_deregister_disabled(&event->waitset_state, handle);
     if (err_is_ok(err) && chanstate != CHAN_PENDING) {
         // remove from dispatcher queue
-        struct dispatcher_generic *disp = get_dispatcher_generic(dh);
+        struct dispatcher_generic *disp = get_dispatcher_generic(handle);
         if (event->prev == NULL) {
             disp->deferred_events = event->next;
         } else {
@@ -162,10 +162,10 @@ errval_t deferred_event_cancel(struct deferred_event *event)
         if (event->next != NULL) {
             event->next->prev = event->prev;
         }
-        update_wakeup_disabled(dh);
+        update_wakeup_disabled(handle);
     }
 
-    disp_enable(dh);
+    disp_enable(handle);
     return err;
 }
 
index 7fda57c..e88e512 100644 (file)
@@ -37,7 +37,7 @@
 ///< Struct to maintain per dispatcher domain library state
 struct domain_state {
     iref_t iref;  ///< Iref for the interdisp service
-    struct interdisp_binding *b[MAX_CPUS];
+    struct interdisp_binding *binding[MAX_CPUS];
     struct waitset interdisp_ws;
     struct thread *default_waitset_handler;
     struct thread *remote_wakeup_queue;
@@ -80,7 +80,7 @@ static void dispatcher_initialized_handler(void *arg)
 
     // XXX: Tell currently active interdisp-threads to handle default waitset
     for(int i = 0; i < MAX_CPUS; i++) {
-        struct interdisp_binding *b = domain_state->b[i];
+        struct interdisp_binding *b = domain_state->binding[i];
 
         if(disp_get_core_id() != i &&
            span_domain_state->core_id != i && b != NULL) {
@@ -289,7 +289,7 @@ static void span_eager_connect_request(struct interdisp_binding *b,
     struct domain_state *domain_state = get_domain_state();
 
     /* Store the sending core's connection */
-    domain_state->b[core_id] = b;
+    domain_state->binding[core_id] = b;
 }
 
 static struct interdisp_rx_vtbl interdisp_vtbl = {
@@ -331,7 +331,7 @@ static void client_connected(void *st, errval_t err,
 
     /* Set it on the domain library state */
     b->rx_vtbl = interdisp_vtbl;
-    domain_state->b[state->cnt] = b;
+    domain_state->binding[state->cnt] = b;
 
     // Send it our core id
     err = b->tx_vtbl.span_eager_connect(b, NOP_CONT, disp_get_core_id());
@@ -355,7 +355,7 @@ static void client_connected(void *st, errval_t err,
             USER_PANIC_ERR(err, "Binding to inter-dispatcher service");
         }
     } else {
-        struct interdisp_binding *sb = domain_state->b[state->core_id];
+        struct interdisp_binding *sb = domain_state->binding[state->core_id];
         /* Send initialized msg to the dispatcher that spanned us */
         errval_t err2 = sb->tx_vtbl.
             dispatcher_initialized(sb, NOP_CONT,
@@ -424,9 +424,9 @@ static void handle_wakeup_on(void *arg)
         /* coreid_t core_id = disp_handle_get_core_id(thread->disp); */
         coreid_t core_id = thread->coreid;
 
-        assert(domain_state->b[core_id] != NULL);
+        assert(domain_state->binding[core_id] != NULL);
 
-        struct interdisp_binding *b = domain_state->b[core_id];
+        struct interdisp_binding *b = domain_state->binding[core_id];
         err = b->tx_vtbl.wakeup_thread(b, NOP_CONT, (genvaddr_t)(uintptr_t)thread);
         if (err_is_fail(err)) {
             USER_PANIC_ERR(err, "wakeup_thread");
@@ -546,7 +546,7 @@ errval_t domain_init(void)
     waitset_chanstate_init(&domain_state->remote_wakeup_event,
                            CHANTYPE_EVENT_QUEUE);
     for (int i = 0; i < MAX_CPUS; i++) {
-        domain_state->b[i] = NULL;
+        domain_state->binding[i] = NULL;
     }
 
     waitset_init(&domain_state->interdisp_ws);
@@ -776,18 +776,16 @@ static errval_t domain_new_dispatcher_varstack(coreid_t core_id,
     /* Wait to use the monitor binding */
     struct monitor_binding *mcb = get_monitor_binding();
     event_mutex_enqueue_lock(&mcb->mutex, &span_domain_state->event_qnode,
-                             (struct event_closure) {
-                                 .handler = span_domain_request_sender_wrapper,
-                                     .arg = span_domain_state });
+                          (struct event_closure) {
+                              .handler = span_domain_request_sender_wrapper,
+                                  .arg = span_domain_state });
 
-#if 1
     while(!span_domain_state->initialized) {
         event_dispatch(get_default_waitset());
     }
 
     /* Free state */
     free(span_domain_state);
-#endif
 
     return SYS_ERR_OK;
 }
@@ -813,14 +811,14 @@ errval_t domain_send_cap(coreid_t core_id, struct capref cap)
 {
     errval_t err;
     struct domain_state *domain_state = get_domain_state();
-    if (!domain_state->b[core_id]) {
+    if (!domain_state->binding[core_id]) {
         return LIB_ERR_NO_SPANNED_DISP;
     }
 
     send_cap_err = SYS_ERR_OK;
     cap_received = false;
 
-    struct interdisp_binding *b = domain_state->b[core_id];
+    struct interdisp_binding *b = domain_state->binding[core_id];
     err = b->tx_vtbl.send_cap_request(b, NOP_CONT, cap, (uintptr_t)&cap);
     if (err_is_fail(err)) {
         return err_push(err, LIB_ERR_SEND_CAP_REQUEST);
@@ -856,7 +854,7 @@ static errval_t domain_wakeup_on_coreid_disabled(coreid_t core_id,
 
     // Catch this early
     assert_disabled(ds != NULL);
-    if (ds->b[core_id] == NULL) {
+    if (ds->binding[core_id] == NULL) {
         return LIB_ERR_NO_SPANNED_DISP;
     }
 
@@ -953,11 +951,11 @@ errval_t domain_thread_create_on_varstack(coreid_t core_id,
         struct domain_state *domain_state = get_domain_state();
         errval_t err;
 
-        if (domain_state->b[core_id] == NULL) {
+        if (domain_state->binding[core_id] == NULL) {
             return LIB_ERR_NO_SPANNED_DISP;
         }
 
-        struct interdisp_binding *b = domain_state->b[core_id];
+        struct interdisp_binding *b = domain_state->binding[core_id];
         struct create_thread_req *req = malloc(sizeof(*req));
         req->reply_received = false;
         // use special waitset to make sure loop exits properly.
@@ -1003,11 +1001,11 @@ errval_t domain_thread_join(struct thread *thread, int *retval)
         struct domain_state *domain_state = get_domain_state();
         errval_t err;
 
-        if (domain_state->b[core_id] == NULL) {
+        if (domain_state->binding[core_id] == NULL) {
             return LIB_ERR_NO_SPANNED_DISP;
         }
 
-        struct interdisp_binding *b = domain_state->b[core_id];
+        struct interdisp_binding *b = domain_state->binding[core_id];
         struct join_thread_req *req = malloc(sizeof(*req));
         req->reply_received = false;
         // use special waitset to make sure loop exits properly.
index d8c5fd6..7132143 100644 (file)
@@ -72,10 +72,9 @@ void event_mutex_threaded_lock(struct event_mutex *em)
         thread_mutex_unlock(&em->tmutex);
     } else {
         // add ourselves to the thread queue and block
-        // XXX: TODO: the mutex unlock and block on the queue must be atomic
-        assert(!"this is broken without thread_block_and_release_mutex()");
-        thread_mutex_unlock(&em->tmutex);
-        void *wakeup_reason = thread_block(&em->tqueue);
+        dispatcher_handle_t handle = disp_disable();
+        thread_mutex_unlock_disabled(handle, &em->tmutex);
+        void *wakeup_reason = thread_block_and_release_spinlock_disabled(handle, &em->tqueue, NULL);
 
         assert(wakeup_reason == em);
         assert(em->locked);
index 1bac6da..c4658a2 100644 (file)
@@ -964,7 +964,12 @@ struct thread *thread_unblock_one_disabled(dispatcher_handle_t handle,
  */
 struct thread *thread_unblock_one(struct thread **queue, void *reason)
 {
-    return thread_unblock_one_disabled(disp_disable(), queue, reason);
+    struct thread *thread;
+
+    dispatcher_handle_t handle = disp_disable();
+    thread = thread_unblock_one_disabled(handle, queue, reason);
+    disp_enable(handle);
+    return thread;
 }
 
 /**