/**
 * \file
 * \brief Waitset and low-level event handling mechanism
 *
 * A "wait set" is a collection of channels to wait on, much like an
 * FDSET in POSIX. There should be a default, static wait set for each
 * dispatcher. Threads which wait for events specify the wait set they
 * are waiting on.
 */

/*
 * Copyright (c) 2009-2012, ETH Zurich.
 * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
 */
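
/*
 * Example (illustrative sketch, not part of this file): the typical life
 * cycle of a waitset as seen by application code, using only the public
 * API declared in <barrelfish/waitset.h>.
 *
 *   struct waitset ws;
 *   waitset_init(&ws);
 *
 *   // ... channel implementations register themselves on &ws ...
 *
 *   errval_t err = event_dispatch(&ws);  // block for one event and run it
 *   assert(err_is_ok(err));
 *
 *   err = waitset_destroy(&ws);          // fails while events are pending
 *   assert(err_is_ok(err));
 */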
#include <barrelfish/barrelfish.h>
#include <barrelfish/waitset.h>
#include <barrelfish/waitset_chan.h>
#include <barrelfish/threads.h>
#include <barrelfish/dispatch.h>
#include "threads_priv.h"
#include "waitset_chan_priv.h"
#include <stdio.h>  // printf() in the polling debug path
#include <string.h> // strcmp() in the polling debug path
#ifdef CONFIG_INTERCONNECT_DRIVER_UMP
# include <barrelfish/ump_endpoint.h>
#endif
#if defined(__k1om__) || defined(__aarch64__)
#include <barrelfish_kpi/asm_inlines_arch.h>
static inline cycles_t cyclecount(void)
{
    return rdtsc();
}
#elif defined(__x86_64__) || defined(__i386__)
#include <arch/x86/barrelfish_kpi/asm_inlines_arch.h>
static inline cycles_t cyclecount(void)
{
    return rdtsc();
}
#elif defined(__arm__) && defined(__gem5__)
/*
 * XXX: Gem5 doesn't support the ARM performance monitor extension,
 * therefore we just poll a fixed number of times (POLL_COUNT) instead of
 * using cycle counts.
 */
#define POLL_COUNT 42
#elif defined(__aarch64__) && defined(__gem5__)
#define POLL_COUNT 42
#elif defined(__arm__)
#include <arch/arm/barrelfish_kpi/asm_inlines_arch.h>
static inline cycles_t cyclecount(void)
{
    return get_cycle_count();
}
#else
static inline cycles_t cyclecount(void)
{
    USER_PANIC("called on non-x86 architecture. why are we polling?");
    return 0;
}
#endif
// FIXME: bogus default value. need to measure this at boot time
#define WAITSET_POLL_CYCLES_DEFAULT 2000

/// Maximum number of cycles to spend polling channels before yielding CPU
cycles_t waitset_poll_cycles = WAITSET_POLL_CYCLES_DEFAULT;
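
/*
 * Example (sketch): a latency-sensitive domain can raise the polling budget
 * before entering its dispatch loop. waitset_poll_cycles is a plain global,
 * so this is a single assignment; the multiplier here is arbitrary and for
 * illustration only.
 *
 *   waitset_poll_cycles = 10 * WAITSET_POLL_CYCLES_DEFAULT;
 */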
/**
 * \brief Initialise a new waitset
 */
void waitset_init(struct waitset *ws)
{
    assert(ws != NULL);
    ws->pending = ws->polled = ws->idle = NULL;
    ws->waiting_threads = NULL;
    ws->polling = false;
}
/**
 * \brief Destroy a previously initialised waitset
 */
errval_t waitset_destroy(struct waitset *ws)
{
    assert(ws != NULL);

    // FIXME: do we want to support cancelling all the pending events/channels?
    if (ws->pending || ws->waiting_threads) {
        return LIB_ERR_WAITSET_IN_USE;
    }

    // remove idle and polled channels from waitset
    struct waitset_chanstate *chan, *next;
    for (chan = ws->idle; chan != NULL; chan = next) {
        next = chan->next;
        assert(chan->state == CHAN_IDLE);
        assert(chan->waitset == ws);
        chan->waitset = NULL;
        chan->next = chan->prev = NULL;

        if (next == ws->idle) {
            break;
        }
    }
    ws->idle = NULL;

    for (chan = ws->polled; chan != NULL; chan = next) {
        next = chan->next;
        assert(chan->state == CHAN_POLLED);
        assert(chan->waitset == ws);
        chan->waitset = NULL;
        chan->next = chan->prev = NULL;

        if (next == ws->polled) {
            break;
        }
    }
    ws->polled = NULL;

    return SYS_ERR_OK;
}
/// Returns a channel with a pending event on the given waitset, or NULL
static struct waitset_chanstate *get_pending_event_disabled(struct waitset *ws)
{
    // are there any pending events on the waitset?
    if (ws->pending == NULL) {
        return NULL;
    }

    // dequeue next pending event
    struct waitset_chanstate *chan = ws->pending;
    if (chan->next == chan) {
        assert_disabled(chan->prev == chan);
        ws->pending = NULL;
    } else {
        ws->pending = chan->next;
        chan->prev->next = chan->next;
        chan->next->prev = chan->prev;
    }
    chan->prev = chan->next = NULL;

    // mark channel as no longer registered
    assert_disabled(chan->state == CHAN_PENDING);
    chan->state = CHAN_UNREGISTERED;
    chan->waitset = NULL;

    return chan;
}
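
/*
 * Note on the queue representation (summarising the code above): each of
 * ws->pending, ws->polled and ws->idle points at an arbitrary element of a
 * circular doubly-linked list with no sentinel node. A one-element list is
 * the special case chan->next == chan->prev == chan, which is why dequeue
 * operations test chan->next == chan rather than comparing against NULL.
 */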
#ifdef CONFIG_INTERCONNECT_DRIVER_UMP
/**
 * \brief Poll an incoming UMP endpoint.
 * This is logically part of the UMP endpoint implementation, but placed here
 * for easier inlining.
 */
static inline void ump_endpoint_poll(struct waitset_chanstate *chan)
{
    /* XXX: calculate location of endpoint from waitset channel state */
    struct ump_endpoint *ep = (struct ump_endpoint *)
        ((char *)chan - offsetof(struct ump_endpoint, waitset_state));

    if (ump_endpoint_can_recv(ep)) {
        errval_t err = waitset_chan_trigger(chan);
        assert(err_is_ok(err)); // should not be able to fail
    }
}
#endif // CONFIG_INTERCONNECT_DRIVER_UMP
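
/*
 * The cast in ump_endpoint_poll() is the classic "container-of" idiom: given
 * a pointer to a member (the waitset_chanstate embedded in a ump_endpoint),
 * recover the enclosing structure. A generic sketch of the same pattern,
 * where CONTAINER_OF is named here for illustration only and is not defined
 * by this file:
 *
 *   #define CONTAINER_OF(ptr, type, member) \
 *       ((type *)((char *)(ptr) - offsetof(type, member)))
 *
 *   struct ump_endpoint *ep = CONTAINER_OF(chan, struct ump_endpoint,
 *                                          waitset_state);
 */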
void arranet_polling_loop_proxy(void) __attribute__((weak));
void arranet_polling_loop_proxy(void)
{
    USER_PANIC("Network polling not available without Arranet!\n");
}

void poll_ahci(struct waitset_chanstate *) __attribute__((weak));
void poll_ahci(struct waitset_chanstate *chan)
{
    errval_t err = waitset_chan_trigger(chan);
    assert(err_is_ok(err)); // should not be able to fail
}
/// Helper function that knows how to poll the given channel, based on its type
static void poll_channel(struct waitset_chanstate *chan)
{
    switch (chan->chantype) {
#ifdef CONFIG_INTERCONNECT_DRIVER_UMP
    case CHANTYPE_UMP_IN:
        ump_endpoint_poll(chan);
        break;
#endif // CONFIG_INTERCONNECT_DRIVER_UMP

    case CHANTYPE_LWIP_SOCKET:
        arranet_polling_loop_proxy();
        break;

    case CHANTYPE_AHCI:
        poll_ahci(chan);
        break;

    default:
        assert(!"invalid channel type to poll!");
    }
}
// pollcycles_*: arch-specific implementations for polling.
// Used by get_next_event().
//
// pollcycles_reset()   -- return the number of pollcycles we want to poll for
// pollcycles_update()  -- update the pollcycles variable. This is needed for
//                         implementations where we don't have a cycle counter
//                         and we just count the number of polling operations
//                         instead
// pollcycles_expired() -- check if the pollcycles have expired
//
// We might want to move them to architecture-specific files, and/or create a
// cleaner interface. For now, I just wanted to keep them out of
// get_next_event().
#if defined(__ARM_ARCH_7A__) && defined(__GNUC__) \
    && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 && __GNUC_PATCHLEVEL__ <= 3
static __attribute__((noinline, unused))
#else
static inline
#endif
cycles_t pollcycles_reset(void)
{
    cycles_t pollcycles;
#if defined(__arm__) && !defined(__gem5__)
    reset_cycle_counter();
    pollcycles = waitset_poll_cycles;
#elif defined(__arm__) && defined(__gem5__)
    pollcycles = 0;
#elif defined(__aarch64__) && defined(__gem5__)
    pollcycles = 0;
#else
    pollcycles = cyclecount() + waitset_poll_cycles;
#endif
    return pollcycles;
}
#if defined(__ARM_ARCH_7A__) && defined(__GNUC__) \
    && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 && __GNUC_PATCHLEVEL__ <= 3
static __attribute__((noinline, unused))
#else
static inline
#endif
cycles_t pollcycles_update(cycles_t pollcycles)
{
    cycles_t ret = pollcycles;
#if defined(__arm__) && defined(__gem5__)
    ret++;
#elif defined(__aarch64__) && defined(__gem5__)
    ret++;
#endif
    return ret;
}
#if defined(__ARM_ARCH_7A__) && defined(__GNUC__) \
    && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 && __GNUC_PATCHLEVEL__ <= 3
static __attribute__((noinline, unused))
#else
static inline
#endif
bool pollcycles_expired(cycles_t pollcycles)
{
    bool ret;
#if defined(__arm__) && !defined(__gem5__)
    ret = (cyclecount() > pollcycles || is_cycle_counter_overflow());
#elif defined(__arm__) && defined(__gem5__)
    ret = pollcycles >= POLL_COUNT;
#elif defined(__aarch64__) && defined(__gem5__)
    ret = pollcycles >= POLL_COUNT;
#else
    ret = cyclecount() > pollcycles;
#endif
    return ret;
}
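
/*
 * Taken together, the three helpers are used in the pattern below (a
 * simplified sketch of the loop in get_next_event_debug(); the conditions
 * and helper named here are hypothetical):
 *
 *   cycles_t budget = pollcycles_reset();
 *   while (nothing_pending) {          // hypothetical condition
 *       poll_all_channels();           // hypothetical helper
 *       budget = pollcycles_update(budget);
 *       if (pollcycles_expired(budget)) {
 *           thread_yield();
 *           budget = pollcycles_reset();
 *       }
 *   }
 */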
static errval_t get_next_event_debug(struct waitset *ws,
                                     struct event_closure *retclosure, bool debug)
{
    struct waitset_chanstate *chan;
    bool was_polling = false;
    cycles_t pollcycles;

    assert(ws != NULL);
    assert(retclosure != NULL);

    // unconditionally disable ourselves and check for events
    // if we decide we have to start polling, we'll jump back up here
    goto check_for_events;

    /* ------------ POLLING LOOP; RUNS WHILE ENABLED ------------ */
polling_loop:
    was_polling = true;
    assert(ws->polling); // this thread is polling
    // get the amount of cycles we want to poll for
    pollcycles = pollcycles_reset();

    // while there are no pending events, poll channels
    while (ws->polled != NULL && ws->pending == NULL) {
        struct waitset_chanstate *nextchan = NULL;
        // NB: Polling policy is to return as soon as a pending event
        // appears, not bother looking at the rest of the polling queue
        for (chan = ws->polled;
             chan != NULL && chan->waitset == ws && chan->state == CHAN_POLLED
                 && ws->pending == NULL;
             chan = nextchan) {

            nextchan = chan->next;
            poll_channel(chan);

            pollcycles = pollcycles_update(pollcycles);
            // yield the thread if we exceed the cycle count limit
            if (ws->pending == NULL && pollcycles_expired(pollcycles)) {
                if (debug) {
                    if (strcmp(disp_name(), "netd") != 0) {
                        // print the call stack, so we know which caller is
                        // responsible for this thread being descheduled
                        printf("%s: callstack: %p %p %p %p\n", disp_name(),
                               __builtin_return_address(0),
                               __builtin_return_address(1),
                               __builtin_return_address(2),
                               __builtin_return_address(3));
                    }
                }
                thread_yield();
                pollcycles = pollcycles_reset();
            }
        }

        // ensure that we restart polling from the place we left off here,
        // if the next channel is a valid one
        if (nextchan != NULL && nextchan->waitset == ws
            && nextchan->state == CHAN_POLLED) {
            ws->polled = nextchan;
        }
    }

    /* ------------ STATE MACHINERY; RUNS WHILE DISABLED ------------ */
check_for_events: ;
    dispatcher_handle_t handle = disp_disable();

    // are there any pending events on the waitset?
    chan = get_pending_event_disabled(ws);
    if (chan != NULL) {
        // if we need to poll, and we have a blocked thread, wake it up to do so
        if (was_polling && ws->polled != NULL && ws->waiting_threads != NULL) {
            // start a blocked thread polling
            struct thread *t;
            t = thread_unblock_one_disabled(handle, &ws->waiting_threads, NULL);
            assert_disabled(t == NULL); // shouldn't see a remote thread
        } else if (was_polling) {
            // I'm stopping polling, and there is nobody else
            assert_disabled(ws->polling);
            ws->polling = false;
        }
        disp_enable(handle);

        *retclosure = chan->closure;
        return SYS_ERR_OK;
    }

    // If we got here and there are channels to poll but no-one is polling,
    // then either we never polled, or we lost a race on the channel we picked.
    // Either way, we'd better start polling again.
    if (ws->polled != NULL && (was_polling || !ws->polling)) {
        if (!ws->polling) {
            ws->polling = true;
        }
        disp_enable(handle);
        goto polling_loop;
    }

    // otherwise block awaiting an event
    chan = thread_block_disabled(handle, &ws->waiting_threads);

    if (chan == NULL) {
        // not a real event, just a wakeup to get us to start polling!
        assert(ws->polling);
        goto polling_loop;
    } else {
        *retclosure = chan->closure;
        return SYS_ERR_OK;
    }
}
/**
 * \brief Wait for (block) and return next event on given waitset
 *
 * Wait until something happens, either activity on some channel, or a deferred
 * call, and then return the corresponding closure. This is the core of the
 * event-handling system.
 *
 * \param ws Waitset
 * \param retclosure Pointer to storage space for returned event closure
 */
errval_t get_next_event(struct waitset *ws, struct event_closure *retclosure)
{
    return get_next_event_debug(ws, retclosure, false);
}
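
/*
 * Example (sketch): fetching and running one event by hand. This is exactly
 * what event_dispatch() below does; most code should call that instead.
 *
 *   struct event_closure closure;
 *   errval_t err = get_next_event(ws, &closure);
 *   if (err_is_ok(err)) {
 *       closure.handler(closure.arg);
 *   }
 */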
/**
 * \brief Return next event on given waitset, if one is already pending
 *
 * This is essentially a non-blocking variant of get_next_event(). It should be
 * used with great care, to avoid the creation of busy-waiting loops.
 *
 * \param ws Waitset
 * \param retclosure Pointer to storage space for returned event closure
 *
 * \returns LIB_ERR_NO_EVENT if nothing is pending
 */
errval_t check_for_event(struct waitset *ws, struct event_closure *retclosure)
{
    struct waitset_chanstate *chan;
    int pollcount = 0;

    assert(ws != NULL);
    assert(retclosure != NULL);

recheck: ;
    // are there any pending events on the waitset?
    dispatcher_handle_t handle = disp_disable();
    chan = get_pending_event_disabled(ws);
    disp_enable(handle);
    if (chan != NULL) {
        *retclosure = chan->closure;
        return SYS_ERR_OK;
    }

    // if there are no pending events, poll all channels once
    if (ws->polled != NULL && pollcount++ == 0) {
        for (chan = ws->polled;
             chan != NULL && chan->waitset == ws && chan->state == CHAN_POLLED;
             chan = chan->next) {

            poll_channel(chan);
            if (ws->pending != NULL) { // something fired: recheck pending queue
                goto recheck;
            }

            if (chan->next == ws->polled) { // reached the start of the queue
                break;
            }
        }
    }

    return LIB_ERR_NO_EVENT;
}
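
/*
 * Example (sketch): draining all currently-pending events without blocking,
 * e.g. from a main loop that also has other work to do.
 *
 *   struct event_closure closure;
 *   while (err_is_ok(check_for_event(ws, &closure))) {
 *       closure.handler(closure.arg);
 *   }
 *   // fall through to other work; LIB_ERR_NO_EVENT terminates the loop
 */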
/**
 * \brief Wait for (block) and dispatch next event on given waitset
 *
 * Wait until something happens, either activity on some channel, or a deferred
 * call, and then call the corresponding closure.
 *
 * \param ws Waitset
 */
errval_t event_dispatch(struct waitset *ws)
{
    struct event_closure closure;
    errval_t err = get_next_event(ws, &closure);
    if (err_is_fail(err)) {
        return err;
    }

    assert(closure.handler != NULL);
    closure.handler(closure.arg);
    return SYS_ERR_OK;
}
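
/*
 * Example (sketch): the canonical dispatch loop of a domain, blocking on the
 * default waitset. get_default_waitset() and USER_PANIC_ERR() come from the
 * standard libbarrelfish headers.
 *
 *   struct waitset *ws = get_default_waitset();
 *   while (true) {
 *       errval_t err = event_dispatch(ws);
 *       if (err_is_fail(err)) {
 *           USER_PANIC_ERR(err, "in event_dispatch");
 *       }
 *   }
 */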
errval_t event_dispatch_debug(struct waitset *ws)
{
    struct event_closure closure;
    errval_t err = get_next_event_debug(ws, &closure, true);
    if (err_is_fail(err)) {
        return err;
    }

    assert(closure.handler != NULL);
    // printf("%s: event_dispatch: %p: \n", disp_name(), closure.handler);
    closure.handler(closure.arg);
    return SYS_ERR_OK;
}
/**
 * \brief Check for and dispatch the next event on the given waitset
 *
 * Check if there is any pending activity on some channel, or a deferred
 * call, and then call the corresponding closure.
 *
 * Do not wait! If no events are pending, return LIB_ERR_NO_EVENT immediately.
 *
 * \param ws Waitset
 */
errval_t event_dispatch_non_block(struct waitset *ws)
{
    assert(ws != NULL);

    struct event_closure closure;
    errval_t err = check_for_event(ws, &closure);

    if (err_is_fail(err)) {
        return err;
    }

    assert(closure.handler != NULL);
    closure.handler(closure.arg);
    return SYS_ERR_OK;
}
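
/*
 * Example (sketch): interleaving event handling with computation by spinning
 * on the non-blocking variant until it reports no work left.
 *
 *   errval_t err;
 *   do {
 *       err = event_dispatch_non_block(ws);
 *   } while (err_is_ok(err));
 *   // err is now LIB_ERR_NO_EVENT (no work left) or a genuine failure
 */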
/*
 * "Private" functions that are called only by the channel implementations
 */
/**
 * \brief Initialise per-channel waitset state
 *
 * \param chan Channel state
 * \param chantype Channel type
 */
void waitset_chanstate_init(struct waitset_chanstate *chan,
                            enum ws_chantype chantype)
{
    assert(chan != NULL);
    chan->waitset = NULL;
    chan->chantype = chantype;
    chan->state = CHAN_UNREGISTERED;
#ifndef NDEBUG
    chan->prev = chan->next = NULL;
#endif
}
/**
 * \brief Destroy previously-initialised per-channel waitset state
 * \param chan Channel state
 */
void waitset_chanstate_destroy(struct waitset_chanstate *chan)
{
    assert(chan != NULL);
    if (chan->waitset != NULL) {
        errval_t err = waitset_chan_deregister(chan);
        assert(err_is_ok(err)); // can't fail if registered
    }
}
/**
 * \brief Register a closure to be called when a channel is triggered
 *
 * In the future, call the closure on a thread associated with the waitset
 * when the channel is triggered. Only one closure may be registered per
 * channel state at any one time.
 * This function must only be called when disabled.
 *
 * \param ws Waitset
 * \param chan Waitset's per-channel state
 * \param closure Event handler
 */
errval_t waitset_chan_register_disabled(struct waitset *ws,
                                        struct waitset_chanstate *chan,
                                        struct event_closure closure)
{
    if (chan->waitset != NULL) {
        return LIB_ERR_CHAN_ALREADY_REGISTERED;
    }

    chan->waitset = ws;

    // channel must not already be registered!
    assert_disabled(chan->next == NULL && chan->prev == NULL);
    assert_disabled(chan->state == CHAN_UNREGISTERED);

    // a closure without a handler is almost certainly a bug
    assert_disabled(closure.handler != NULL);

    // set the closure
    chan->closure = closure;

    // enqueue this channel on the waitset's queue of idle channels
    if (ws->idle == NULL) {
        chan->next = chan->prev = chan;
        ws->idle = chan;
    } else {
        chan->next = ws->idle;
        chan->prev = chan->next->prev;
        chan->next->prev = chan;
        chan->prev->next = chan;
    }
    chan->state = CHAN_IDLE;

    return SYS_ERR_OK;
}
/**
 * \brief Register a closure on a channel, and mark the channel as polled
 *
 * In the future, call the closure on a thread associated with the waitset
 * when the channel is triggered. Only one closure may be registered per
 * channel state at any one time. Additionally, mark the channel as polled.
 * This function must only be called when disabled.
 *
 * \param ws Waitset
 * \param chan Waitset's per-channel state
 * \param closure Event handler
 * \param handle Current dispatcher handle
 */
errval_t waitset_chan_register_polled_disabled(struct waitset *ws,
                                               struct waitset_chanstate *chan,
                                               struct event_closure closure,
                                               dispatcher_handle_t handle)
{
    if (chan->waitset != NULL) {
        return LIB_ERR_CHAN_ALREADY_REGISTERED;
    }

    chan->waitset = ws;

    // channel must not already be registered!
    assert_disabled(chan->next == NULL && chan->prev == NULL);
    assert_disabled(chan->state == CHAN_UNREGISTERED);

    // set the closure
    chan->closure = closure;

    // enqueue this channel on the waitset's queue of polled channels
    if (ws->polled == NULL) {
        chan->next = chan->prev = chan;
        ws->polled = chan;
        if (ws->waiting_threads != NULL && !ws->polling) {
            // start a blocked thread polling
            ws->polling = true;
            struct thread *t;
            t = thread_unblock_one_disabled(handle, &ws->waiting_threads, NULL);
            assert_disabled(t == NULL); // shouldn't see a remote thread: waitsets are per-dispatcher
        }
    } else {
        chan->next = ws->polled;
        chan->prev = chan->next->prev;
        chan->next->prev = chan;
        chan->prev->next = chan;
    }
    chan->state = CHAN_POLLED;

    return SYS_ERR_OK;
}
/**
 * \brief Register a closure to be called when a channel is triggered
 *
 * In the future, call the closure on a thread associated with the waitset
 * when the channel is triggered. Only one closure may be registered per
 * channel state at any one time.
 * This function must only be called when enabled.
 *
 * \param ws Waitset
 * \param chan Waitset's per-channel state
 * \param closure Event handler
 */
errval_t waitset_chan_register(struct waitset *ws, struct waitset_chanstate *chan,
                               struct event_closure closure)
{
    dispatcher_handle_t handle = disp_disable();
    errval_t err = waitset_chan_register_disabled(ws, chan, closure);
    disp_enable(handle);
    return err;
}
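
/*
 * Example (sketch): how a channel implementation typically uses this
 * function. my_channel, my_channel_register_recv and my_recv_handler are
 * hypothetical names, not part of this file; MKCLOSURE comes from the
 * standard event closure support.
 *
 *   struct my_channel {
 *       struct waitset_chanstate waitset_state;
 *       // ... channel-specific state ...
 *   };
 *
 *   errval_t my_channel_register_recv(struct my_channel *c, struct waitset *ws)
 *   {
 *       return waitset_chan_register(ws, &c->waitset_state,
 *                                    MKCLOSURE(my_recv_handler, c));
 *   }
 */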
/**
 * \brief Register a closure on a channel, and mark the channel as polled
 *
 * In the future, call the closure on a thread associated with the waitset
 * when the channel is triggered. Only one closure may be registered per
 * channel state at any one time. Additionally, mark the channel as polled.
 * This function must only be called when enabled. It is equivalent to
 * calling waitset_chan_register() followed by waitset_chan_start_polling().
 *
 * \param ws Waitset
 * \param chan Waitset's per-channel state
 * \param closure Event handler
 */
errval_t waitset_chan_register_polled(struct waitset *ws,
                                      struct waitset_chanstate *chan,
                                      struct event_closure closure)
{
    dispatcher_handle_t handle = disp_disable();
    errval_t err = waitset_chan_register_polled_disabled(ws, chan, closure, handle);
    disp_enable(handle);
    return err;
}
/**
 * \brief Mark an idle channel as polled
 *
 * The given channel will periodically have its poll function called.
 * The channel must already be registered.
 *
 * \param chan Waitset's per-channel state
 */
errval_t waitset_chan_start_polling(struct waitset_chanstate *chan)
{
    errval_t err = SYS_ERR_OK;

    dispatcher_handle_t handle = disp_disable();

    struct waitset *ws = chan->waitset;
    if (ws == NULL) {
        err = LIB_ERR_CHAN_NOT_REGISTERED;
        goto out;
    }

    assert(chan->state != CHAN_UNREGISTERED);
    if (chan->state != CHAN_IDLE) {
        goto out; // no-op if polled or pending
    }

    // remove from idle queue
    if (chan->next == chan) {
        assert(chan->prev == chan);
        assert(ws->idle == chan);
        ws->idle = NULL;
    } else {
        chan->prev->next = chan->next;
        chan->next->prev = chan->prev;
        if (ws->idle == chan) {
            ws->idle = chan->next;
        }
    }

    // enqueue on polled queue
    if (ws->polled == NULL) {
        ws->polled = chan;
        chan->next = chan->prev = chan;
        if (ws->waiting_threads != NULL && !ws->polling) {
            // start a blocked thread polling
            ws->polling = true;
            struct thread *t;
            t = thread_unblock_one_disabled(handle, &ws->waiting_threads, NULL);
            assert(t == NULL); // shouldn't see a remote thread: waitsets are per-dispatcher
        }
    } else {
        chan->next = ws->polled;
        chan->prev = ws->polled->prev;
        chan->next->prev = chan;
        chan->prev->next = chan;
    }
    chan->state = CHAN_POLLED;

out:
    disp_enable(handle);
    return err;
}
/**
 * \brief Stop polling the given channel, making it idle again
 *
 * \param chan Waitset's per-channel state
 */
errval_t waitset_chan_stop_polling(struct waitset_chanstate *chan)
{
    errval_t err = SYS_ERR_OK;

    dispatcher_handle_t handle = disp_disable();

    struct waitset *ws = chan->waitset;
    if (ws == NULL) {
        err = LIB_ERR_CHAN_NOT_REGISTERED;
        goto out;
    }

    assert(chan->state != CHAN_UNREGISTERED);
    if (chan->state != CHAN_POLLED) {
        goto out; // no-op if idle or pending
    }

    // remove from polled queue
    if (chan->next == chan) {
        assert(chan->prev == chan);
        assert(ws->polled == chan);
        ws->polled = NULL;
    } else {
        chan->prev->next = chan->next;
        chan->next->prev = chan->prev;
        if (ws->polled == chan) {
            ws->polled = chan->next;
        }
    }

    // enqueue on idle queue
    if (ws->idle == NULL) {
        ws->idle = chan;
        chan->next = chan->prev = chan;
    } else {
        chan->next = ws->idle;
        chan->prev = ws->idle->prev;
        chan->next->prev = chan;
        chan->prev->next = chan;
    }
    chan->state = CHAN_IDLE;

out:
    disp_enable(handle);
    return err;
}
/**
 * \brief Cancel a previous callback registration
 *
 * Remove the registration for a callback on the given channel.
 * This function must only be called when disabled.
 *
 * \param chan Waitset's per-channel state
 */
errval_t waitset_chan_deregister_disabled(struct waitset_chanstate *chan)
{
    assert_disabled(chan != NULL);
    struct waitset *ws = chan->waitset;
    if (ws == NULL) {
        return LIB_ERR_CHAN_NOT_REGISTERED;
    }

    // remove this channel from the queue in which it is waiting
    chan->waitset = NULL;
    assert_disabled(chan->next != NULL && chan->prev != NULL);

    if (chan->next == chan) {
        // only thing in the list: must be the head
        assert_disabled(chan->prev == chan);
        switch (chan->state) {
        case CHAN_IDLE:
            assert_disabled(chan == ws->idle);
            ws->idle = NULL;
            break;

        case CHAN_POLLED:
            assert_disabled(chan == ws->polled);
            ws->polled = NULL;
            break;

        case CHAN_PENDING:
            assert_disabled(chan == ws->pending);
            ws->pending = NULL;
            break;

        default:
            assert_disabled(!"invalid channel state in deregister");
        }
    } else {
        assert_disabled(chan->prev != chan);
        chan->prev->next = chan->next;
        chan->next->prev = chan->prev;
        switch (chan->state) {
        case CHAN_IDLE:
            if (chan == ws->idle) {
                ws->idle = chan->next;
            }
            break;

        case CHAN_POLLED:
            if (chan == ws->polled) {
                ws->polled = chan->next;
            }
            break;

        case CHAN_PENDING:
            if (chan == ws->pending) {
                ws->pending = chan->next;
            }
            break;

        default:
            assert_disabled(!"invalid channel state in deregister");
        }
    }
    chan->state = CHAN_UNREGISTERED;

#ifndef NDEBUG
    chan->prev = chan->next = NULL;
#endif

    return SYS_ERR_OK;
}
/**
 * \brief Cancel a previous callback registration
 *
 * Remove the registration for a callback on the given channel.
 * This function must only be called when enabled.
 *
 * \param chan Waitset's per-channel state
 */
errval_t waitset_chan_deregister(struct waitset_chanstate *chan)
{
    dispatcher_handle_t handle = disp_disable();
    errval_t err = waitset_chan_deregister_disabled(chan);
    disp_enable(handle);
    return err;
}
/**
 * \brief Migrate callback registrations to a new waitset
 *
 * \param chan Old waitset's per-channel state to migrate
 * \param new_ws New waitset to migrate to
 */
void waitset_chan_migrate(struct waitset_chanstate *chan,
                          struct waitset *new_ws)
{
    struct waitset *ws = chan->waitset;

    // only when registered
    if (ws == NULL) {
        return;
    }

    switch (chan->state) {
    case CHAN_IDLE:
        if (chan->next == chan) {
            assert(chan->prev == chan);
            assert(ws->idle == chan);
            ws->idle = NULL;
        } else {
            chan->prev->next = chan->next;
            chan->next->prev = chan->prev;
            if (ws->idle == chan) {
                ws->idle = chan->next;
            }
        }

        if (new_ws->idle == NULL) {
            new_ws->idle = chan;
            chan->next = chan->prev = chan;
        } else {
            chan->next = new_ws->idle;
            chan->prev = new_ws->idle->prev;
            chan->next->prev = chan;
            chan->prev->next = chan;
        }
        break;

    case CHAN_POLLED:
        if (chan->next == chan) {
            assert(chan->prev == chan);
            assert(ws->polled == chan);
            ws->polled = NULL;
        } else {
            chan->prev->next = chan->next;
            chan->next->prev = chan->prev;
            if (ws->polled == chan) {
                ws->polled = chan->next;
            }
        }

        if (new_ws->polled == NULL) {
            new_ws->polled = chan;
            chan->next = chan->prev = chan;
        } else {
            chan->next = new_ws->polled;
            chan->prev = new_ws->polled->prev;
            chan->next->prev = chan;
            chan->prev->next = chan;
        }
        break;

    case CHAN_PENDING:
        if (chan->next == chan) {
            assert(chan->prev == chan);
            assert(ws->pending == chan);
            ws->pending = NULL;
        } else {
            chan->prev->next = chan->next;
            chan->next->prev = chan->prev;
            if (ws->pending == chan) {
                ws->pending = chan->next;
            }
        }

        if (new_ws->pending == NULL) {
            new_ws->pending = chan;
            chan->next = chan->prev = chan;
        } else {
            chan->next = new_ws->pending;
            chan->prev = new_ws->pending->prev;
            chan->next->prev = chan;
            chan->prev->next = chan;
        }
        break;

    case CHAN_UNREGISTERED:
        // nothing to do
        break;
    }

    // remember new waitset association
    chan->waitset = new_ws;
}
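
/*
 * Example (sketch): moving a channel to a second waitset so that a dedicated
 * thread can service it. ws2 is assumed to belong to the same dispatcher,
 * since waitsets are per-dispatcher; c->waitset_state follows the embedding
 * convention shown earlier.
 *
 *   waitset_chan_migrate(&c->waitset_state, ws2);
 *   // subsequent events on the channel are delivered via ws2
 */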
/**
 * \brief Trigger an event callback on a channel
 *
 * Marks the given channel as having a pending event, causing some future call
 * to get_next_event() to return the registered closure.
 * This function must only be called when disabled.
 *
 * \param chan Waitset's per-channel state
 * \param handle Current dispatcher handle
 */
errval_t waitset_chan_trigger_disabled(struct waitset_chanstate *chan,
                                       dispatcher_handle_t handle)
{
    assert_disabled(chan != NULL);
    struct waitset *ws = chan->waitset;
    assert_disabled(ws != NULL);
    assert_disabled(chan->prev != NULL && chan->next != NULL);

    // no-op if already pending
    if (chan->state == CHAN_PENDING) {
        return SYS_ERR_OK;
    }

    // remove from previous queue (either idle or polled)
    if (chan->next == chan) {
        assert_disabled(chan->prev == chan);
        if (chan->state == CHAN_IDLE) {
            assert_disabled(ws->idle == chan);
            ws->idle = NULL;
        } else {
            assert_disabled(chan->state == CHAN_POLLED);
            assert_disabled(ws->polled == chan);
            ws->polled = NULL;
        }
    } else {
        chan->prev->next = chan->next;
        chan->next->prev = chan->prev;
        if (chan->state == CHAN_IDLE) {
            if (ws->idle == chan) {
                ws->idle = chan->next;
            }
        } else {
            assert_disabled(chan->state == CHAN_POLLED);
            if (ws->polled == chan) {
                ws->polled = chan->next;
            }
        }
    }

    // is there a thread blocked on this waitset? if so, awaken it with the event
    if (ws->waiting_threads != NULL) {
        chan->waitset = NULL;
#ifndef NDEBUG
        chan->prev = chan->next = NULL;
#endif
        chan->state = CHAN_UNREGISTERED;
        struct thread *t;
        t = thread_unblock_one_disabled(handle, &ws->waiting_threads, chan);
        assert_disabled(t == NULL);
        return SYS_ERR_OK;
    }

    // else mark channel pending and move to end of pending event queue
    chan->state = CHAN_PENDING;
    if (ws->pending == NULL) {
        ws->pending = chan;
        chan->next = chan->prev = chan;
    } else {
        chan->next = ws->pending;
        chan->prev = ws->pending->prev;
        assert_disabled(ws->pending->next != NULL);
        assert_disabled(ws->pending->prev != NULL);
        assert_disabled(chan->prev != NULL);
        chan->next->prev = chan;
        chan->prev->next = chan;
    }

    return SYS_ERR_OK;
}
/**
 * \brief Trigger an event callback on a channel
 *
 * Marks the given channel as having a pending event, causing some future call
 * to get_next_event() to return the registered closure.
 * This function must only be called when enabled.
 *
 * \param chan Waitset's per-channel state
 */
errval_t waitset_chan_trigger(struct waitset_chanstate *chan)
{
    dispatcher_handle_t disp = disp_disable();
    errval_t err = waitset_chan_trigger_disabled(chan, disp);
    disp_enable(disp);
    return err;
}
/**
 * \brief Trigger a specific event callback on an unregistered channel
 *
 * This function is equivalent to waitset_chan_register_disabled() immediately
 * followed by waitset_chan_trigger_disabled(), but avoids unnecessary queue
 * manipulation. This function must only be called when disabled.
 *
 * \param ws Waitset
 * \param chan Waitset's per-channel state
 * \param closure Event handler
 * \param handle Current dispatcher handle
 */
errval_t waitset_chan_trigger_closure_disabled(struct waitset *ws,
                                               struct waitset_chanstate *chan,
                                               struct event_closure closure,
                                               dispatcher_handle_t handle)
{
    assert_disabled(chan != NULL);
    assert_disabled(ws != NULL);

    // check if already registered
    if (chan->waitset != NULL || chan->state != CHAN_UNREGISTERED) {
        return LIB_ERR_CHAN_ALREADY_REGISTERED;
    }

    assert_disabled(chan->prev == NULL && chan->next == NULL);

    // set closure
    chan->closure = closure;

    // is there a thread blocked on this waitset? if so, awaken it with the event
    if (ws->waiting_threads != NULL) {
        struct thread *t;
        t = thread_unblock_one_disabled(handle, &ws->waiting_threads, chan);
        assert_disabled(t == NULL);
        return SYS_ERR_OK;
    }

    // mark channel pending and place on end of pending event queue
    chan->waitset = ws;
    chan->state = CHAN_PENDING;
    if (ws->pending == NULL) {
        ws->pending = chan;
        chan->next = chan->prev = chan;
    } else {
        chan->next = ws->pending;
        chan->prev = ws->pending->prev;
        chan->next->prev = chan;
        chan->prev->next = chan;
    }

    assert(ws->pending->prev != NULL && ws->pending->next != NULL);

    return SYS_ERR_OK;
}
/**
 * \brief Trigger a specific event callback on an unregistered channel
 *
 * This function is equivalent to waitset_chan_register()
 * followed by waitset_chan_trigger(), but avoids unnecessary queue
 * manipulation. This function must only be called when enabled.
 *
 * \param ws Waitset
 * \param chan Waitset's per-channel state
 * \param closure Event handler
 */
errval_t waitset_chan_trigger_closure(struct waitset *ws,
                                      struct waitset_chanstate *chan,
                                      struct event_closure closure)
{
    dispatcher_handle_t disp = disp_disable();
    errval_t err = waitset_chan_trigger_closure_disabled(ws, chan, closure, disp);
    disp_enable(disp);
    return err;
}
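
/*
 * Example (sketch): posting a one-shot deferred call to a waitset. The caller
 * provides storage for the channel state, which must live until the closure
 * has run; once the event fires, the channel returns to the unregistered
 * state and can be reused. post_handler_once and the use of CHANTYPE_OTHER
 * as a placeholder channel type are assumptions for illustration.
 *
 *   static struct waitset_chanstate oneshot; // hypothetical storage
 *
 *   void post_handler_once(struct waitset *ws, struct event_closure cl)
 *   {
 *       waitset_chanstate_init(&oneshot, CHANTYPE_OTHER);
 *       errval_t err = waitset_chan_trigger_closure(ws, &oneshot, cl);
 *       assert(err_is_ok(err));
 *   }
 */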