From 7c313ac654a6a459bfbb77fef79515a969475769 Mon Sep 17 00:00:00 2001
From: "John F. Carr"
Date: Wed, 22 Jan 2025 14:55:46 -0500
Subject: [PATCH] Make futex and condition variable waiting interfaces more
 similar.

Also, move some code out of header files.  The implementations already
require function calls and system calls, so inlining them adds little
value.
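As an illustration (a sketch of the call shapes only, not part of the
diff below), both configurations now go through the same cond_* entry
points; the pthread build just passes the lock and condition variable
explicitly:

    cond_wait(&g->cilkified, 1);                  /* futex build */
    cond_wait(&g->cilkified, 1,                   /* pthread build */
              &g->cilkified_cond_var, &g->cilkified_lock);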
---
 runtime/CMakeLists.txt |   1 +
 runtime/condvar.c      | 159 ++++++++++++++++++++++++++++++++++++
 runtime/global.c       |   4 +-
 runtime/global.h       |  16 ++--
 runtime/init.c         |   2 +
 runtime/mutex.h        |  46 ++++++++++-
 runtime/worker_coord.h | 180 ++++++++++-------------------------
 runtime/worker_sleep.h |   2 +-
 8 files changed, 260 insertions(+), 150 deletions(-)
 create mode 100644 runtime/condvar.c

diff --git a/runtime/CMakeLists.txt b/runtime/CMakeLists.txt
index 6ea86d05..decdeac7 100644
--- a/runtime/CMakeLists.txt
+++ b/runtime/CMakeLists.txt
@@ -16,6 +16,7 @@ set(CHEETAH_SOURCES
     personality.c
     sched_stats.c
     scheduler.c
+    condvar.c
 )
 
 set(CHEETAH_ABI_SOURCE
diff --git a/runtime/condvar.c b/runtime/condvar.c
new file mode 100644
index 00000000..c58bcfcb
--- /dev/null
+++ b/runtime/condvar.c
@@ -0,0 +1,159 @@
+#include <err.h>
+#include <stdlib.h>
+
+#include "mutex.h"
+
+#if defined __FreeBSD__ || defined __OpenBSD__ || defined __APPLE__
+#define HAVE_ERRC 1
+#endif
+
+#if USE_FUTEX
+
+#include <errno.h>
+#include <limits.h> // INT_MAX
+
+#ifdef __linux__
+#include <linux/futex.h>
+#include <stdint.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#define WAIT FUTEX_WAIT_PRIVATE
+#define WAKE FUTEX_WAKE_PRIVATE
+#endif
+
+#ifdef __FreeBSD__
+#include <sys/types.h>
+#include <sys/umtx.h>
+#define WAIT UMTX_OP_WAIT
+#define WAKE UMTX_OP_WAKE_PRIVATE
+#endif
+
+#ifdef __OpenBSD__
+// This is not tested.
+#include <sys/time.h>
+#include <sys/futex.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#define WAIT FUTEX_WAIT
+#define WAKE FUTEX_WAKE
+#endif
+
+// Convenience wrapper for the futex system call.
+// In this context only success (true) or failure needs to be returned.
+// Linux syscall() returns long.  Here the values fit in an int.
+// OpenBSD syscall() returns int.
+// FreeBSD _umtx_op returns int.
+static inline bool futex(futex_t *obj, int futex_op, futex_val_t val) {
+#if defined __linux__
+    return syscall(SYS_futex, obj, futex_op, val, NULL, NULL, 0) >= 0;
+#elif defined __FreeBSD__
+    return _umtx_op(obj, futex_op, val, NULL, NULL) >= 0;
+#elif defined __OpenBSD__
+    // This is not tested.  Go through syscall(2) directly; the libc
+    // futex() wrapper shares this function's name.
+    return syscall(SYS_futex, obj, futex_op, val, NULL, NULL) >= 0;
+#else
+// TODO: Private interface __ulock_wait on Mac OS?
+// TODO: C++20 std::atomic<>::wait, notify_one, notify_all
+#error "no futex implementation"
+    return false;
+#endif
+}
+
+// Wait for the object to become unequal to the value.
+// The acquire here pairs with the release in cond_post.
+void cond_wait(futex_t *obj, futex_val_t val) {
+    while (atomic_load_explicit(obj, memory_order_acquire) == val) {
+        if (futex(obj, WAIT, val)) {
+            // Formally the futex operation does not include a fence.
+            atomic_thread_fence(memory_order_acquire);
+            break;
+        }
+        if (errno != EAGAIN)
+            err(EXIT_FAILURE, "futex(FUTEX_WAIT)");
+    }
+}
+
+// Set the futex pointed to by `obj` to `val`, and wake up one
+// thread waiting on that futex.
+void cond_post(futex_t *obj, futex_val_t val) {
+    atomic_store_explicit(obj, val, memory_order_release);
+    if (!futex(obj, WAKE, 1))
+        err(EXIT_FAILURE, "futex(FUTEX_WAKE)");
+}
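+
+// Illustrative pairing (comment only; `flag` names a hypothetical
+// futex_t):
+//
+//     // waiter                        // poster
+//     cond_wait(&flag, 0);             ... publish results ...
+//     // here flag != 0                cond_post(&flag, 1);
+//
+// The release store in cond_post makes the poster's prior writes
+// visible to the waiter via the acquire load (or fence) in cond_wait.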
+
+// Set the futex pointed to by `obj` to `val`, and wake up all
+// threads waiting on that futex.
+void cond_broadcast(futex_t *obj, futex_val_t val) {
+    atomic_store_explicit(obj, val, memory_order_release);
+    if (!futex(obj, WAKE, INT_MAX))
+        err(EXIT_FAILURE, "futex(FUTEX_WAKE)");
+}
+
+void cond_wake_some(futex_t *obj, int count) {
+    if (!futex(obj, WAKE, count))
+        err(EXIT_FAILURE, "futex(FUTEX_WAKE)");
+}
+
+#else // begin pthread implementation
+
+#include <pthread.h>
+
+#if HAVE_ERRC
+#define ERRCHK(MSG)                                                            \
+    if (__builtin_expect(error, 0)) errc(EXIT_FAILURE, error, MSG)
+#else
+#define ERRCHK(MSG)                                                            \
+    if (__builtin_expect(error, 0)) errx(EXIT_FAILURE, MSG " returned %d", error)
+#endif
+
+void cond_wait(futex_t *obj, futex_val_t val,
+               pthread_cond_t *cond, pthread_mutex_t *mutex) {
+    int error;
+    // No fast path check.  This is only called if *obj == val.
+    error = pthread_mutex_lock(mutex);
+    ERRCHK("pthread_mutex_lock");
+    while (atomic_load_explicit(obj, memory_order_acquire) == val) {
+        error = pthread_cond_wait(cond, mutex);
+        ERRCHK("pthread_cond_wait");
+    }
+    error = pthread_mutex_unlock(mutex);
+    ERRCHK("pthread_mutex_unlock");
+}
+
+void cond_post(futex_t *obj, futex_val_t val,
+               pthread_cond_t *cond, pthread_mutex_t *mutex) {
+    int error;
+
+    error = pthread_mutex_lock(mutex);
+    ERRCHK("pthread_mutex_lock");
+
+    atomic_store_explicit(obj, val, memory_order_release);
+    error = pthread_cond_signal(cond);
+    ERRCHK("pthread_cond_signal");
+    error = pthread_mutex_unlock(mutex);
+    ERRCHK("pthread_mutex_unlock");
+}
+
+void cond_broadcast(futex_t *obj, futex_val_t val,
+                    pthread_cond_t *cond, pthread_mutex_t *mutex) {
+    int error;
+
+    error = pthread_mutex_lock(mutex);
+    ERRCHK("pthread_mutex_lock");
+    atomic_store_explicit(obj, val, memory_order_release);
+    error = pthread_cond_broadcast(cond);
+    ERRCHK("pthread_cond_broadcast");
+    error = pthread_mutex_unlock(mutex);
+    ERRCHK("pthread_mutex_unlock");
+}
+
+void cond_wake_some_locked(futex_t *obj, futex_val_t val,
+                           pthread_cond_t *cond, int count) {
+    int error;
+
+    atomic_store_explicit(obj, val, memory_order_release);
+    while (count-- > 0) {
+        error = pthread_cond_signal(cond);
+        ERRCHK("pthread_cond_signal");
+    }
+}
+
+#endif // USE_FUTEX
diff --git a/runtime/global.c b/runtime/global.c
index 45097ae7..b7e09486 100644
--- a/runtime/global.c
+++ b/runtime/global.c
@@ -57,14 +57,16 @@ static global_state *global_state_allocate() {
     cilk_mutex_init(&g->index_lock);
     cilk_mutex_init(&g->print_lock);
 
-    atomic_store_explicit(&g->cilkified_futex, 0, memory_order_relaxed);
+    atomic_store_explicit(&g->cilkified, 0, memory_order_relaxed);
 
+#if !USE_FUTEX
     // TODO: Convert to cilk_* equivalents
     pthread_mutex_init(&g->cilkified_lock, NULL);
     pthread_cond_init(&g->cilkified_cond_var, NULL);
 
     pthread_mutex_init(&g->disengaged_lock, NULL);
     pthread_cond_init(&g->disengaged_cond_var, NULL);
+#endif
 
     return g;
 }
diff --git a/runtime/global.h b/runtime/global.h
index 6340685b..370d4079 100644
--- a/runtime/global.h
+++ b/runtime/global.h
@@ -69,15 +69,14 @@ struct global_state {
 
     // These fields are shared between the boss thread and a couple workers.
 
-    // NOTE: We can probably update the runtime system so that, when it uses
-    // cilkified_futex, it does not also use the cilkified field.  But the
-    // cilkified field is helpful for debugging, and it seems unlikely that
-    // this optimization would improve performance.
-    _Atomic uint32_t cilkified_futex __attribute__((aligned(CILK_CACHE_LINE)));
-    atomic_bool cilkified;
+#if USE_FUTEX
+    futex_t cilkified __attribute__((aligned(CILK_CACHE_LINE)));
+#else
+    futex_t cilkified;
     pthread_mutex_t cilkified_lock;
     pthread_cond_t cilkified_cond_var;
+#endif
 
     // These fields are shared among all workers in the work-stealing loop.
 
@@ -98,10 +97,11 @@ struct global_state {
 #define GET_SENTINEL(D) ((D) & 0xffffffff)
 #define DISENGAGED_SENTINEL(A, B) (((uint64_t)(A) << 32) | (uint32_t)(B))
 
-    _Atomic uint32_t disengaged_thieves_futex __attribute__((aligned(CILK_CACHE_LINE)));
-
+    futex_t disengaged_thieves __attribute__((aligned(CILK_CACHE_LINE)));
+#if !USE_FUTEX
     pthread_mutex_t disengaged_lock;
     pthread_cond_t disengaged_cond_var;
+#endif
 
     cilk_mutex print_lock; // global lock for printing messages
 
diff --git a/runtime/init.c b/runtime/init.c
index c2fb865d..d8f43aad 100644
--- a/runtime/init.c
+++ b/runtime/init.c
@@ -659,6 +659,7 @@ static void global_state_deinit(global_state *g) {
     cilk_internal_malloc_global_destroy(g); // internal malloc last
     cilk_mutex_destroy(&(g->print_lock));
     cilk_mutex_destroy(&(g->index_lock));
+#if !USE_FUTEX
     // TODO: Convert to cilk_* equivalents
     pthread_mutex_destroy(&g->cilkified_lock);
     pthread_cond_destroy(&g->cilkified_cond_var);
@@ -666,6 +667,7 @@ static void global_state_deinit(global_state *g) {
     /* pthread_cond_destroy(&g->start_thieves_cond_var); */
     pthread_mutex_destroy(&g->disengaged_lock);
     pthread_cond_destroy(&g->disengaged_cond_var);
+#endif
     free(g->worker_args);
     g->worker_args = NULL;
     free(g->workers);
diff --git a/runtime/mutex.h b/runtime/mutex.h
index c2ecb001..3a78219f 100644
--- a/runtime/mutex.h
+++ b/runtime/mutex.h
@@ -9,9 +9,28 @@ typedef union cilk_mutex cilk_mutex;
 #include <pthread.h>
 #include <stdatomic.h>
 #include <stdbool.h>
+#include <limits.h>
 
 #include "rts-config.h"
 
+// Linux uses uint32_t.  OpenBSD copies Linux.
+// FreeBSD uses long.
+// Other systems don't use the futex interface and can pick either.
+
+#ifdef __FreeBSD__
+typedef long futex_val_t;
+#define FUTEX_MAX LONG_MAX
+#define USE_FUTEX 1
+#else
+typedef uint32_t futex_val_t;
+#define FUTEX_MAX 0x7fffffff
+#if defined __linux__ || defined __OpenBSD__
+#define USE_FUTEX 1
+#endif
+#endif
+
+typedef _Atomic futex_val_t futex_t;
+
 #ifndef __APPLE__
 #define USE_SPINLOCK 1
 #endif
@@ -85,4 +104,29 @@ static inline void cilk_mutex_destroy(cilk_mutex *lock) {
     pthread_mutex_destroy(&(lock->posix));
 #endif
 }
-#endif
+
+#if USE_FUTEX
+// Wait for *obj to be unequal to val.
+extern void cond_wait(futex_t *obj, futex_val_t val);
+// Set *obj = val and wake up one waiter.
+extern void cond_post(futex_t *obj, futex_val_t val);
+// Set *obj = val and wake up all waiters.
+extern void cond_broadcast(futex_t *obj, futex_val_t val);
+// Wake up COUNT waiters.  The value has already been updated.
+extern void cond_wake_some(futex_t *obj, int count);
+#else
+extern void cond_wait(futex_t *obj, futex_val_t val,
+                      pthread_cond_t *cond,
+                      pthread_mutex_t *mutex);
+extern void cond_post(futex_t *obj, futex_val_t val,
+                      pthread_cond_t *cond,
+                      pthread_mutex_t *mutex);
+extern void cond_broadcast(futex_t *obj, futex_val_t val,
+                           pthread_cond_t *cond,
+                           pthread_mutex_t *mutex);
+// This function is called with the lock held.
+extern void cond_wake_some_locked(futex_t *obj, futex_val_t val,
+                                  pthread_cond_t *cond, int count);
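+// Note (comment only): in the pthread build the implementations store
+// the value and signal while holding `mutex`, so a waiter cannot
+// recheck *obj and block between the store and the wake (the classic
+// lost-wakeup race).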
+#endif // USE_FUTEX
+#endif // _CILK_MUTEX_H
+
diff --git a/runtime/worker_coord.h b/runtime/worker_coord.h
index c27f573d..2699241f 100644
--- a/runtime/worker_coord.h
+++ b/runtime/worker_coord.h
@@ -7,68 +7,10 @@
 #include <limits.h>
 #include <stdatomic.h>
 #include <stdint.h>
-
-#ifdef __linux__
-#include <errno.h>
-#include <linux/futex.h>
-#include <sys/syscall.h>
-#include <unistd.h>
-#endif
+#include <stdbool.h>
 
 #include "global.h"
-
-#define USER_USE_FUTEX 1
-#ifdef __linux__
-#define USE_FUTEX USER_USE_FUTEX
-#else
-#define USE_FUTEX 0
-#endif
-
-#if USE_FUTEX
-//=========================================================
-// Primitive futex operations.
-//=========================================================
-#define errExit(msg)                                                           \
-    do {                                                                       \
-        perror(msg);                                                           \
-        exit(EXIT_FAILURE);                                                    \
-    } while (false)
-
-// Convenience wrapper for futex syscall.
-static inline long futex(_Atomic uint32_t *uaddr, int futex_op, uint32_t val,
-                         const struct timespec *timeout, uint32_t *uaddr2,
-                         uint32_t val3) {
-    return syscall(SYS_futex, uaddr, futex_op, val, timeout, uaddr2, val3);
-}
-
-// Wait for the futex pointed to by `futexp` to become 1.
-static inline void fwait(_Atomic uint32_t *futexp) {
-    // We don't worry about spurious wakeups here, since we ensure that all
-    // calls to fwait are contained in their own loops that effectively check
-    // for spurious wakeups.
-    long s = futex(futexp, FUTEX_WAIT_PRIVATE, 0, NULL, NULL, 0);
-    if (__builtin_expect(s == -1 && errno != EAGAIN, false))
-        errExit("futex-FUTEX_WAIT");
-}
-
-// Set the futex pointed to by `futexp` to 1, and wake up 1 thread waiting on
-// that futex.
-static inline void fpost(_Atomic uint32_t *futexp) {
-    atomic_store_explicit(futexp, 1, memory_order_release);
-    long s = futex(futexp, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0);
-    if (s == -1)
-        errExit("futex-FUTEX_WAKE");
-}
-
-// Set the futex pointed to by `futexp` to 1, and wake up all threads waiting on
-// that futex.
-static inline void fbroadcast(_Atomic uint32_t *futexp) {
-    atomic_store_explicit(futexp, 1, memory_order_release);
-    long s = futex(futexp, FUTEX_WAKE_PRIVATE, INT_MAX, NULL, NULL, 0);
-    if (s == -1)
-        errExit("futex-FUTEX_WAKE");
-}
-#endif
+#include "mutex.h"
 
 //=========================================================
 // Common internal interface for managing execution of workers.
 //=========================================================
@@ -98,22 +40,15 @@ __attribute__((always_inline)) static inline void busy_pause(void) {
 static inline void set_cilkified(global_state *g) {
     // Set g->cilkified = 1, indicating that the execution is now cilkified.
     atomic_store_explicit(&g->cilkified, 1, memory_order_release);
-#if USE_FUTEX
-    atomic_store_explicit(&g->cilkified_futex, 0, memory_order_release);
-#endif
 }
 
 // Mark the computation as no longer cilkified and signal the thread that
 // originally cilkified the execution.
 static inline void signal_uncilkified(global_state *g) {
 #if USE_FUTEX
-    atomic_store_explicit(&g->cilkified, 0, memory_order_release);
-    fpost(&g->cilkified_futex);
+    cond_post(&g->cilkified, 0);
 #else
-    pthread_mutex_lock(&(g->cilkified_lock));
-    atomic_store_explicit(&g->cilkified, 0, memory_order_release);
-    pthread_cond_signal(&g->cilkified_cond_var);
-    pthread_mutex_unlock(&(g->cilkified_lock));
+    cond_post(&g->cilkified, 0, &g->cilkified_cond_var, &g->cilkified_lock);
 #endif
 }
 
@@ -121,30 +56,16 @@ static inline void signal_uncilkified(global_state *g) {
 // region.
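+// A bounded busy-wait (BUSY_LOOP_SPIN iterations) catches short
+// cilkified regions cheaply before falling back to blocking.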
 static inline void wait_while_cilkified(global_state *g) {
     unsigned int fail = 0;
-    while (fail++ < BUSY_LOOP_SPIN) {
+    do {
         if (!atomic_load_explicit(&g->cilkified, memory_order_acquire)) {
             return;
         }
         busy_pause();
-    }
+    } while (fail++ < BUSY_LOOP_SPIN);
 #if USE_FUTEX
-    while (atomic_load_explicit(&g->cilkified, memory_order_acquire)) {
-        fwait(&g->cilkified_futex);
-    }
+    cond_wait(&g->cilkified, 1); // Wait as long as cilkified == 1.
 #else
-    // TODO: Convert pthread_mutex_lock, pthread_mutex_unlock, and
-    // pthread_cond_wait to cilk_* equivalents.
-    pthread_mutex_lock(&(g->cilkified_lock));
-
-    // There may be a *very unlikely* scenario where the Cilk computation has
-    // already been completed before even starting to wait.  In that case, do
-    // not wait and continue directly.  Also handle spurious wakeups with a
-    // 'while' instead of an 'if'.
-    while (atomic_load_explicit(&g->cilkified, memory_order_acquire)) {
-        pthread_cond_wait(&(g->cilkified_cond_var), &(g->cilkified_lock));
-    }
-
-    pthread_mutex_unlock(&(g->cilkified_lock));
+    cond_wait(&g->cilkified, 1, &g->cilkified_cond_var, &g->cilkified_lock);
 #endif
 }
 
@@ -157,8 +78,7 @@ static inline void reset_disengaged_var(global_state *g) {
 #if !USE_FUTEX
     pthread_mutex_lock(&g->disengaged_lock);
 #endif
-    atomic_store_explicit(&g->disengaged_thieves_futex, 0,
-                          memory_order_release);
+    atomic_store_explicit(&g->disengaged_thieves, 0, memory_order_release);
 #if !USE_FUTEX
     pthread_mutex_unlock(&g->disengaged_lock);
 #endif
@@ -177,56 +97,51 @@ static inline void request_more_thieves(global_state *g, uint32_t count) {
 
     // This step synchronizes with concurrent calls to request_more_thieves and
    // concurrent calls to try_to_disengage_thief.
     while (true) {
-        uint32_t disengaged_thieves_futex = atomic_load_explicit(
-            &g->disengaged_thieves_futex, memory_order_acquire);
+        futex_val_t disengaged_thieves = atomic_load_explicit(
+            &g->disengaged_thieves, memory_order_acquire);
 
-        int32_t max_to_wake = max_requests - disengaged_thieves_futex;
+        int32_t max_to_wake = max_requests - disengaged_thieves;
         if (max_to_wake <= 0)
             return;
         uint64_t to_wake = max_to_wake < (int32_t)count ? max_to_wake : count;
         if (atomic_compare_exchange_strong_explicit(
-                &g->disengaged_thieves_futex, &disengaged_thieves_futex,
-                disengaged_thieves_futex + to_wake, memory_order_release,
+                &g->disengaged_thieves, &disengaged_thieves,
+                disengaged_thieves + to_wake, memory_order_release,
                 memory_order_relaxed)) {
             // We successfully updated the futex.  Wake the thief threads
             // waiting on this futex.
-            long s = futex(&g->disengaged_thieves_futex, FUTEX_WAKE_PRIVATE,
-                           to_wake, NULL, NULL, 0);
-            if (s == -1)
-                errExit("futex-FUTEX_WAKE");
+            cond_wake_some(&g->disengaged_thieves, to_wake);
             return;
         }
     }
 #else
     pthread_mutex_lock(&g->disengaged_lock);
-    uint32_t disengaged_thieves_futex = atomic_load_explicit(
-        &g->disengaged_thieves_futex, memory_order_acquire);
+    uint32_t disengaged_thieves = atomic_load_explicit(
+        &g->disengaged_thieves, memory_order_acquire);
 
-    int32_t max_to_wake = max_requests - disengaged_thieves_futex;
+    int32_t max_to_wake = max_requests - disengaged_thieves;
     if (max_to_wake <= 0) {
         pthread_mutex_unlock(&g->disengaged_lock);
         return;
     }
     uint32_t to_wake = max_to_wake < (int32_t)count ? max_to_wake : count;
-    atomic_store_explicit(&g->disengaged_thieves_futex,
-                          disengaged_thieves_futex + to_wake,
-                          memory_order_release);
-    while (to_wake-- > 0) {
-        pthread_cond_signal(&g->disengaged_cond_var);
-    }
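+    // Publish the new request count and signal one waiter per granted
+    // slot; disengaged_lock is held across both steps.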
+    cond_wake_some_locked(&g->disengaged_thieves,
+                          disengaged_thieves + to_wake,
+                          &g->disengaged_cond_var, to_wake);
     pthread_mutex_unlock(&g->disengaged_lock);
 #endif
 }
 
 #if USE_FUTEX
-static inline uint32_t thief_disengage_futex(_Atomic uint32_t *futexp) {
+static inline uint32_t thief_disengage_futex(futex_t *cilkified,
+                                             futex_t *futexp) {
     // This step synchronizes with calls to request_more_thieves.
     while (true) {
         // Decrement the futex when woken up.  The loop and compare-exchange are
         // designed to handle cases where multiple threads waiting on the futex
         // were woken up and where there may be spurious wakeups.
-        uint32_t val;
+        futex_val_t val;
         while ((val = atomic_load_explicit(futexp, memory_order_relaxed)) > 0) {
             if (atomic_compare_exchange_weak_explicit(futexp, &val, val - 1,
                                                       memory_order_release,
@@ -236,10 +151,13 @@ static inline uint32_t thief_disengage_futex(_Atomic uint32_t *futexp) {
             busy_loop_pause();
         }
 
-        // Wait on the futex.
-        long s = futex(futexp, FUTEX_WAIT_PRIVATE, 0, NULL, NULL, 0);
-        if (__builtin_expect(s == -1 && errno != EAGAIN, false))
-            errExit("futex-FUTEX_WAIT");
+        // The futex was 0 when the loop above terminated.
+        // Possibly it is 0 because Cilk is over.
+        // XXX Is there still a race here?
+        if (!atomic_load_explicit(cilkified, memory_order_acquire))
+            return 0;
+
+        cond_wait(futexp, 0);
     }
 }
 #else
@@ -261,9 +179,9 @@ static inline uint32_t thief_disengage_cond_var(_Atomic uint32_t *count,
 #endif
 static inline uint32_t thief_disengage(global_state *g) {
 #if USE_FUTEX
-    return thief_disengage_futex(&g->disengaged_thieves_futex);
+    return thief_disengage_futex(&g->cilkified, &g->disengaged_thieves);
 #else
-    return thief_disengage_cond_var(&g->disengaged_thieves_futex,
+    return thief_disengage_cond_var(&g->disengaged_thieves,
                                     &g->disengaged_lock,
                                     &g->disengaged_cond_var);
 #endif
@@ -272,18 +190,10 @@ static inline uint32_t thief_disengage(global_state *g) {
 
 // Signal to all disengaged thief threads to resume work-stealing.
 static inline void wake_all_disengaged(global_state *g) {
 #if USE_FUTEX
-    atomic_store_explicit(&g->disengaged_thieves_futex, INT_MAX,
-                          memory_order_release);
-    long s = futex(&g->disengaged_thieves_futex, FUTEX_WAKE_PRIVATE, INT_MAX,
-                   NULL, NULL, 0);
-    if (s == -1)
-        errExit("futex-FUTEX_WAKE");
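+    // With the count saturated at FUTEX_MAX, thieves that reach
+    // thief_disengage or thief_should_wait see a positive value,
+    // decrement it, and return instead of blocking.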
+    cond_broadcast(&g->disengaged_thieves, FUTEX_MAX);
 #else
-    pthread_mutex_lock(&g->disengaged_lock);
-    atomic_store_explicit(&g->disengaged_thieves_futex, INT_MAX,
-                          memory_order_release);
-    pthread_cond_broadcast(&g->disengaged_cond_var);
-    pthread_mutex_unlock(&g->disengaged_lock);
+    cond_broadcast(&g->disengaged_thieves, FUTEX_MAX,
+                   &g->disengaged_cond_var, &g->disengaged_lock);
 #endif
 }
 
@@ -304,8 +214,8 @@ static inline uint32_t thief_wait(global_state *g) {
 // already, update the global state to indicate that this worker is engaged in
 // work stealing.
 static inline bool thief_should_wait(global_state *g) {
-    _Atomic uint32_t *futexp = &g->disengaged_thieves_futex;
-    uint32_t val = atomic_load_explicit(futexp, memory_order_relaxed);
+    futex_t *futexp = &g->disengaged_thieves;
+    futex_val_t val = atomic_load_explicit(futexp, memory_order_relaxed);
 #if USE_FUTEX
     while (val > 0) {
         if (atomic_compare_exchange_weak_explicit(futexp, &val, val - 1,
@@ -337,18 +247,10 @@ static inline bool thief_should_wait(global_state *g) {
 // g->terminate == 1).
 static inline void wake_thieves(global_state *g) {
 #if USE_FUTEX
-    atomic_store_explicit(&g->disengaged_thieves_futex, g->nworkers - 1,
-                          memory_order_release);
-    long s = futex(&g->disengaged_thieves_futex, FUTEX_WAKE_PRIVATE, INT_MAX,
-                   NULL, NULL, 0);
-    if (s == -1)
-        errExit("futex-FUTEX_WAKE");
+    cond_broadcast(&g->disengaged_thieves, g->nworkers - 1);
 #else
-    pthread_mutex_lock(&g->disengaged_lock);
-    atomic_store_explicit(&g->disengaged_thieves_futex, g->nworkers - 1,
-                          memory_order_release);
-    pthread_cond_broadcast(&g->disengaged_cond_var);
-    pthread_mutex_unlock(&g->disengaged_lock);
+    cond_broadcast(&g->disengaged_thieves, g->nworkers - 1,
+                   &g->disengaged_cond_var, &g->disengaged_lock);
 #endif
 }
 
diff --git a/runtime/worker_sleep.h b/runtime/worker_sleep.h
index 9099af93..c6fd62ed 100644
--- a/runtime/worker_sleep.h
+++ b/runtime/worker_sleep.h
@@ -312,7 +312,7 @@ maybe_reengage_workers(global_state *const rts, worker_id self,
     if (request == 0 && counts.sentinels == 0 &&
         counts.active < (int32_t)nworkers) {
         int32_t current_request = atomic_load_explicit(
-            &rts->disengaged_thieves_futex, memory_order_relaxed);
+            &rts->disengaged_thieves, memory_order_relaxed);
         if (current_request < ((counts.active + 3) / 4)) {
             request = ((counts.active + 3) / 4) - current_request;
             WHEN_SCHED_STATS(w->l->stats.onesen_rqsts += request);