Add smp_cond_load_relaxed_timewait(), which extends
smp_cond_load_relaxed() to allow waiting for a finite duration.

Additional parameters allow for timeout checks and a measure of how
much slack the caller can tolerate in the timeout.

The waiting is done via the usual cpu_relax() spin-wait around the
condition variable with periodic evaluation of the time-check.
Optionally, architectural primitives may provide cheaper mechanisms,
such as waiting on a cacheline with an out-of-band timeout.

Cc: Arnd Bergmann <arnd@xxxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: linux-arch@xxxxxxxxxxxxxxx
Signed-off-by: Ankur Arora <ankur.a.arora@xxxxxxxxxx>
---
 include/asm-generic/barrier.h | 95 +++++++++++++++++++++++++++++++++++
 1 file changed, 95 insertions(+)

diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index d4f581c1e21d..d33c2701c9ee 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -273,6 +273,101 @@ do {									\
 })
 #endif
 
+#ifndef SMP_TIMEWAIT_SPIN_BASE
+#define SMP_TIMEWAIT_SPIN_BASE	16
+#endif
+
+/*
+ * Policy handler that adjusts the number of times we spin or
+ * wait for cacheline to change before evaluating the time-expr.
+ *
+ * The generic version only supports spinning.
+ */
+static inline u64 ___smp_cond_spinwait(u64 now, u64 prev, u64 end,
+				       u32 *spin, bool *wait, u64 slack)
+{
+	if (now >= end)
+		return 0;
+
+	*spin = SMP_TIMEWAIT_SPIN_BASE;
+	*wait = false;
+	return now;
+}
+
+#ifndef __smp_cond_policy
+#define __smp_cond_policy ___smp_cond_spinwait
+#endif
+
+/*
+ * Non-spin primitive that allows waiting for stores to an address,
+ * with support for a timeout. This works in conjunction with an
+ * architecturally defined policy.
+ */
+#ifndef __smp_timewait_store
+#define __smp_timewait_store(ptr, val) do { } while (0)
+#endif
+
+#ifndef __smp_cond_load_relaxed_timewait
+#define __smp_cond_load_relaxed_timewait(ptr, cond_expr, policy,	\
+					 time_expr, time_end,		\
+					 slack) ({			\
+	typeof(ptr) __PTR = (ptr);					\
+	__unqual_scalar_typeof(*ptr) VAL;				\
+	u32 __n = 0, __spin = SMP_TIMEWAIT_SPIN_BASE;			\
+	u64 __prev = 0, __end = (time_end);				\
+	u64 __slack = slack;						\
+	bool __wait = false;						\
+									\
+	for (;;) {							\
+		VAL = READ_ONCE(*__PTR);				\
+		if (cond_expr)						\
+			break;						\
+		cpu_relax();						\
+		if (++__n < __spin)					\
+			continue;					\
+		if (!(__prev = policy((time_expr), __prev, __end,	\
+				      &__spin, &__wait, __slack)))	\
+			break;						\
+		if (__wait)						\
+			__smp_timewait_store(__PTR, VAL);		\
+		__n = 0;						\
+	}								\
+	(typeof(*ptr))VAL;						\
+})
+#endif
+
+#define __check_time_types(type, a, b)					\
+	(__same_type(typeof(a), type) &&				\
+	 __same_type(typeof(b), type))
+
+/**
+ * smp_cond_load_relaxed_timewait() - (Spin) wait for cond with no ordering
+ * guarantees until a timeout expires.
+ * @ptr: pointer to the variable to wait on
+ * @cond: boolean expression to wait for
+ * @time_expr: monotonic expression that evaluates to the current time
+ * @time_end: end time, compared against time_expr
+ * @slack: how much timer overshoot can the caller tolerate?
+ * Useful for when we go into wait states. A value of 0 indicates a high
+ * tolerance.
+ *
+ * Note that all times (time_expr, time_end, and slack) are in microseconds,
+ * with no mandated precision.
+ *
+ * Equivalent to using READ_ONCE() on the condition variable.
+ */
+#define smp_cond_load_relaxed_timewait(ptr, cond_expr, time_expr,	\
+				       time_end, slack) ({		\
+	__unqual_scalar_typeof(*ptr) _val;				\
+	BUILD_BUG_ON_MSG(!__check_time_types(u64, time_expr, time_end),	\
+			 "incompatible time units");			\
+	_val = __smp_cond_load_relaxed_timewait(ptr, cond_expr,		\
+						__smp_cond_policy,	\
+						time_expr, time_end,	\
+						slack);			\
+	(typeof(*ptr))_val;						\
+})
+
 /*
  * pmem_wmb() ensures that all stores for which the modification
  * are written to persistent storage by preceding instructions have
-- 
2.43.5