30 #ifndef _GLIBCXX_ATOMIC_WAIT_H
31 #define _GLIBCXX_ATOMIC_WAIT_H 1
33 #ifdef _GLIBCXX_SYSHDR
34 #pragma GCC system_header
39 #if __glibcxx_atomic_wait
42 #include <bits/gthr.h>
45 #ifdef _GLIBCXX_HAVE_LINUX_FUTEX
55 namespace std _GLIBCXX_VISIBILITY(default)
57 _GLIBCXX_BEGIN_NAMESPACE_VERSION
#ifdef _GLIBCXX_HAVE_LINUX_FUTEX
#define _GLIBCXX_HAVE_PLATFORM_WAIT 1
    // The Linux futex syscall operates on a naturally-aligned 4-byte int.
    using __platform_wait_t = int;
    inline constexpr size_t __platform_wait_alignment = 4;
#else
// define _GLIBCXX_HAVE_PLATFORM_WAIT and implement __platform_wait()
// and __platform_notify() if there is a more efficient primitive supported
// by the platform (e.g. __ulock_wait()/__ulock_wake()) which is better than
// a mutex/condvar based wait.
# if ATOMIC_LONG_LOCK_FREE == 2
    using __platform_wait_t = unsigned long;
# else
    using __platform_wait_t = unsigned int;
# endif
    inline constexpr size_t __platform_wait_alignment
      = __alignof__(__platform_wait_t);
#endif
79 template<
typename _Tp>
80 inline constexpr
bool __platform_wait_uses_type
81 #ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
83 && ((
sizeof(_Tp) ==
sizeof(__detail::__platform_wait_t))
84 && (
alignof(_Tp*) >= __detail::__platform_wait_alignment));
91 #ifdef _GLIBCXX_HAVE_LINUX_FUTEX
92 enum class __futex_wait_flags : int
94 #ifdef _GLIBCXX_HAVE_LINUX_FUTEX_PRIVATE
103 __wait_private = __wait | __private_flag,
104 __wake_private = __wake | __private_flag,
105 __wait_bitset_private = __wait_bitset | __private_flag,
106 __wake_bitset_private = __wake_bitset | __private_flag,
107 __bitset_match_any = -1
110 template<
typename _Tp>
112 __platform_wait(
const _Tp* __addr, __platform_wait_t __val) noexcept
114 auto __e = syscall (SYS_futex,
static_cast<const void*
>(__addr),
115 static_cast<int>(__futex_wait_flags::__wait_private),
117 if (!__e || errno == EAGAIN)
120 __throw_system_error(errno);
123 template<
typename _Tp>
125 __platform_notify(
const _Tp* __addr,
bool __all) noexcept
127 syscall (SYS_futex,
static_cast<const void*
>(__addr),
128 static_cast<int>(__futex_wait_flags::__wake_private),
129 __all ? INT_MAX : 1);
134 __thread_yield() noexcept
136 #if defined _GLIBCXX_HAS_GTHREADS && defined _GLIBCXX_USE_SCHED_YIELD
142 __thread_relax() noexcept
144 #if defined __i386__ || defined __x86_64__
145 __builtin_ia32_pause();
151 inline constexpr
auto __atomic_spin_count_relax = 12;
152 inline constexpr
auto __atomic_spin_count = 16;
154 struct __default_spin_policy
157 operator()() const noexcept
161 template<
typename _Pred,
162 typename _Spin = __default_spin_policy>
164 __atomic_spin(_Pred& __pred, _Spin __spin = _Spin{ }) noexcept
166 for (
auto __i = 0; __i < __atomic_spin_count; ++__i)
171 if (__i < __atomic_spin_count_relax)
172 __detail::__thread_relax();
174 __detail::__thread_yield();
187 template<
typename _Tp>
188 bool __atomic_compare(
const _Tp& __a,
const _Tp& __b)
195 struct __waiter_pool_base
199 static constexpr
auto _S_align = 64;
201 alignas(_S_align) __platform_wait_t _M_wait = 0;
203 #ifndef _GLIBCXX_HAVE_PLATFORM_WAIT
207 alignas(_S_align) __platform_wait_t _M_ver = 0;
209 #ifndef _GLIBCXX_HAVE_PLATFORM_WAIT
212 __waiter_pool_base() =
default;
215 _M_enter_wait() noexcept
216 { __atomic_fetch_add(&_M_wait, 1, __ATOMIC_SEQ_CST); }
219 _M_leave_wait() noexcept
220 { __atomic_fetch_sub(&_M_wait, 1, __ATOMIC_RELEASE); }
223 _M_waiting() const noexcept
225 __platform_wait_t __res;
226 __atomic_load(&_M_wait, &__res, __ATOMIC_SEQ_CST);
231 _M_notify(__platform_wait_t* __addr, [[maybe_unused]]
bool __all,
232 bool __bare) noexcept
234 #ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
235 if (__addr == &_M_ver)
237 __atomic_fetch_add(__addr, 1, __ATOMIC_SEQ_CST);
241 if (__bare || _M_waiting())
242 __platform_notify(__addr, __all);
245 lock_guard<mutex> __l(_M_mtx);
246 __atomic_fetch_add(__addr, 1, __ATOMIC_RELAXED);
248 if (__bare || _M_waiting())
253 static __waiter_pool_base&
254 _S_for(
const void* __addr) noexcept
256 constexpr __UINTPTR_TYPE__ __ct = 16;
257 static __waiter_pool_base __w[__ct];
258 auto __key = ((__UINTPTR_TYPE__)__addr >> 2) % __ct;
263 struct __waiter_pool : __waiter_pool_base
266 _M_do_wait(
const __platform_wait_t* __addr, __platform_wait_t __old) noexcept
268 #ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
269 __platform_wait(__addr, __old);
271 __platform_wait_t __val;
272 __atomic_load(__addr, &__val, __ATOMIC_SEQ_CST);
275 lock_guard<mutex> __l(_M_mtx);
276 __atomic_load(__addr, &__val, __ATOMIC_RELAXED);
284 template<
typename _Tp>
287 using __waiter_type = _Tp;
290 __platform_wait_t* _M_addr;
292 template<
typename _Up>
293 static __platform_wait_t*
294 _S_wait_addr(
const _Up* __a, __platform_wait_t* __b)
296 if constexpr (__platform_wait_uses_type<_Up>)
297 return reinterpret_cast<__platform_wait_t*
>(
const_cast<_Up*
>(__a));
302 static __waiter_type&
303 _S_for(
const void* __addr) noexcept
305 static_assert(
sizeof(__waiter_type) ==
sizeof(__waiter_pool_base));
306 auto& res = __waiter_pool_base::_S_for(__addr);
307 return reinterpret_cast<__waiter_type&
>(res);
310 template<
typename _Up>
311 explicit __waiter_base(
const _Up* __addr) noexcept
312 : _M_w(_S_for(__addr))
313 , _M_addr(_S_wait_addr(__addr, &_M_w._M_ver))
317 _M_notify(
bool __all,
bool __bare =
false) noexcept
318 { _M_w._M_notify(_M_addr, __all, __bare); }
320 template<
typename _Up,
typename _ValFn,
321 typename _Spin = __default_spin_policy>
323 _S_do_spin_v(__platform_wait_t* __addr,
324 const _Up& __old, _ValFn __vfn,
325 __platform_wait_t& __val,
326 _Spin __spin = _Spin{ })
328 auto const __pred = [=]
329 {
return !__detail::__atomic_compare(__old, __vfn()); };
331 if constexpr (__platform_wait_uses_type<_Up>)
333 __builtin_memcpy(&__val, &__old,
sizeof(__val));
337 __atomic_load(__addr, &__val, __ATOMIC_ACQUIRE);
339 return __atomic_spin(__pred, __spin);
342 template<
typename _Up,
typename _ValFn,
343 typename _Spin = __default_spin_policy>
345 _M_do_spin_v(
const _Up& __old, _ValFn __vfn,
346 __platform_wait_t& __val,
347 _Spin __spin = _Spin{ })
348 {
return _S_do_spin_v(_M_addr, __old, __vfn, __val, __spin); }
350 template<
typename _Pred,
351 typename _Spin = __default_spin_policy>
353 _S_do_spin(
const __platform_wait_t* __addr,
355 __platform_wait_t& __val,
356 _Spin __spin = _Spin{ })
358 __atomic_load(__addr, &__val, __ATOMIC_ACQUIRE);
359 return __atomic_spin(__pred, __spin);
362 template<
typename _Pred,
363 typename _Spin = __default_spin_policy>
365 _M_do_spin(_Pred __pred, __platform_wait_t& __val,
366 _Spin __spin = _Spin{ })
367 {
return _S_do_spin(_M_addr, __pred, __val, __spin); }
370 template<
typename _EntersWait>
371 struct __waiter : __waiter_base<__waiter_pool>
373 using __base_type = __waiter_base<__waiter_pool>;
375 template<
typename _Tp>
376 explicit __waiter(
const _Tp* __addr) noexcept
377 : __base_type(__addr)
379 if constexpr (_EntersWait::value)
380 _M_w._M_enter_wait();
385 if constexpr (_EntersWait::value)
386 _M_w._M_leave_wait();
389 template<
typename _Tp,
typename _ValFn>
391 _M_do_wait_v(_Tp __old, _ValFn __vfn)
395 __platform_wait_t __val;
396 if (__base_type::_M_do_spin_v(__old, __vfn, __val))
398 __base_type::_M_w._M_do_wait(__base_type::_M_addr, __val);
400 while (__detail::__atomic_compare(__old, __vfn()));
403 template<
typename _Pred>
405 _M_do_wait(_Pred __pred) noexcept
409 __platform_wait_t __val;
410 if (__base_type::_M_do_spin(__pred, __val))
412 __base_type::_M_w._M_do_wait(__base_type::_M_addr, __val);
418 using __enters_wait = __waiter<std::true_type>;
419 using __bare_wait = __waiter<std::false_type>;
422 template<
typename _Tp,
typename _ValFn>
424 __atomic_wait_address_v(
const _Tp* __addr, _Tp __old,
425 _ValFn __vfn) noexcept
427 __detail::__enters_wait __w(__addr);
428 __w._M_do_wait_v(__old, __vfn);
431 template<
typename _Tp,
typename _Pred>
433 __atomic_wait_address(
const _Tp* __addr, _Pred __pred) noexcept
435 __detail::__enters_wait __w(__addr);
436 __w._M_do_wait(__pred);
440 template<
typename _Pred>
442 __atomic_wait_address_bare(
const __detail::__platform_wait_t* __addr,
443 _Pred __pred) noexcept
445 #ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
448 __detail::__platform_wait_t __val;
449 if (__detail::__bare_wait::_S_do_spin(__addr, __pred, __val))
451 __detail::__platform_wait(__addr, __val);
455 __detail::__bare_wait __w(__addr);
456 __w._M_do_wait(__pred);
460 template<
typename _Tp>
462 __atomic_notify_address(
const _Tp* __addr,
bool __all) noexcept
464 __detail::__bare_wait __w(__addr);
465 __w._M_notify(__all);
470 __atomic_notify_address_bare(
const __detail::__platform_wait_t* __addr,
473 #ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
474 __detail::__platform_notify(__addr, __all);
476 __detail::__bare_wait __w(__addr);
477 __w._M_notify(__all,
true);
480 _GLIBCXX_END_NAMESPACE_VERSION
// constexpr _Tp* addressof(_Tp& __r) noexcept
// Returns the actual address of the object or function referenced by __r,
// even in the presence of an overloaded operator&.
// The top-level namespace for ISO C++ entities is std.