libstdc++
atomic_wait.h
// -*- C++ -*- header.

// Copyright (C) 2020-2023 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_wait.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{atomic}
 */

#ifndef _GLIBCXX_ATOMIC_WAIT_H
#define _GLIBCXX_ATOMIC_WAIT_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#if defined _GLIBCXX_HAS_GTHREADS || defined _GLIBCXX_HAVE_LINUX_FUTEX
#include <bits/functional_hash.h>
#include <bits/gthr.h>
#include <ext/numeric_traits.h>

#ifdef _GLIBCXX_HAVE_LINUX_FUTEX
# include <cerrno>
# include <climits>
# include <unistd.h>
# include <syscall.h>
# include <bits/functexcept.h>
#endif

# include <bits/std_mutex.h> // std::mutex, std::__condvar

#define __cpp_lib_atomic_wait 201907L

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
  namespace __detail
  {
#ifdef _GLIBCXX_HAVE_LINUX_FUTEX
#define _GLIBCXX_HAVE_PLATFORM_WAIT 1
    using __platform_wait_t = int;
    inline constexpr size_t __platform_wait_alignment = 4;
#else
// define _GLIBCXX_HAVE_PLATFORM_WAIT and implement __platform_wait()
// and __platform_notify() if there is a more efficient primitive supported
// by the platform (e.g. __ulock_wait()/__ulock_wake()) which is better than
// a mutex/condvar based wait.
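//
// Such a definition might look roughly like the sketch below. This is a
// hypothetical illustration only: __ulock_wait/__ulock_wake are private
// Darwin APIs, and the declarations and the UL_COMPARE_AND_WAIT /
// ULF_WAKE_ALL constants used here are assumptions, not part of this
// header.
//
//   extern "C" int __ulock_wait(uint32_t __op, void* __addr,
//                               uint64_t __val, uint32_t __timeout);
//   extern "C" int __ulock_wake(uint32_t __op, void* __addr,
//                               uint64_t __val);
//
//   inline void
//   __platform_wait(const __platform_wait_t* __addr, __platform_wait_t __old)
//   {
//     // Blocks while *__addr == __old; spurious returns are fine, the
//     // callers in this header always loop on a predicate.
//     __ulock_wait(1 /* UL_COMPARE_AND_WAIT, assumed */,
//                  const_cast<__platform_wait_t*>(__addr), __old, 0);
//   }
//
//   inline void
//   __platform_notify(const __platform_wait_t* __addr, bool __all)
//   {
//     __ulock_wake(1 | (__all ? 0x100 /* ULF_WAKE_ALL, assumed */ : 0),
//                  const_cast<__platform_wait_t*>(__addr), 0);
//   }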
# if ATOMIC_LONG_LOCK_FREE == 2
    using __platform_wait_t = unsigned long;
# else
    using __platform_wait_t = unsigned int;
# endif
    inline constexpr size_t __platform_wait_alignment
      = __alignof__(__platform_wait_t);
#endif
  } // namespace __detail

  template<typename _Tp>
    inline constexpr bool __platform_wait_uses_type
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
      = is_scalar_v<_Tp>
        && ((sizeof(_Tp) == sizeof(__detail::__platform_wait_t))
        && (alignof(_Tp) >= __detail::__platform_wait_alignment));
#else
      = false;
#endif

  namespace __detail
  {
#ifdef _GLIBCXX_HAVE_LINUX_FUTEX
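    // These values mirror the FUTEX_* operation constants from
    // <linux/futex.h> (FUTEX_WAIT, FUTEX_WAKE, the *_BITSET variants and
    // FUTEX_PRIVATE_FLAG), redefined here so that this header does not
    // need to include kernel headers.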
    enum class __futex_wait_flags : int
    {
#ifdef _GLIBCXX_HAVE_LINUX_FUTEX_PRIVATE
      __private_flag = 128,
#else
      __private_flag = 0,
#endif
      __wait = 0,
      __wake = 1,
      __wait_bitset = 9,
      __wake_bitset = 10,
      __wait_private = __wait | __private_flag,
      __wake_private = __wake | __private_flag,
      __wait_bitset_private = __wait_bitset | __private_flag,
      __wake_bitset_private = __wake_bitset | __private_flag,
      __bitset_match_any = -1
    };

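    // Block until *__addr changes from __val, using FUTEX_WAIT_PRIVATE.
    // EAGAIN (the value already changed) and EINTR (spurious wakeup) are
    // not errors: the function simply returns and the callers re-check
    // their predicate in a loop.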
    template<typename _Tp>
      void
      __platform_wait(const _Tp* __addr, __platform_wait_t __val) noexcept
      {
        auto __e = syscall (SYS_futex, static_cast<const void*>(__addr),
                            static_cast<int>(__futex_wait_flags::__wait_private),
                            __val, nullptr);
        if (!__e || errno == EAGAIN)
          return;
        if (errno != EINTR)
          __throw_system_error(errno);
      }

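    // Wake one waiter (or all of them, if __all) blocked in
    // __platform_wait on __addr, using FUTEX_WAKE_PRIVATE.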
    template<typename _Tp>
      void
      __platform_notify(const _Tp* __addr, bool __all) noexcept
      {
        syscall (SYS_futex, static_cast<const void*>(__addr),
                 static_cast<int>(__futex_wait_flags::__wake_private),
                 __all ? INT_MAX : 1);
      }
#endif

    inline void
    __thread_yield() noexcept
    {
#if defined _GLIBCXX_HAS_GTHREADS && defined _GLIBCXX_USE_SCHED_YIELD
      __gthread_yield();
#endif
    }

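    // Pause briefly inside a spin loop; on x86 this emits the PAUSE
    // instruction, elsewhere it falls back to yielding the thread.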
    inline void
    __thread_relax() noexcept
    {
#if defined __i386__ || defined __x86_64__
      __builtin_ia32_pause();
#else
      __thread_yield();
#endif
    }

    inline constexpr auto __atomic_spin_count_relax = 12;
    inline constexpr auto __atomic_spin_count = 16;

    struct __default_spin_policy
    {
      bool
      operator()() const noexcept
      { return false; }
    };

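    // Spin until __pred() is true, for at most __atomic_spin_count
    // iterations: the first __atomic_spin_count_relax of them use
    // __thread_relax(), the remainder use __thread_yield(). After that,
    // keep re-testing only while the caller-supplied policy says to spin.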
    template<typename _Pred,
             typename _Spin = __default_spin_policy>
      bool
      __atomic_spin(_Pred& __pred, _Spin __spin = _Spin{ }) noexcept
      {
        for (auto __i = 0; __i < __atomic_spin_count; ++__i)
          {
            if (__pred())
              return true;

            if (__i < __atomic_spin_count_relax)
              __detail::__thread_relax();
            else
              __detail::__thread_yield();
          }

        while (__spin())
          {
            if (__pred())
              return true;
          }

        return false;
      }

    // return true if equal
    template<typename _Tp>
      bool __atomic_compare(const _Tp& __a, const _Tp& __b)
      {
        // TODO make this do the correct padding bit ignoring comparison
        return __builtin_memcmp(&__a, &__b, sizeof(_Tp)) == 0;
      }

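    // One of a fixed set of statically allocated wait/notify records.
    // _M_wait counts the threads currently waiting, and _M_ver is a
    // "proxy" word that is waited on for objects which cannot use the
    // platform wait directly; each is aligned to its own cache line to
    // avoid false sharing between waiters and notifiers.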
    struct __waiter_pool_base
    {
      // Don't use std::hardware_destructive_interference_size here because we
      // don't want the layout of library types to depend on compiler options.
      static constexpr auto _S_align = 64;

      alignas(_S_align) __platform_wait_t _M_wait = 0;

#ifndef _GLIBCXX_HAVE_PLATFORM_WAIT
      mutex _M_mtx;
#endif

      alignas(_S_align) __platform_wait_t _M_ver = 0;

#ifndef _GLIBCXX_HAVE_PLATFORM_WAIT
      __condvar _M_cv;
#endif
      __waiter_pool_base() = default;

      void
      _M_enter_wait() noexcept
      { __atomic_fetch_add(&_M_wait, 1, __ATOMIC_SEQ_CST); }

      void
      _M_leave_wait() noexcept
      { __atomic_fetch_sub(&_M_wait, 1, __ATOMIC_RELEASE); }

      bool
      _M_waiting() const noexcept
      {
        __platform_wait_t __res;
        __atomic_load(&_M_wait, &__res, __ATOMIC_SEQ_CST);
        return __res != 0;
      }

      void
      _M_notify(__platform_wait_t* __addr, [[maybe_unused]] bool __all,
                bool __bare) noexcept
      {
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
        if (__addr == &_M_ver)
          {
            __atomic_fetch_add(__addr, 1, __ATOMIC_SEQ_CST);
            __all = true;
          }

        if (__bare || _M_waiting())
          __platform_notify(__addr, __all);
#else
        {
          lock_guard<mutex> __l(_M_mtx);
          __atomic_fetch_add(__addr, 1, __ATOMIC_RELAXED);
        }
        if (__bare || _M_waiting())
          _M_cv.notify_all();
#endif
      }

      static __waiter_pool_base&
      _S_for(const void* __addr) noexcept
      {
        constexpr uintptr_t __ct = 16;
        static __waiter_pool_base __w[__ct];
        auto __key = (uintptr_t(__addr) >> 2) % __ct;
        return __w[__key];
      }
    };

    struct __waiter_pool : __waiter_pool_base
    {
      void
      _M_do_wait(const __platform_wait_t* __addr, __platform_wait_t __old) noexcept
      {
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
        __platform_wait(__addr, __old);
#else
        __platform_wait_t __val;
        __atomic_load(__addr, &__val, __ATOMIC_SEQ_CST);
        if (__val == __old)
          {
            lock_guard<mutex> __l(_M_mtx);
            __atomic_load(__addr, &__val, __ATOMIC_RELAXED);
            if (__val == __old)
              _M_cv.wait(_M_mtx);
          }
#endif // _GLIBCXX_HAVE_PLATFORM_WAIT
      }
    };

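    // Binds a waiter-pool record to a particular atomic object. For types
    // the platform can wait on directly, _M_addr points at the object
    // itself; otherwise it points at the pool's _M_ver proxy word, which
    // _M_notify increments so that proxied waiters observe a change.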
    template<typename _Tp>
      struct __waiter_base
      {
        using __waiter_type = _Tp;

        __waiter_type& _M_w;
        __platform_wait_t* _M_addr;

        template<typename _Up>
          static __platform_wait_t*
          _S_wait_addr(const _Up* __a, __platform_wait_t* __b)
          {
            if constexpr (__platform_wait_uses_type<_Up>)
              return reinterpret_cast<__platform_wait_t*>(const_cast<_Up*>(__a));
            else
              return __b;
          }

        static __waiter_type&
        _S_for(const void* __addr) noexcept
        {
          static_assert(sizeof(__waiter_type) == sizeof(__waiter_pool_base));
          auto& res = __waiter_pool_base::_S_for(__addr);
          return reinterpret_cast<__waiter_type&>(res);
        }

        template<typename _Up>
          explicit __waiter_base(const _Up* __addr) noexcept
          : _M_w(_S_for(__addr))
          , _M_addr(_S_wait_addr(__addr, &_M_w._M_ver))
          { }

        void
        _M_notify(bool __all, bool __bare = false) noexcept
        { _M_w._M_notify(_M_addr, __all, __bare); }

        template<typename _Up, typename _ValFn,
                 typename _Spin = __default_spin_policy>
          static bool
          _S_do_spin_v(__platform_wait_t* __addr,
                       const _Up& __old, _ValFn __vfn,
                       __platform_wait_t& __val,
                       _Spin __spin = _Spin{ })
          {
            auto const __pred = [=]
              { return !__detail::__atomic_compare(__old, __vfn()); };

            if constexpr (__platform_wait_uses_type<_Up>)
              {
                __builtin_memcpy(&__val, &__old, sizeof(__val));
              }
            else
              {
                __atomic_load(__addr, &__val, __ATOMIC_ACQUIRE);
              }
            return __atomic_spin(__pred, __spin);
          }

        template<typename _Up, typename _ValFn,
                 typename _Spin = __default_spin_policy>
          bool
          _M_do_spin_v(const _Up& __old, _ValFn __vfn,
                       __platform_wait_t& __val,
                       _Spin __spin = _Spin{ })
          { return _S_do_spin_v(_M_addr, __old, __vfn, __val, __spin); }

        template<typename _Pred,
                 typename _Spin = __default_spin_policy>
          static bool
          _S_do_spin(const __platform_wait_t* __addr,
                     _Pred __pred,
                     __platform_wait_t& __val,
                     _Spin __spin = _Spin{ })
          {
            __atomic_load(__addr, &__val, __ATOMIC_ACQUIRE);
            return __atomic_spin(__pred, __spin);
          }

        template<typename _Pred,
                 typename _Spin = __default_spin_policy>
          bool
          _M_do_spin(_Pred __pred, __platform_wait_t& __val,
                     _Spin __spin = _Spin{ })
          { return _S_do_spin(_M_addr, __pred, __val, __spin); }
      };

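    // RAII waiter: when _EntersWait is true_type the constructor registers
    // this thread in the pool's waiter count and the destructor removes
    // it, which lets notifiers skip the wake system call when nobody is
    // waiting. The false_type ("bare") form is for atomic types that
    // track contention externally.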
    template<typename _EntersWait>
      struct __waiter : __waiter_base<__waiter_pool>
      {
        using __base_type = __waiter_base<__waiter_pool>;

        template<typename _Tp>
          explicit __waiter(const _Tp* __addr) noexcept
          : __base_type(__addr)
          {
            if constexpr (_EntersWait::value)
              _M_w._M_enter_wait();
          }

        ~__waiter()
        {
          if constexpr (_EntersWait::value)
            _M_w._M_leave_wait();
        }

        template<typename _Tp, typename _ValFn>
          void
          _M_do_wait_v(_Tp __old, _ValFn __vfn)
          {
            do
              {
                __platform_wait_t __val;
                if (__base_type::_M_do_spin_v(__old, __vfn, __val))
                  return;
                __base_type::_M_w._M_do_wait(__base_type::_M_addr, __val);
              }
            while (__detail::__atomic_compare(__old, __vfn()));
          }

        template<typename _Pred>
          void
          _M_do_wait(_Pred __pred) noexcept
          {
            do
              {
                __platform_wait_t __val;
                if (__base_type::_M_do_spin(__pred, __val))
                  return;
                __base_type::_M_w._M_do_wait(__base_type::_M_addr, __val);
              }
            while (!__pred());
          }
      };

    using __enters_wait = __waiter<std::true_type>;
    using __bare_wait = __waiter<std::false_type>;
  } // namespace __detail

  template<typename _Tp, typename _ValFn>
    void
    __atomic_wait_address_v(const _Tp* __addr, _Tp __old,
                            _ValFn __vfn) noexcept
    {
      __detail::__enters_wait __w(__addr);
      __w._M_do_wait_v(__old, __vfn);
    }

  template<typename _Tp, typename _Pred>
    void
    __atomic_wait_address(const _Tp* __addr, _Pred __pred) noexcept
    {
      __detail::__enters_wait __w(__addr);
      __w._M_do_wait(__pred);
    }

  // This call is to be used by atomic types which track contention externally
  template<typename _Pred>
    void
    __atomic_wait_address_bare(const __detail::__platform_wait_t* __addr,
                               _Pred __pred) noexcept
    {
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
      do
        {
          __detail::__platform_wait_t __val;
          if (__detail::__bare_wait::_S_do_spin(__addr, __pred, __val))
            return;
          __detail::__platform_wait(__addr, __val);
        }
      while (!__pred());
#else // !_GLIBCXX_HAVE_PLATFORM_WAIT
      __detail::__bare_wait __w(__addr);
      __w._M_do_wait(__pred);
#endif
    }

  template<typename _Tp>
    void
    __atomic_notify_address(const _Tp* __addr, bool __all) noexcept
    {
      __detail::__bare_wait __w(__addr);
      __w._M_notify(__all);
    }

  // This call is to be used by atomic types which track contention externally
  inline void
  __atomic_notify_address_bare(const __detail::__platform_wait_t* __addr,
                               bool __all) noexcept
  {
#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT
    __detail::__platform_notify(__addr, __all);
#else
    __detail::__bare_wait __w(__addr);
    __w._M_notify(__all, true);
#endif
  }
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std
#endif // GTHREADS || LINUX_FUTEX
#endif // _GLIBCXX_ATOMIC_WAIT_H
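
In libstdc++ the public std::atomic<T>::wait/notify_one/notify_all members
are thin wrappers over __atomic_wait_address_v and __atomic_notify_address
above. A minimal, self-contained demonstration of the user-facing API that
exercises these internals (a sketch of typical usage, not part of this
header):

  #include <atomic>
  #include <thread>

  int main()
  {
    std::atomic<int> a{0};

    std::thread t([&] {
      // Blocks while a.load() == 0; on this implementation this spins
      // briefly, then waits via the futex (or mutex/condvar) machinery
      // in bits/atomic_wait.h.
      a.wait(0);
    });

    a.store(1);      // change the value so the waiter's predicate holds
    a.notify_one();  // wake the waiter (skipped internally if none waiting)
    t.join();
  }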