libstdc++: atomic_base.h
1 // -*- C++ -*- header.
2 
3 // Copyright (C) 2008-2026 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10 
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15 
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19 
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24 
25 /** @file bits/atomic_base.h
26  * This is an internal header file, included by other library headers.
27  * Do not attempt to use it directly. @headername{atomic}
28  */
29 
30 #ifndef _GLIBCXX_ATOMIC_BASE_H
31 #define _GLIBCXX_ATOMIC_BASE_H 1
32 
33 #ifdef _GLIBCXX_SYSHDR
34 #pragma GCC system_header
35 #endif
36 
37 #include <bits/c++config.h>
38 #include <new> // For placement new
40 #include <bits/move.h>
41 
42 #if __cplusplus > 201703L && _GLIBCXX_HOSTED
43 #include <bits/atomic_wait.h>
44 #endif
45 
46 #ifndef _GLIBCXX_ALWAYS_INLINE
47 #define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
48 #endif
49 
50 #include <bits/version.h>
51 
52 namespace std _GLIBCXX_VISIBILITY(default)
53 {
54 _GLIBCXX_BEGIN_NAMESPACE_VERSION
55 
56  /**
57  * @defgroup atomics Atomics
58  *
59  * Components for performing atomic operations.
60  * @{
61  */
62 
63  /// Enumeration for memory_order
64 #if __cplusplus > 201703L
65  enum class memory_order : int
66  {
67  relaxed,
68  consume,
69  acquire,
70  release,
71  acq_rel,
72  seq_cst
73  };
74 
75  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
76  inline constexpr memory_order memory_order_consume = memory_order::consume;
77  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
78  inline constexpr memory_order memory_order_release = memory_order::release;
79  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
80  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
81 #else
82  enum memory_order : int
83  {
84  memory_order_relaxed,
85  memory_order_consume,
86  memory_order_acquire,
87  memory_order_release,
88  memory_order_acq_rel,
89  memory_order_seq_cst
90  };
91 #endif
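  // Illustrative usage, not part of this header: the release/acquire
  // message-passing pattern, a minimal sketch assuming C++11 <atomic>.
  // The release store to `ready` makes the plain write to `payload`
  // visible to any thread whose acquire load observes `ready == true`.
  //
  //   #include <atomic>
  //   int payload;
  //   std::atomic<bool> ready{false};
  //
  //   void producer()
  //   {
  //     payload = 42;
  //     ready.store(true, std::memory_order_release);
  //   }
  //
  //   void consumer()
  //   {
  //     while (!ready.load(std::memory_order_acquire))
  //       { }
  //     // payload == 42 is guaranteed here.
  //   }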
92 
93  /// @cond undocumented
94  enum __memory_order_modifier
95  {
96  __memory_order_mask = 0x0ffff,
97  __memory_order_modifier_mask = 0xffff0000,
98  __memory_order_hle_acquire = 0x10000,
99  __memory_order_hle_release = 0x20000
100  };
101  /// @endcond
102 
103  constexpr memory_order
104  operator|(memory_order __m, __memory_order_modifier __mod) noexcept
105  {
106  return memory_order(int(__m) | int(__mod));
107  }
108 
109  constexpr memory_order
110  operator&(memory_order __m, __memory_order_modifier __mod) noexcept
111  {
112  return memory_order(int(__m) & int(__mod));
113  }
114 
115  /// @cond undocumented
116 
117  // Drop release ordering as per [atomics.types.operations.req]/21
118  constexpr memory_order
119  __cmpexch_failure_order2(memory_order __m) noexcept
120  {
121  return __m == memory_order_acq_rel ? memory_order_acquire
122  : __m == memory_order_release ? memory_order_relaxed : __m;
123  }
124 
125  constexpr memory_order
126  __cmpexch_failure_order(memory_order __m) noexcept
127  {
128  return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
129  | __memory_order_modifier(__m & __memory_order_modifier_mask));
130  }
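  // For illustration, not part of this header: the mapping applied when a
  // compare-exchange is given a single memory order, i.e. the failure order
  // is the success order with any release component removed:
  //
  //   __cmpexch_failure_order(memory_order_seq_cst) == memory_order_seq_cst
  //   __cmpexch_failure_order(memory_order_acq_rel) == memory_order_acquire
  //   __cmpexch_failure_order(memory_order_release) == memory_order_relaxed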
131 
132  constexpr bool
133  __is_valid_cmpexch_failure_order(memory_order __m) noexcept
134  {
135  return (__m & __memory_order_mask) != memory_order_release
136  && (__m & __memory_order_mask) != memory_order_acq_rel;
137  }
138 
139  // Base types for atomics.
140  template<typename _IntTp>
141  struct __atomic_base;
142 
143  /// @endcond
144 
145  _GLIBCXX_ALWAYS_INLINE void
146  atomic_thread_fence(memory_order __m) noexcept
147  { __atomic_thread_fence(int(__m)); }
148 
149  _GLIBCXX_ALWAYS_INLINE void
150  atomic_signal_fence(memory_order __m) noexcept
151  { __atomic_signal_fence(int(__m)); }
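  // Illustrative usage, not part of this header: a fence-based variant of
  // release/acquire synchronization, sketched assuming C++11 <atomic>. The
  // release fence before the relaxed store pairs with the acquire fence
  // that follows the relaxed load observing that store.
  //
  //   #include <atomic>
  //   int data;
  //   std::atomic<bool> flag{false};
  //
  //   void writer()
  //   {
  //     data = 1;
  //     std::atomic_thread_fence(std::memory_order_release);
  //     flag.store(true, std::memory_order_relaxed);
  //   }
  //
  //   void reader()
  //   {
  //     while (!flag.load(std::memory_order_relaxed))
  //       { }
  //     std::atomic_thread_fence(std::memory_order_acquire);
  //     // data == 1 is guaranteed here.
  //   }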
152 
153  /// kill_dependency
154  template<typename _Tp>
155  inline _Tp
156  kill_dependency(_Tp __y) noexcept
157  {
158  _Tp __ret(__y);
159  return __ret;
160  }
161 
162 /// @cond undocumented
163 #if __glibcxx_atomic_value_initialization
164 # define _GLIBCXX20_INIT(I) = I
165 #else
166 # define _GLIBCXX20_INIT(I)
167 #endif
168 /// @endcond
169 
170 #define ATOMIC_VAR_INIT(_VI) { _VI }
171 
172  template<typename _Tp>
173  struct atomic;
174 
175  template<typename _Tp>
176  struct atomic<_Tp*>;
177 
178  /* The target's "set" value for test-and-set may not be exactly 1. */
179 #if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
180  typedef bool __atomic_flag_data_type;
181 #else
182  typedef unsigned char __atomic_flag_data_type;
183 #endif
184 
185  /// @cond undocumented
186 
187  /*
188  * Base type for atomic_flag.
189  *
190  * Base type is POD with data, allowing atomic_flag to derive from
191  * it and meet the standard layout type requirement. In addition to
192  * compatibility with a C interface, this allows different
193  * implementations of atomic_flag to use the same atomic operation
194  * functions, via a standard conversion to the __atomic_flag_base
195  * argument.
196  */
197  _GLIBCXX_BEGIN_EXTERN_C
198 
199  struct __atomic_flag_base
200  {
201  __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
202  };
203 
204  _GLIBCXX_END_EXTERN_C
205 
206  /// @endcond
207 
208 #define ATOMIC_FLAG_INIT { 0 }
209 
210  /// atomic_flag
211  struct atomic_flag : public __atomic_flag_base
212  {
213  atomic_flag() noexcept = default;
214  ~atomic_flag() noexcept = default;
215  atomic_flag(const atomic_flag&) = delete;
216  atomic_flag& operator=(const atomic_flag&) = delete;
217  atomic_flag& operator=(const atomic_flag&) volatile = delete;
218 
219  // Conversion to ATOMIC_FLAG_INIT.
220  constexpr atomic_flag(bool __i) noexcept
221  : __atomic_flag_base{ _S_init(__i) }
222  { }
223 
224  _GLIBCXX_ALWAYS_INLINE bool
225  test_and_set(memory_order __m = memory_order_seq_cst) noexcept
226  {
227  return __atomic_test_and_set (&_M_i, int(__m));
228  }
229 
230  _GLIBCXX_ALWAYS_INLINE bool
231  test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
232  {
233  return __atomic_test_and_set (&_M_i, int(__m));
234  }
235 
236 #ifdef __glibcxx_atomic_flag_test // C++ >= 20
237  _GLIBCXX_ALWAYS_INLINE bool
238  test(memory_order __m = memory_order_seq_cst) const noexcept
239  {
240  __atomic_flag_data_type __v;
241  __atomic_load(&_M_i, &__v, int(__m));
242  return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
243  }
244 
245  _GLIBCXX_ALWAYS_INLINE bool
246  test(memory_order __m = memory_order_seq_cst) const volatile noexcept
247  {
248  __atomic_flag_data_type __v;
249  __atomic_load(&_M_i, &__v, int(__m));
250  return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
251  }
252 #endif
253 
254 #if __glibcxx_atomic_wait // C++ >= 20 && (linux_futex || gthread)
255  _GLIBCXX_ALWAYS_INLINE void
256  wait(bool __old,
257  memory_order __m = memory_order_seq_cst) const noexcept
258  {
259  const __atomic_flag_data_type __v
260  = __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;
261 
262  std::__atomic_wait_address_v(&_M_i, __v,
263  [__m, this] { return __atomic_load_n(&_M_i, int(__m)); });
264  }
265 
266  // TODO add const volatile overload
267 
268  _GLIBCXX_ALWAYS_INLINE void
269  notify_one() noexcept
270  { std::__atomic_notify_address(&_M_i, false); }
271 
272  // TODO add const volatile overload
273 
274  _GLIBCXX_ALWAYS_INLINE void
275  notify_all() noexcept
276  { std::__atomic_notify_address(&_M_i, true); }
277 
278  // TODO add const volatile overload
279 #endif // __glibcxx_atomic_wait
280 
281  _GLIBCXX_ALWAYS_INLINE void
282  clear(memory_order __m = memory_order_seq_cst) noexcept
283  {
284  memory_order __b __attribute__ ((__unused__))
285  = __m & __memory_order_mask;
286  __glibcxx_assert(__b != memory_order_consume);
287  __glibcxx_assert(__b != memory_order_acquire);
288  __glibcxx_assert(__b != memory_order_acq_rel);
289 
290  __atomic_clear (&_M_i, int(__m));
291  }
292 
293  _GLIBCXX_ALWAYS_INLINE void
294  clear(memory_order __m = memory_order_seq_cst) volatile noexcept
295  {
296  memory_order __b __attribute__ ((__unused__))
297  = __m & __memory_order_mask;
298  __glibcxx_assert(__b != memory_order_consume);
299  __glibcxx_assert(__b != memory_order_acquire);
300  __glibcxx_assert(__b != memory_order_acq_rel);
301 
302  __atomic_clear (&_M_i, int(__m));
303  }
304 
305  private:
306  static constexpr __atomic_flag_data_type
307  _S_init(bool __i)
308  { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
309  };
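  // Illustrative usage, not part of this header: a minimal spin-lock built
  // on atomic_flag, sketched assuming C++11 <atomic>. test_and_set with
  // acquire ordering takes the lock; clear with release ordering drops it.
  //
  //   #include <atomic>
  //   std::atomic_flag lock_flag = ATOMIC_FLAG_INIT;
  //
  //   void lock()
  //   {
  //     while (lock_flag.test_and_set(std::memory_order_acquire))
  //       { } // spin until the previous value was clear
  //   }
  //
  //   void unlock()
  //   { lock_flag.clear(std::memory_order_release); }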
310 
311  /// @cond undocumented
312 
313  /// Base class for atomic integrals.
314  //
315  // For each of the integral types, define atomic_[integral type] struct
316  //
317  // atomic_bool bool
318  // atomic_char char
319  // atomic_schar signed char
320  // atomic_uchar unsigned char
321  // atomic_short short
322  // atomic_ushort unsigned short
323  // atomic_int int
324  // atomic_uint unsigned int
325  // atomic_long long
326  // atomic_ulong unsigned long
327  // atomic_llong long long
328  // atomic_ullong unsigned long long
329  // atomic_char8_t char8_t
330  // atomic_char16_t char16_t
331  // atomic_char32_t char32_t
332  // atomic_wchar_t wchar_t
333  //
334  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
335  // 8 bytes, since that is what GCC built-in functions for atomic
336  // memory access expect.
337 
338  namespace __atomic_impl
339  {
340  template<typename _Tp>
341  using _Val = typename remove_volatile<_Tp>::type;
342 
343 #if __glibcxx_atomic_min_max
344  template<typename _Tp>
345  _Tp
346  __fetch_min(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept;
347 
348  template<typename _Tp>
349  _Tp
350  __fetch_max(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept;
351 #endif
352  }
353 
354  template<typename _ITp>
355  struct __atomic_base
356  {
357  using value_type = _ITp;
358  using difference_type = value_type;
359 
360  private:
361  typedef _ITp __int_type;
362 
363  static constexpr int _S_alignment =
364  sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);
365 
366  alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);
367 
368  public:
369  __atomic_base() noexcept = default;
370  ~__atomic_base() noexcept = default;
371  __atomic_base(const __atomic_base&) = delete;
372  __atomic_base& operator=(const __atomic_base&) = delete;
373  __atomic_base& operator=(const __atomic_base&) volatile = delete;
374 
375  constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }
376 
377  operator __int_type() const noexcept
378  { return load(); }
379 
380  operator __int_type() const volatile noexcept
381  { return load(); }
382 
383  __int_type
384  operator=(__int_type __i) noexcept
385  {
386  store(__i);
387  return __i;
388  }
389 
390  __int_type
391  operator=(__int_type __i) volatile noexcept
392  {
393  store(__i);
394  return __i;
395  }
396 
397  __int_type
398  operator++(int) noexcept
399  { return fetch_add(1); }
400 
401  __int_type
402  operator++(int) volatile noexcept
403  { return fetch_add(1); }
404 
405  __int_type
406  operator--(int) noexcept
407  { return fetch_sub(1); }
408 
409  __int_type
410  operator--(int) volatile noexcept
411  { return fetch_sub(1); }
412 
413  __int_type
414  operator++() noexcept
415  { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
416 
417  __int_type
418  operator++() volatile noexcept
419  { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
420 
421  __int_type
422  operator--() noexcept
423  { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
424 
425  __int_type
426  operator--() volatile noexcept
427  { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
428 
429  __int_type
430  operator+=(__int_type __i) noexcept
431  { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
432 
433  __int_type
434  operator+=(__int_type __i) volatile noexcept
435  { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
436 
437  __int_type
438  operator-=(__int_type __i) noexcept
439  { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
440 
441  __int_type
442  operator-=(__int_type __i) volatile noexcept
443  { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
444 
445  __int_type
446  operator&=(__int_type __i) noexcept
447  { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
448 
449  __int_type
450  operator&=(__int_type __i) volatile noexcept
451  { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
452 
453  __int_type
454  operator|=(__int_type __i) noexcept
455  { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
456 
457  __int_type
458  operator|=(__int_type __i) volatile noexcept
459  { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
460 
461  __int_type
462  operator^=(__int_type __i) noexcept
463  { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
464 
465  __int_type
466  operator^=(__int_type __i) volatile noexcept
467  { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
468 
469  bool
470  is_lock_free() const noexcept
471  {
472  // Use a fake, minimally aligned pointer.
473  return __atomic_is_lock_free(sizeof(_M_i),
474  reinterpret_cast<void *>(-_S_alignment));
475  }
476 
477  bool
478  is_lock_free() const volatile noexcept
479  {
480  // Use a fake, minimally aligned pointer.
481  return __atomic_is_lock_free(sizeof(_M_i),
482  reinterpret_cast<void *>(-_S_alignment));
483  }
484 
485  _GLIBCXX_ALWAYS_INLINE void
486  store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
487  {
488  memory_order __b __attribute__ ((__unused__))
489  = __m & __memory_order_mask;
490  __glibcxx_assert(__b != memory_order_acquire);
491  __glibcxx_assert(__b != memory_order_acq_rel);
492  __glibcxx_assert(__b != memory_order_consume);
493 
494  __atomic_store_n(&_M_i, __i, int(__m));
495  }
496 
497  _GLIBCXX_ALWAYS_INLINE void
498  store(__int_type __i,
499  memory_order __m = memory_order_seq_cst) volatile noexcept
500  {
501  memory_order __b __attribute__ ((__unused__))
502  = __m & __memory_order_mask;
503  __glibcxx_assert(__b != memory_order_acquire);
504  __glibcxx_assert(__b != memory_order_acq_rel);
505  __glibcxx_assert(__b != memory_order_consume);
506 
507  __atomic_store_n(&_M_i, __i, int(__m));
508  }
509 
510  _GLIBCXX_ALWAYS_INLINE __int_type
511  load(memory_order __m = memory_order_seq_cst) const noexcept
512  {
513  memory_order __b __attribute__ ((__unused__))
514  = __m & __memory_order_mask;
515  __glibcxx_assert(__b != memory_order_release);
516  __glibcxx_assert(__b != memory_order_acq_rel);
517 
518  return __atomic_load_n(&_M_i, int(__m));
519  }
520 
521  _GLIBCXX_ALWAYS_INLINE __int_type
522  load(memory_order __m = memory_order_seq_cst) const volatile noexcept
523  {
524  memory_order __b __attribute__ ((__unused__))
525  = __m & __memory_order_mask;
526  __glibcxx_assert(__b != memory_order_release);
527  __glibcxx_assert(__b != memory_order_acq_rel);
528 
529  return __atomic_load_n(&_M_i, int(__m));
530  }
531 
532  _GLIBCXX_ALWAYS_INLINE __int_type
533  exchange(__int_type __i,
534  memory_order __m = memory_order_seq_cst) noexcept
535  {
536  return __atomic_exchange_n(&_M_i, __i, int(__m));
537  }
538 
539 
540  _GLIBCXX_ALWAYS_INLINE __int_type
541  exchange(__int_type __i,
542  memory_order __m = memory_order_seq_cst) volatile noexcept
543  {
544  return __atomic_exchange_n(&_M_i, __i, int(__m));
545  }
546 
547  _GLIBCXX_ALWAYS_INLINE bool
548  compare_exchange_weak(__int_type& __i1, __int_type __i2,
549  memory_order __m1, memory_order __m2) noexcept
550  {
551  __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
552 
553  return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
554  int(__m1), int(__m2));
555  }
556 
557  _GLIBCXX_ALWAYS_INLINE bool
558  compare_exchange_weak(__int_type& __i1, __int_type __i2,
559  memory_order __m1,
560  memory_order __m2) volatile noexcept
561  {
562  __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
563 
564  return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
565  int(__m1), int(__m2));
566  }
567 
568  _GLIBCXX_ALWAYS_INLINE bool
569  compare_exchange_weak(__int_type& __i1, __int_type __i2,
570  memory_order __m = memory_order_seq_cst) noexcept
571  {
572  return compare_exchange_weak(__i1, __i2, __m,
573  __cmpexch_failure_order(__m));
574  }
575 
576  _GLIBCXX_ALWAYS_INLINE bool
577  compare_exchange_weak(__int_type& __i1, __int_type __i2,
578  memory_order __m = memory_order_seq_cst) volatile noexcept
579  {
580  return compare_exchange_weak(__i1, __i2, __m,
581  __cmpexch_failure_order(__m));
582  }
583 
584  _GLIBCXX_ALWAYS_INLINE bool
585  compare_exchange_strong(__int_type& __i1, __int_type __i2,
586  memory_order __m1, memory_order __m2) noexcept
587  {
588  __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
589 
590  return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
591  int(__m1), int(__m2));
592  }
593 
594  _GLIBCXX_ALWAYS_INLINE bool
595  compare_exchange_strong(__int_type& __i1, __int_type __i2,
596  memory_order __m1,
597  memory_order __m2) volatile noexcept
598  {
599  __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
600 
601  return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
602  int(__m1), int(__m2));
603  }
604 
605  _GLIBCXX_ALWAYS_INLINE bool
606  compare_exchange_strong(__int_type& __i1, __int_type __i2,
607  memory_order __m = memory_order_seq_cst) noexcept
608  {
609  return compare_exchange_strong(__i1, __i2, __m,
610  __cmpexch_failure_order(__m));
611  }
612 
613  _GLIBCXX_ALWAYS_INLINE bool
614  compare_exchange_strong(__int_type& __i1, __int_type __i2,
615  memory_order __m = memory_order_seq_cst) volatile noexcept
616  {
617  return compare_exchange_strong(__i1, __i2, __m,
618  __cmpexch_failure_order(__m));
619  }
620 
621 #if __glibcxx_atomic_wait
622  _GLIBCXX_ALWAYS_INLINE void
623  wait(__int_type __old,
624  memory_order __m = memory_order_seq_cst) const noexcept
625  {
626  std::__atomic_wait_address_v(&_M_i, __old,
627  [__m, this] { return this->load(__m); });
628  }
629 
630  // TODO add const volatile overload
631 
632  _GLIBCXX_ALWAYS_INLINE void
633  notify_one() noexcept
634  { std::__atomic_notify_address(&_M_i, false); }
635 
636  // TODO add const volatile overload
637 
638  _GLIBCXX_ALWAYS_INLINE void
639  notify_all() noexcept
640  { std::__atomic_notify_address(&_M_i, true); }
641 
642  // TODO add const volatile overload
643 #endif // __glibcxx_atomic_wait
644 
645  _GLIBCXX_ALWAYS_INLINE __int_type
646  fetch_add(__int_type __i,
647  memory_order __m = memory_order_seq_cst) noexcept
648  { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
649 
650  _GLIBCXX_ALWAYS_INLINE __int_type
651  fetch_add(__int_type __i,
652  memory_order __m = memory_order_seq_cst) volatile noexcept
653  { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
654 
655  _GLIBCXX_ALWAYS_INLINE __int_type
656  fetch_sub(__int_type __i,
657  memory_order __m = memory_order_seq_cst) noexcept
658  { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
659 
660  _GLIBCXX_ALWAYS_INLINE __int_type
661  fetch_sub(__int_type __i,
662  memory_order __m = memory_order_seq_cst) volatile noexcept
663  { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
664 
665  _GLIBCXX_ALWAYS_INLINE __int_type
666  fetch_and(__int_type __i,
667  memory_order __m = memory_order_seq_cst) noexcept
668  { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
669 
670  _GLIBCXX_ALWAYS_INLINE __int_type
671  fetch_and(__int_type __i,
672  memory_order __m = memory_order_seq_cst) volatile noexcept
673  { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
674 
675  _GLIBCXX_ALWAYS_INLINE __int_type
676  fetch_or(__int_type __i,
677  memory_order __m = memory_order_seq_cst) noexcept
678  { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
679 
680  _GLIBCXX_ALWAYS_INLINE __int_type
681  fetch_or(__int_type __i,
682  memory_order __m = memory_order_seq_cst) volatile noexcept
683  { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
684 
685  _GLIBCXX_ALWAYS_INLINE __int_type
686  fetch_xor(__int_type __i,
687  memory_order __m = memory_order_seq_cst) noexcept
688  { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
689 
690  _GLIBCXX_ALWAYS_INLINE __int_type
691  fetch_xor(__int_type __i,
692  memory_order __m = memory_order_seq_cst) volatile noexcept
693  { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
694 
695 #if __glibcxx_atomic_min_max
696  _GLIBCXX_ALWAYS_INLINE __int_type
697  fetch_min(__int_type __i,
698  memory_order __m = memory_order_seq_cst) noexcept
699  { return __atomic_impl::__fetch_min(&_M_i, __i, __m); }
700 
701  _GLIBCXX_ALWAYS_INLINE __int_type
702  fetch_min(__int_type __i,
703  memory_order __m = memory_order_seq_cst) volatile noexcept
704  { return __atomic_impl::__fetch_min(&_M_i, __i, __m); }
705 
706  _GLIBCXX_ALWAYS_INLINE __int_type
707  fetch_max(__int_type __i,
708  memory_order __m = memory_order_seq_cst) noexcept
709  { return __atomic_impl::__fetch_max(&_M_i, __i, __m); }
710 
711  _GLIBCXX_ALWAYS_INLINE __int_type
712  fetch_max(__int_type __i,
713  memory_order __m = memory_order_seq_cst) volatile noexcept
714  { return __atomic_impl::__fetch_max(&_M_i, __i, __m); }
715 #endif
716  };
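  // Illustrative usage, not part of this header: the canonical
  // compare_exchange_weak retry loop on std::atomic<int> (which derives
  // from __atomic_base<int>), sketched assuming C++11 <atomic>. On failure
  // the first argument is reloaded with the current value, so only the
  // desired value needs recomputing before the retry.
  //
  //   #include <atomic>
  //   std::atomic<int> counter{0};
  //
  //   void add_saturating(int delta, int limit)
  //   {
  //     int old = counter.load(std::memory_order_relaxed);
  //     int desired;
  //     do
  //       desired = old + delta > limit ? limit : old + delta;
  //     while (!counter.compare_exchange_weak(old, desired,
  //                                           std::memory_order_relaxed));
  //   }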
717 
718 
719  /// Partial specialization for pointer types.
720  template<typename _PTp>
721  struct __atomic_base<_PTp*>
722  {
723  private:
724  typedef _PTp* __pointer_type;
725 
726  __pointer_type _M_p _GLIBCXX20_INIT(nullptr);
727 
728  static constexpr ptrdiff_t
729  _S_type_size(ptrdiff_t __d)
730  { return __d * sizeof(_PTp); }
731 
732  public:
733  __atomic_base() noexcept = default;
734  ~__atomic_base() noexcept = default;
735  __atomic_base(const __atomic_base&) = delete;
736  __atomic_base& operator=(const __atomic_base&) = delete;
737  __atomic_base& operator=(const __atomic_base&) volatile = delete;
738 
739  // Requires __pointer_type convertible to _M_p.
740  constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }
741 
742  operator __pointer_type() const noexcept
743  { return load(); }
744 
745  operator __pointer_type() const volatile noexcept
746  { return load(); }
747 
748  __pointer_type
749  operator=(__pointer_type __p) noexcept
750  {
751  store(__p);
752  return __p;
753  }
754 
755  __pointer_type
756  operator=(__pointer_type __p) volatile noexcept
757  {
758  store(__p);
759  return __p;
760  }
761 
762  __pointer_type
763  operator++(int) noexcept
764  { return fetch_add(1); }
765 
766  __pointer_type
767  operator++(int) volatile noexcept
768  { return fetch_add(1); }
769 
770  __pointer_type
771  operator--(int) noexcept
772  { return fetch_sub(1); }
773 
774  __pointer_type
775  operator--(int) volatile noexcept
776  { return fetch_sub(1); }
777 
778  __pointer_type
779  operator++() noexcept
780  { return __atomic_add_fetch(&_M_p, _S_type_size(1),
781  int(memory_order_seq_cst)); }
782 
783  __pointer_type
784  operator++() volatile noexcept
785  { return __atomic_add_fetch(&_M_p, _S_type_size(1),
786  int(memory_order_seq_cst)); }
787 
788  __pointer_type
789  operator--() noexcept
790  { return __atomic_sub_fetch(&_M_p, _S_type_size(1),
791  int(memory_order_seq_cst)); }
792 
793  __pointer_type
794  operator--() volatile noexcept
795  { return __atomic_sub_fetch(&_M_p, _S_type_size(1),
796  int(memory_order_seq_cst)); }
797 
798  __pointer_type
799  operator+=(ptrdiff_t __d) noexcept
800  { return __atomic_add_fetch(&_M_p, _S_type_size(__d),
801  int(memory_order_seq_cst)); }
802 
803  __pointer_type
804  operator+=(ptrdiff_t __d) volatile noexcept
805  { return __atomic_add_fetch(&_M_p, _S_type_size(__d),
806  int(memory_order_seq_cst)); }
807 
808  __pointer_type
809  operator-=(ptrdiff_t __d) noexcept
810  { return __atomic_sub_fetch(&_M_p, _S_type_size(__d),
811  int(memory_order_seq_cst)); }
812 
813  __pointer_type
814  operator-=(ptrdiff_t __d) volatile noexcept
815  { return __atomic_sub_fetch(&_M_p, _S_type_size(__d),
816  int(memory_order_seq_cst)); }
817 
818  bool
819  is_lock_free() const noexcept
820  {
821  // Produce a fake, minimally aligned pointer.
822  return __atomic_is_lock_free(sizeof(_M_p),
823  reinterpret_cast<void *>(-__alignof(_M_p)));
824  }
825 
826  bool
827  is_lock_free() const volatile noexcept
828  {
829  // Produce a fake, minimally aligned pointer.
830  return __atomic_is_lock_free(sizeof(_M_p),
831  reinterpret_cast<void *>(-__alignof(_M_p)));
832  }
833 
834  _GLIBCXX_ALWAYS_INLINE void
835  store(__pointer_type __p,
836  memory_order __m = memory_order_seq_cst) noexcept
837  {
838  memory_order __b __attribute__ ((__unused__))
839  = __m & __memory_order_mask;
840 
841  __glibcxx_assert(__b != memory_order_acquire);
842  __glibcxx_assert(__b != memory_order_acq_rel);
843  __glibcxx_assert(__b != memory_order_consume);
844 
845  __atomic_store_n(&_M_p, __p, int(__m));
846  }
847 
848  _GLIBCXX_ALWAYS_INLINE void
849  store(__pointer_type __p,
850  memory_order __m = memory_order_seq_cst) volatile noexcept
851  {
852  memory_order __b __attribute__ ((__unused__))
853  = __m & __memory_order_mask;
854  __glibcxx_assert(__b != memory_order_acquire);
855  __glibcxx_assert(__b != memory_order_acq_rel);
856  __glibcxx_assert(__b != memory_order_consume);
857 
858  __atomic_store_n(&_M_p, __p, int(__m));
859  }
860 
861  _GLIBCXX_ALWAYS_INLINE __pointer_type
862  load(memory_order __m = memory_order_seq_cst) const noexcept
863  {
864  memory_order __b __attribute__ ((__unused__))
865  = __m & __memory_order_mask;
866  __glibcxx_assert(__b != memory_order_release);
867  __glibcxx_assert(__b != memory_order_acq_rel);
868 
869  return __atomic_load_n(&_M_p, int(__m));
870  }
871 
872  _GLIBCXX_ALWAYS_INLINE __pointer_type
873  load(memory_order __m = memory_order_seq_cst) const volatile noexcept
874  {
875  memory_order __b __attribute__ ((__unused__))
876  = __m & __memory_order_mask;
877  __glibcxx_assert(__b != memory_order_release);
878  __glibcxx_assert(__b != memory_order_acq_rel);
879 
880  return __atomic_load_n(&_M_p, int(__m));
881  }
882 
883  _GLIBCXX_ALWAYS_INLINE __pointer_type
884  exchange(__pointer_type __p,
885  memory_order __m = memory_order_seq_cst) noexcept
886  {
887  return __atomic_exchange_n(&_M_p, __p, int(__m));
888  }
889 
890 
891  _GLIBCXX_ALWAYS_INLINE __pointer_type
892  exchange(__pointer_type __p,
893  memory_order __m = memory_order_seq_cst) volatile noexcept
894  {
895  return __atomic_exchange_n(&_M_p, __p, int(__m));
896  }
897 
898  _GLIBCXX_ALWAYS_INLINE bool
899  compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
900  memory_order __m1,
901  memory_order __m2) noexcept
902  {
903  __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
904 
905  return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
906  int(__m1), int(__m2));
907  }
908 
909  _GLIBCXX_ALWAYS_INLINE bool
910  compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
911  memory_order __m1,
912  memory_order __m2) volatile noexcept
913  {
914  __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
915 
916  return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
917  int(__m1), int(__m2));
918  }
919 
920  _GLIBCXX_ALWAYS_INLINE bool
921  compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
922  memory_order __m1,
923  memory_order __m2) noexcept
924  {
925  __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
926 
927  return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
928  int(__m1), int(__m2));
929  }
930 
931  _GLIBCXX_ALWAYS_INLINE bool
932  compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
933  memory_order __m1,
934  memory_order __m2) volatile noexcept
935  {
936  __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
937 
938  return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
939  int(__m1), int(__m2));
940  }
941 
942 #if __glibcxx_atomic_wait
943  _GLIBCXX_ALWAYS_INLINE void
944  wait(__pointer_type __old,
945  memory_order __m = memory_order_seq_cst) const noexcept
946  {
947  std::__atomic_wait_address_v(&_M_p, __old,
948  [__m, this]
949  { return this->load(__m); });
950  }
951 
952  // TODO add const volatile overload
953 
954  _GLIBCXX_ALWAYS_INLINE void
955  notify_one() const noexcept
956  { std::__atomic_notify_address(&_M_p, false); }
957 
958  // TODO add const volatile overload
959 
960  _GLIBCXX_ALWAYS_INLINE void
961  notify_all() const noexcept
962  { std::__atomic_notify_address(&_M_p, true); }
963 
964  // TODO add const volatile overload
965 #endif // __glibcxx_atomic_wait
966 
967  _GLIBCXX_ALWAYS_INLINE __pointer_type
968  fetch_add(ptrdiff_t __d,
969  memory_order __m = memory_order_seq_cst) noexcept
970  { return __atomic_fetch_add(&_M_p, _S_type_size(__d), int(__m)); }
971 
972  _GLIBCXX_ALWAYS_INLINE __pointer_type
973  fetch_add(ptrdiff_t __d,
974  memory_order __m = memory_order_seq_cst) volatile noexcept
975  { return __atomic_fetch_add(&_M_p, _S_type_size(__d), int(__m)); }
976 
977  _GLIBCXX_ALWAYS_INLINE __pointer_type
978  fetch_sub(ptrdiff_t __d,
979  memory_order __m = memory_order_seq_cst) noexcept
980  { return __atomic_fetch_sub(&_M_p, _S_type_size(__d), int(__m)); }
981 
982  _GLIBCXX_ALWAYS_INLINE __pointer_type
983  fetch_sub(ptrdiff_t __d,
984  memory_order __m = memory_order_seq_cst) volatile noexcept
985  { return __atomic_fetch_sub(&_M_p, _S_type_size(__d), int(__m)); }
986 
987 #if __glibcxx_atomic_min_max
988  _GLIBCXX_ALWAYS_INLINE __pointer_type
989  fetch_min(__pointer_type __p,
990  memory_order __m = memory_order_seq_cst) noexcept
991  { return __atomic_impl::__fetch_min(&_M_p, __p, __m); }
992 
993  _GLIBCXX_ALWAYS_INLINE __pointer_type
994  fetch_min(__pointer_type __p,
995  memory_order __m = memory_order_seq_cst) volatile noexcept
996  { return __atomic_impl::__fetch_min(&_M_p, __p, __m); }
997 
998  _GLIBCXX_ALWAYS_INLINE __pointer_type
999  fetch_max(__pointer_type __p,
1000  memory_order __m = memory_order_seq_cst) noexcept
1001  { return __atomic_impl::__fetch_max(&_M_p, __p, __m); }
1002 
1003  _GLIBCXX_ALWAYS_INLINE __pointer_type
1004  fetch_max(__pointer_type __p,
1005  memory_order __m = memory_order_seq_cst) volatile noexcept
1006  { return __atomic_impl::__fetch_max(&_M_p, __p, __m); }
1007 #endif
1008  };
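  // Illustrative usage, not part of this header: arithmetic on an atomic
  // pointer is scaled by the element size, exactly as _S_type_size above
  // does, so fetch_add(1) advances by sizeof(int) bytes in this sketch
  // (assuming C++11 <atomic>).
  //
  //   #include <atomic>
  //   int buf[8];
  //   std::atomic<int*> cursor{buf};
  //
  //   int* claim_slot()                 // returns the previously current slot
  //   { return cursor.fetch_add(1); }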
1009 
1010  namespace __atomic_impl
1011  {
1012  // Implementation details of atomic padding handling
1013 
1014  template<typename _Tp>
1015  constexpr bool
1016  __maybe_has_padding()
1017  {
1018 #if ! __has_builtin(__builtin_clear_padding)
1019  return false;
1020 #elif __has_builtin(__has_unique_object_representations)
1021  return !__has_unique_object_representations(_Tp)
1022  && !is_same<_Tp, float>::value && !is_same<_Tp, double>::value;
1023 #else
1024  return true;
1025 #endif
1026  }
1027 
1028 #pragma GCC diagnostic push
1029 #pragma GCC diagnostic ignored "-Wc++17-extensions"
1030 
1031  template<typename _Tp>
1032  _GLIBCXX_ALWAYS_INLINE _GLIBCXX14_CONSTEXPR _Tp*
1033  __clear_padding(_Tp& __val) noexcept
1034  {
1035  auto* __ptr = std::__addressof(__val);
1036 #if __has_builtin(__builtin_clear_padding)
1037  if constexpr (__atomic_impl::__maybe_has_padding<_Tp>())
1038  __builtin_clear_padding(__ptr);
1039 #endif
1040  return __ptr;
1041  }
1042 
1043  template<bool _AtomicRef = false, typename _Tp>
1044  _GLIBCXX_ALWAYS_INLINE bool
1045  __compare_exchange(_Tp& __val, _Val<_Tp>& __e, _Val<_Tp>& __i,
1046  bool __is_weak,
1047  memory_order __s, memory_order __f) noexcept
1048  {
1049  __glibcxx_assert(__is_valid_cmpexch_failure_order(__f));
1050 
1051  using _Vp = _Val<_Tp>;
1052  _Tp* const __pval = std::__addressof(__val);
1053 
1054  if constexpr (!__atomic_impl::__maybe_has_padding<_Vp>())
1055  {
1056  return __atomic_compare_exchange(__pval, std::__addressof(__e),
1057  std::__addressof(__i), __is_weak,
1058  int(__s), int(__f));
1059  }
1060  else if constexpr (!_AtomicRef) // std::atomic<T>
1061  {
1062  // Clear padding of the value we want to set:
1063  _Vp* const __pi = __atomic_impl::__clear_padding(__i);
1064  // Only allowed to modify __e on failure, so make a copy:
1065  _Vp __exp = __e;
1066  // Clear padding of the expected value:
1067  _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);
1068 
1069  // For std::atomic<T> we know that the contained value will already
1070  // have zeroed padding, so trivial memcmp semantics are OK.
1071  if (__atomic_compare_exchange(__pval, __pexp, __pi,
1072  __is_weak, int(__s), int(__f)))
1073  return true;
1074  // Value bits must be different, copy from __exp back to __e:
1075  __builtin_memcpy(std::__addressof(__e), __pexp, sizeof(_Vp));
1076  return false;
1077  }
1078  else // std::atomic_ref<T> where T has padding bits.
1079  {
1080  // Clear padding of the value we want to set:
1081  _Vp* const __pi = __atomic_impl::__clear_padding(__i);
1082 
1083  // Only allowed to modify __e on failure, so make a copy:
1084  _Vp __exp = __e;
1085  // Optimistically assume that a previous store had zeroed padding
1086  // so that zeroing it in the expected value will match first time.
1087  _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);
1088 
1089  // compare_exchange is specified to compare value representations.
1090  // Need to check whether a failure is 'real' or just due to
1091  // differences in padding bits. This loop should run no more than
1092  // three times, because the worst case scenario is:
1093  // First CAS fails because the actual value has non-zero padding.
1094  // Second CAS fails because another thread stored the same value,
1095  // but now with padding cleared. Third CAS succeeds.
1096  // We will never need to loop a fourth time, because any value
1097  // written by another thread (whether via store, exchange or
1098  // compare_exchange) will have had its padding cleared.
1099  while (true)
1100  {
1101  // Copy of the expected value so we can clear its padding.
1102  _Vp __orig = __exp;
1103 
1104  if (__atomic_compare_exchange(__pval, __pexp, __pi,
1105  __is_weak, int(__s), int(__f)))
1106  return true;
1107 
1108  // Copy of the actual value so we can clear its padding.
1109  _Vp __curr = __exp;
1110 
1111  // Compare value representations (i.e. ignoring padding).
1112  if (__builtin_memcmp(__atomic_impl::__clear_padding(__orig),
1113  __atomic_impl::__clear_padding(__curr),
1114  sizeof(_Vp)))
1115  {
1116  // Value representations compare unequal, real failure.
1117  __builtin_memcpy(std::__addressof(__e), __pexp,
1118  sizeof(_Vp));
1119  return false;
1120  }
1121  }
1122  }
1123  }
1124 #pragma GCC diagnostic pop
1125  } // namespace __atomic_impl
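  // For illustration, not part of this header: a type for which the padding
  // handling above matters. Assuming a typical ABI, `Padded` below has three
  // bytes of tail padding, so two objects can be equal in value yet differ
  // in object representation; __compare_exchange therefore clears padding
  // with __builtin_clear_padding before the byte-wise builtin compares them.
  //
  //   #include <type_traits>
  //   struct Padded { int i; char c; };   // typically sizeof == 8 with 3 padding bytes
  //   static_assert(!std::has_unique_object_representations_v<Padded>);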
1126 
1127 #if __cplusplus > 201703L
1128  // Implementation details of atomic_ref and atomic<floating-point>.
1129  namespace __atomic_impl
1130  {
1131  // Like _Val<T> above, but for difference_type arguments.
1132  template<typename _Tp>
1133  using _Diff = __conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;
1134 
1135  template<size_t _Size, size_t _Align>
1136  _GLIBCXX_ALWAYS_INLINE bool
1137  is_lock_free() noexcept
1138  {
1139  // Produce a fake, minimally aligned pointer.
1140  return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
1141  }
1142 
1143  template<typename _Tp>
1144  _GLIBCXX_ALWAYS_INLINE void
1145  store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
1146  {
1147  __atomic_store(__ptr, __atomic_impl::__clear_padding(__t), int(__m));
1148  }
1149 
1150  template<typename _Tp>
1151  _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
1152  load(const _Tp* __ptr, memory_order __m) noexcept
1153  {
1154  alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
1155  auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
1156  __atomic_load(__ptr, __dest, int(__m));
1157  return *__dest;
1158  }
1159 
1160  template<typename _Tp>
1161  _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
1162  exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
1163  {
1164  alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
1165  auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
1166  __atomic_exchange(__ptr, __atomic_impl::__clear_padding(__desired),
1167  __dest, int(__m));
1168  return *__dest;
1169  }
1170 
1171  template<bool _AtomicRef = false, typename _Tp>
1172  _GLIBCXX_ALWAYS_INLINE bool
1173  compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
1174  _Val<_Tp> __desired, memory_order __success,
1175  memory_order __failure) noexcept
1176  {
1177  return __atomic_impl::__compare_exchange<_AtomicRef>(
1178  *__ptr, __expected, __desired, true, __success, __failure);
1179  }
1180 
1181  template<bool _AtomicRef = false, typename _Tp>
1182  _GLIBCXX_ALWAYS_INLINE bool
1183  compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
1184  _Val<_Tp> __desired, memory_order __success,
1185  memory_order __failure) noexcept
1186  {
1187  return __atomic_impl::__compare_exchange<_AtomicRef>(
1188  *__ptr, __expected, __desired, false, __success, __failure);
1189  }
1190 
1191 #if __glibcxx_atomic_wait
1192  template<typename _Tp>
1193  _GLIBCXX_ALWAYS_INLINE void
1194  wait(const _Tp* __ptr, _Val<_Tp> __old,
1195  memory_order __m = memory_order_seq_cst) noexcept
1196  {
1197  std::__atomic_wait_address_v(__ptr, __old,
1198  [__ptr, __m]() { return __atomic_impl::load(__ptr, __m); });
1199  }
1200 
1201  // TODO add const volatile overload
1202 
1203  template<typename _Tp>
1204  _GLIBCXX_ALWAYS_INLINE void
1205  notify_one(const _Tp* __ptr) noexcept
1206  { std::__atomic_notify_address(__ptr, false); }
1207 
1208  // TODO add const volatile overload
1209 
1210  template<typename _Tp>
1211  _GLIBCXX_ALWAYS_INLINE void
1212  notify_all(const _Tp* __ptr) noexcept
1213  { std::__atomic_notify_address(__ptr, true); }
1214 
1215  // TODO add const volatile overload
1216 #endif // __glibcxx_atomic_wait
1217 
1218  template<typename _Tp>
1219  _GLIBCXX_ALWAYS_INLINE _Tp
1220  fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
1221  { return __atomic_fetch_add(__ptr, __i, int(__m)); }
1222 
1223  template<typename _Tp>
1224  _GLIBCXX_ALWAYS_INLINE _Tp
1225  fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
1226  { return __atomic_fetch_sub(__ptr, __i, int(__m)); }
1227 
1228  template<typename _Tp>
1229  _GLIBCXX_ALWAYS_INLINE _Tp
1230  fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1231  { return __atomic_fetch_and(__ptr, __i, int(__m)); }
1232 
1233  template<typename _Tp>
1234  _GLIBCXX_ALWAYS_INLINE _Tp
1235  fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1236  { return __atomic_fetch_or(__ptr, __i, int(__m)); }
1237 
1238  template<typename _Tp>
1239  _GLIBCXX_ALWAYS_INLINE _Tp
1240  fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1241  { return __atomic_fetch_xor(__ptr, __i, int(__m)); }
1242 
1243  template<typename _Tp>
1244  _GLIBCXX_ALWAYS_INLINE _Tp
1245  __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
1246  { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1247 
1248  template<typename _Tp>
1249  _GLIBCXX_ALWAYS_INLINE _Tp
1250  __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
1251  { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1252 
1253  template<typename _Tp>
1254  _GLIBCXX_ALWAYS_INLINE _Tp
1255  __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1256  { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1257 
1258  template<typename _Tp>
1259  _GLIBCXX_ALWAYS_INLINE _Tp
1260  __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1261  { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1262 
1263  template<typename _Tp>
1264  _GLIBCXX_ALWAYS_INLINE _Tp
1265  __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1266  { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1267 
1268  template<typename _Tp>
1269  concept __atomic_fetch_addable
1270  = requires (_Tp __t) { __atomic_fetch_add(&__t, __t, 0); };
1271 
1272  template<typename _Tp>
1273  _Tp
1274  __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1275  {
1276  if constexpr (__atomic_fetch_addable<_Tp>)
1277  return __atomic_fetch_add(__ptr, __i, int(__m));
1278  else
1279  {
1280  _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1281  _Val<_Tp> __newval = __oldval + __i;
1282  while (!compare_exchange_weak (__ptr, __oldval, __newval, __m,
1283  memory_order_relaxed))
1284  __newval = __oldval + __i;
1285  return __oldval;
1286  }
1287  }
1288 
1289  template<typename _Tp>
1290  concept __atomic_fetch_subtractable
1291  = requires (_Tp __t) { __atomic_fetch_sub(&__t, __t, 0); };
1292 
1293  template<typename _Tp>
1294  _Tp
1295  __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1296  {
1297  if constexpr (__atomic_fetch_subtractable<_Tp>)
1298  return __atomic_fetch_sub(__ptr, __i, int(__m));
1299  else
1300  {
1301  _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1302  _Val<_Tp> __newval = __oldval - __i;
1303  while (!compare_exchange_weak (__ptr, __oldval, __newval, __m,
1304  memory_order_relaxed))
1305  __newval = __oldval - __i;
1306  return __oldval;
1307  }
1308  }
1309 
1310  template<typename _Tp>
1311  concept __atomic_add_fetchable
1312  = requires (_Tp __t) { __atomic_add_fetch(&__t, __t, 0); };
1313 
1314  template<typename _Tp>
1315  _Tp
1316  __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
1317  {
1318  if constexpr (__atomic_add_fetchable<_Tp>)
1319  return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST);
1320  else
1321  {
1322  _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1323  _Val<_Tp> __newval = __oldval + __i;
1324  while (!compare_exchange_weak (__ptr, __oldval, __newval,
1325  memory_order_seq_cst,
1326  memory_order_relaxed))
1327  __newval = __oldval + __i;
1328  return __newval;
1329  }
1330  }
1331 
1332  template<typename _Tp>
1333  concept __atomic_sub_fetchable
1334  = requires (_Tp __t) { __atomic_sub_fetch(&__t, __t, 0); };
1335 
1336  template<typename _Tp>
1337  _Tp
1338  __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
1339  {
1340  if constexpr (__atomic_sub_fetchable<_Tp>)
1341  return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST);
1342  else
1343  {
1344  _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1345  _Val<_Tp> __newval = __oldval - __i;
1346  while (!compare_exchange_weak (__ptr, __oldval, __newval,
1347  memory_order_seq_cst,
1348  memory_order_relaxed))
1349  __newval = __oldval - __i;
1350  return __newval;
1351  }
1352  }
1353 
1354 #if __glibcxx_atomic_min_max
1355  template<typename _Tp>
1356  concept __atomic_fetch_minmaxable
1357  = requires (_Tp __t) {
1358  __atomic_fetch_min(&__t, __t, 0);
1359  __atomic_fetch_max(&__t, __t, 0);
1360  };
1361 
1362  template<typename _Tp>
1363  _Tp
1364  __fetch_min(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1365  {
1366  if constexpr (__atomic_fetch_minmaxable<_Tp>)
1367  return __atomic_fetch_min(__ptr, __i, int(__m));
1368  else
1369  {
1370  _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1371  _Val<_Tp> __newval = __oldval < __i ? __oldval : __i;
1372  while (!compare_exchange_weak (__ptr, __oldval, __newval, __m,
1373  memory_order_relaxed))
1374  __newval = __oldval < __i ? __oldval : __i;
1375  return __oldval;
1376  }
1377  }
1378 
1379  template<typename _Tp>
1380  _Tp
1381  __fetch_max(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1382  {
1383  if constexpr (__atomic_fetch_minmaxable<_Tp>)
1384  return __atomic_fetch_max(__ptr, __i, int(__m));
1385  else
1386  {
1387  _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1388  _Val<_Tp> __newval = __oldval > __i ? __oldval : __i;
1389  while (!compare_exchange_weak (__ptr, __oldval, __newval, __m,
1390  memory_order_relaxed))
1391  __newval = __oldval > __i ? __oldval : __i;
1392  return __oldval;
1393  }
1394  }
1395 #endif
1396  } // namespace __atomic_impl
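  // Illustrative usage, not part of this header: std::atomic<double>
  // (implemented via __atomic_float below) supports fetch_add and fetch_sub;
  // where no hardware builtin exists, the helpers above fall back to a
  // compare_exchange_weak loop. A sketch assuming C++20 <atomic>:
  //
  //   #include <atomic>
  //   std::atomic<double> total{0.0};
  //
  //   void accumulate(double x)
  //   { total.fetch_add(x, std::memory_order_relaxed); }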
1397 
1398  // base class for atomic<floating-point-type>
1399  template<typename _Fp>
1400  struct __atomic_float
1401  {
1402  static_assert(is_floating_point_v<_Fp>);
1403 
1404  static constexpr size_t _S_alignment = __alignof__(_Fp);
1405 
1406  public:
1407  using value_type = _Fp;
1408  using difference_type = value_type;
1409 
1410  static constexpr bool is_always_lock_free
1411  = __atomic_always_lock_free(sizeof(_Fp), 0);
1412 
1413  __atomic_float() = default;
1414 
1415  constexpr
1416  __atomic_float(_Fp __t) : _M_fp(__t)
1417  {
1418  if (!std::__is_constant_evaluated())
1419  __atomic_impl::__clear_padding(_M_fp);
1420  }
1421 
1422  __atomic_float(const __atomic_float&) = delete;
1423  __atomic_float& operator=(const __atomic_float&) = delete;
1424  __atomic_float& operator=(const __atomic_float&) volatile = delete;
1425 
1426  _Fp
1427  operator=(_Fp __t) volatile noexcept
1428  {
1429  this->store(__t);
1430  return __t;
1431  }
1432 
1433  _Fp
1434  operator=(_Fp __t) noexcept
1435  {
1436  this->store(__t);
1437  return __t;
1438  }
1439 
1440  bool
1441  is_lock_free() const volatile noexcept
1442  { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
1443 
1444  bool
1445  is_lock_free() const noexcept
1446  { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
1447 
1448  void
1449  store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
1450  { __atomic_impl::store(&_M_fp, __t, __m); }
1451 
1452  void
1453  store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
1454  { __atomic_impl::store(&_M_fp, __t, __m); }
1455 
1456  _Fp
1457  load(memory_order __m = memory_order_seq_cst) const volatile noexcept
1458  { return __atomic_impl::load(&_M_fp, __m); }
1459 
1460  _Fp
1461  load(memory_order __m = memory_order_seq_cst) const noexcept
1462  { return __atomic_impl::load(&_M_fp, __m); }
1463 
1464  operator _Fp() const volatile noexcept { return this->load(); }
1465  operator _Fp() const noexcept { return this->load(); }
1466 
1467  _Fp
1468  exchange(_Fp __desired,
1469  memory_order __m = memory_order_seq_cst) volatile noexcept
1470  { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
1471 
1472  _Fp
1473  exchange(_Fp __desired,
1474  memory_order __m = memory_order_seq_cst) noexcept
1475  { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
1476 
1477  bool
1478  compare_exchange_weak(_Fp& __expected, _Fp __desired,
1479  memory_order __success,
1480  memory_order __failure) noexcept
1481  {
1482  return __atomic_impl::compare_exchange_weak(&_M_fp,
1483  __expected, __desired,
1484  __success, __failure);
1485  }
1486 
1487  bool
1488  compare_exchange_weak(_Fp& __expected, _Fp __desired,
1489  memory_order __success,
1490  memory_order __failure) volatile noexcept
1491  {
1492  return __atomic_impl::compare_exchange_weak(&_M_fp,
1493  __expected, __desired,
1494  __success, __failure);
1495  }
1496 
1497  bool
1498  compare_exchange_strong(_Fp& __expected, _Fp __desired,
1499  memory_order __success,
1500  memory_order __failure) noexcept
1501  {
1502  return __atomic_impl::compare_exchange_strong(&_M_fp,
1503  __expected, __desired,
1504  __success, __failure);
1505  }
1506 
1507  bool
1508  compare_exchange_strong(_Fp& __expected, _Fp __desired,
1509  memory_order __success,
1510  memory_order __failure) volatile noexcept
1511  {
1512  return __atomic_impl::compare_exchange_strong(&_M_fp,
1513  __expected, __desired,
1514  __success, __failure);
1515  }
1516 
1517  bool
1518  compare_exchange_weak(_Fp& __expected, _Fp __desired,
1519  memory_order __order = memory_order_seq_cst)
1520  noexcept
1521  {
1522  return compare_exchange_weak(__expected, __desired, __order,
1523  __cmpexch_failure_order(__order));
1524  }
1525 
1526  bool
1527  compare_exchange_weak(_Fp& __expected, _Fp __desired,
1528  memory_order __order = memory_order_seq_cst)
1529  volatile noexcept
1530  {
1531  return compare_exchange_weak(__expected, __desired, __order,
1532  __cmpexch_failure_order(__order));
1533  }
1534 
1535  bool
1536  compare_exchange_strong(_Fp& __expected, _Fp __desired,
1537  memory_order __order = memory_order_seq_cst)
1538  noexcept
1539  {
1540  return compare_exchange_strong(__expected, __desired, __order,
1541  __cmpexch_failure_order(__order));
1542  }
1543 
1544  bool
1545  compare_exchange_strong(_Fp& __expected, _Fp __desired,
1546  memory_order __order = memory_order_seq_cst)
1547  volatile noexcept
1548  {
1549  return compare_exchange_strong(__expected, __desired, __order,
1550  __cmpexch_failure_order(__order));
1551  }
1552 
1553 #if __glibcxx_atomic_wait
1554  _GLIBCXX_ALWAYS_INLINE void
1555  wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
1556  { __atomic_impl::wait(&_M_fp, __old, __m); }
1557 
1558  // TODO add const volatile overload
1559 
1560  _GLIBCXX_ALWAYS_INLINE void
1561  notify_one() const noexcept
1562  { __atomic_impl::notify_one(&_M_fp); }
1563 
1564  // TODO add const volatile overload
1565 
1566  _GLIBCXX_ALWAYS_INLINE void
1567  notify_all() const noexcept
1568  { __atomic_impl::notify_all(&_M_fp); }
1569 
1570  // TODO add const volatile overload
1571 #endif // __glibcxx_atomic_wait
1572 
1573  value_type
1574  fetch_add(value_type __i,
1575  memory_order __m = memory_order_seq_cst) noexcept
1576  { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
1577 
1578  value_type
1579  fetch_add(value_type __i,
1580  memory_order __m = memory_order_seq_cst) volatile noexcept
1581  { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
1582 
1583  value_type
1584  fetch_sub(value_type __i,
1585  memory_order __m = memory_order_seq_cst) noexcept
1586  { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
1587 
1588  value_type
1589  fetch_sub(value_type __i,
1590  memory_order __m = memory_order_seq_cst) volatile noexcept
1591  { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
1592 
1593 #if __glibcxx_atomic_min_max
1594  value_type
1595  fetch_min(value_type __i,
1596  memory_order __m = memory_order_seq_cst) noexcept
1597  { return __atomic_impl::__fetch_min(&_M_fp, __i, __m); }
1598 
1599  value_type
1600  fetch_min(value_type __i,
1601  memory_order __m = memory_order_seq_cst) volatile noexcept
1602  { return __atomic_impl::__fetch_min(&_M_fp, __i, __m); }
1603 
1604  value_type
1605  fetch_max(value_type __i,
1606  memory_order __m = memory_order_seq_cst) noexcept
1607  { return __atomic_impl::__fetch_max(&_M_fp, __i, __m); }
1608 
1609  value_type
1610  fetch_max(value_type __i,
1611  memory_order __m = memory_order_seq_cst) volatile noexcept
1612  { return __atomic_impl::__fetch_max(&_M_fp, __i, __m); }
1613 #endif
1614 
1615  value_type
1616  operator+=(value_type __i) noexcept
1617  { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
1618 
1619  value_type
1620  operator+=(value_type __i) volatile noexcept
1621  { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
1622 
1623  value_type
1624  operator-=(value_type __i) noexcept
1625  { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
1626 
1627  value_type
1628  operator-=(value_type __i) volatile noexcept
1629  { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
1630 
1631  private:
1632  alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
1633  };
1634 #undef _GLIBCXX20_INIT
1635 
1636  // __atomic_ref_base<const _Tp> provides the APIs common to const and
1637  // non-const types.
1638  // __atomic_ref_base<_Tp> inherits from __atomic_ref_base<const _Tp>
1639  // and provides the common APIs that implement the constraints in [atomic.ref].
1640  // __atomic_ref<_Tp> inherits from __atomic_ref_base<_Tp> (const or non-const)
1641  // and adds the type-specific mutating APIs.
1642  // atomic_ref inherits from __atomic_ref.
1643 
1644  template<typename _Tp>
1645  struct __atomic_ref_base;
1646 
1647  template<typename _Tp>
1648  struct __atomic_ref_base<const _Tp>
1649  {
1650  private:
1651  using _Vt = remove_cv_t<_Tp>;
1652 
1653  static consteval bool
1654  _S_is_always_lock_free()
1655  {
1656  if constexpr (is_pointer_v<_Vt>)
1657  return ATOMIC_POINTER_LOCK_FREE == 2;
1658  else
1659  return __atomic_always_lock_free(sizeof(_Vt), 0);
1660  }
1661 
1662  static consteval int
1663  _S_required_alignment()
1664  {
1665  if constexpr (is_floating_point_v<_Vt> || is_pointer_v<_Vt>)
1666  return __alignof__(_Vt);
1667  else if constexpr ((sizeof(_Vt) & (sizeof(_Vt) - 1)) || sizeof(_Vt) > 16)
1668  return alignof(_Vt);
1669  else
1670  // 1/2/4/8/16-byte types, including integral types,
1671  // must be aligned to at least their size.
1672  return (sizeof(_Vt) > alignof(_Vt)) ? sizeof(_Vt) : alignof(_Vt);
1673  }
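  // For illustration, not part of this header: on an ABI where
  // alignof(long long) == 4 (e.g. typical 32-bit x86), the final branch
  // above still returns 8 for an 8-byte integer, so
  // std::atomic_ref<long long>::required_alignment is 8 and the
  // referenced object must be over-aligned accordingly:
  //
  //   alignas(std::atomic_ref<long long>::required_alignment) long long n = 0;
  //   std::atomic_ref<long long> ref(n);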
1674 
1675  public:
1676  using value_type = _Vt;
1677  static_assert(is_trivially_copyable_v<value_type>);
1678 
1679  static constexpr bool is_always_lock_free = _S_is_always_lock_free();
1680  static_assert(is_always_lock_free || !is_volatile_v<_Tp>,
1681  "atomic operations on volatile T must be lock-free");
1682 
1683  static constexpr size_t required_alignment = _S_required_alignment();
1684 
1685  __atomic_ref_base() = delete;
1686  __atomic_ref_base& operator=(const __atomic_ref_base&) = delete;
1687 
1688  explicit
1689  __atomic_ref_base(const _Tp* __ptr) noexcept
1690  : _M_ptr(const_cast<_Tp*>(__ptr))
1691  { }
1692 
1693  __atomic_ref_base(const __atomic_ref_base&) noexcept = default;
1694 
1695  operator value_type() const noexcept { return this->load(); }
1696 
1697  bool
1698  is_lock_free() const noexcept
1699  { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }
1700 
1701  value_type
1702  load(memory_order __m = memory_order_seq_cst) const noexcept
1703  { return __atomic_impl::load(_M_ptr, __m); }
1704 
1705 #if __glibcxx_atomic_wait
1706  _GLIBCXX_ALWAYS_INLINE void
1707  wait(value_type __old, memory_order __m = memory_order_seq_cst) const noexcept
1708  {
1709  // TODO remove when volatile is supported
1710  static_assert(!is_volatile_v<_Tp>,
1711  "atomic waits on volatile are not supported");
1712  __atomic_impl::wait(_M_ptr, __old, __m);
1713  }
1714 #endif // __glibcxx_atomic_wait
1715 
1716 #if __glibcxx_atomic_ref >= 202411L
1717  _GLIBCXX_ALWAYS_INLINE constexpr const _Tp*
1718  address() const noexcept
1719  { return _M_ptr; }
1720 #endif // __glibcxx_atomic_ref >= 202411L
1721 
1722  protected:
1723  _Tp* _M_ptr;
1724  };
1725 
1726  template<typename _Tp>
1727  struct __atomic_ref_base
1728  : __atomic_ref_base<const _Tp>
1729  {
1730  using value_type = typename __atomic_ref_base<const _Tp>::value_type;
1731 
1732  explicit
1733  __atomic_ref_base(_Tp* __ptr) noexcept
1734  : __atomic_ref_base<const _Tp>(__ptr)
1735  { }
1736 
1737  value_type
1738  operator=(value_type __t) const noexcept
1739  {
1740  this->store(__t);
1741  return __t;
1742  }
1743 
1744  void
1745  store(value_type __t, memory_order __m = memory_order_seq_cst) const noexcept
1746  { __atomic_impl::store(this->_M_ptr, __t, __m); }
1747 
1748  value_type
1749  exchange(value_type __desired, memory_order __m = memory_order_seq_cst)
1750  const noexcept
1751  { return __atomic_impl::exchange(this->_M_ptr, __desired, __m); }
1752 
1753  bool
1754  compare_exchange_weak(value_type& __expected, value_type __desired,
1755  memory_order __success,
1756  memory_order __failure) const noexcept
1757  {
1758  return __atomic_impl::compare_exchange_weak<true>(
1759  this->_M_ptr, __expected, __desired, __success, __failure);
1760  }
1761 
1762  bool
1763  compare_exchange_strong(value_type& __expected, value_type __desired,
1764  memory_order __success,
1765  memory_order __failure) const noexcept
1766  {
1767  return __atomic_impl::compare_exchange_strong<true>(
1768  this->_M_ptr, __expected, __desired, __success, __failure);
1769  }
1770 
1771  bool
1772  compare_exchange_weak(value_type& __expected, value_type __desired,
1773  memory_order __order = memory_order_seq_cst)
1774  const noexcept
1775  {
1776  return compare_exchange_weak(__expected, __desired, __order,
1777  __cmpexch_failure_order(__order));
1778  }
1779 
1780  bool
1781  compare_exchange_strong(value_type& __expected, value_type __desired,
1782  memory_order __order = memory_order_seq_cst)
1783  const noexcept
1784  {
1785  return compare_exchange_strong(__expected, __desired, __order,
1786  __cmpexch_failure_order(__order));
1787  }
1788 
1789 #if __glibcxx_atomic_wait
1790  _GLIBCXX_ALWAYS_INLINE void
1791  notify_one() const noexcept
1792  {
1793  // TODO remove when volatile is supported
1794  static_assert(!is_volatile_v<_Tp>,
1795  "atomic waits on volatile are not supported");
1796  __atomic_impl::notify_one(this->_M_ptr);
1797  }
1798 
1799  _GLIBCXX_ALWAYS_INLINE void
1800  notify_all() const noexcept
1801  {
1802  // TODO remove when volatile is supported
1803  static_assert(!is_volatile_v<_Tp>,
1804  "atomic waits on volatile are not supported");
1805  __atomic_impl::notify_all(this->_M_ptr);
1806  }
1807 #endif // __glibcxx_atomic_wait
1808 
1809 #if __glibcxx_atomic_ref >= 202411L
1810  _GLIBCXX_ALWAYS_INLINE constexpr _Tp*
1811  address() const noexcept
1812  { return this->_M_ptr; }
1813 #endif // __glibcxx_atomic_ref >= 202411L
1814  };
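  // Editorial example (added for this documentation page, not in the source):
  // the compare_exchange members defined above are typically driven in a
  // retry loop.  A minimal sketch via the public std::atomic_ref interface:
  //
  //   int value = 0;
  //   std::atomic_ref<int> ref(value);
  //   int expected = ref.load(std::memory_order_relaxed);
  //   while (!ref.compare_exchange_weak(expected, expected + 1,
  //                                     std::memory_order_acq_rel,
  //                                     std::memory_order_relaxed))
  //     { } // on failure, expected is reloaded with the current value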
1815 
1816  template<typename _Tp,
1817  bool = is_integral_v<_Tp> && !is_same_v<remove_cv_t<_Tp>, bool>,
1818  bool = is_floating_point_v<_Tp>,
1819  bool = is_pointer_v<_Tp>>
1820  struct __atomic_ref;
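  // Editorial note (added for this documentation page, not in the source):
  // the three bool parameters select one of the specializations below:
  // integral types other than bool, floating-point types, or pointer types.
  // Each specialization adds the fetch_* operations and arithmetic operators
  // that make sense for its category; every other trivially copyable type
  // falls back to the generic specialization that follows.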
1821 
1822  // base class for non-integral, non-floating-point, non-pointer types
1823  template<typename _Tp>
1824  struct __atomic_ref<_Tp, false, false, false>
1825  : __atomic_ref_base<_Tp>
1826  {
1827  using __atomic_ref_base<_Tp>::__atomic_ref_base;
1828  using __atomic_ref_base<_Tp>::operator=;
1829  };
1830 
1831  template<typename _Tp>
1832  struct __atomic_ref<const _Tp, false, false, false>
1833  : __atomic_ref_base<const _Tp>
1834  {
1835  using __atomic_ref_base<const _Tp>::__atomic_ref_base;
1836  };
1837 
1838  // base class for atomic_ref<integral-type>
1839  template<typename _Tp>
1840  struct __atomic_ref<_Tp, true, false, false>
1841  : __atomic_ref_base<_Tp>
1842  {
1843  using value_type = typename __atomic_ref_base<_Tp>::value_type;
1844  using difference_type = value_type;
1845 
1846  using __atomic_ref_base<_Tp>::__atomic_ref_base;
1847  using __atomic_ref_base<_Tp>::operator=;
1848 
1849  value_type
1850  fetch_add(value_type __i,
1851  memory_order __m = memory_order_seq_cst) const noexcept
1852  { return __atomic_impl::fetch_add(this->_M_ptr, __i, __m); }
1853 
1854  value_type
1855  fetch_sub(value_type __i,
1856  memory_order __m = memory_order_seq_cst) const noexcept
1857  { return __atomic_impl::fetch_sub(this->_M_ptr, __i, __m); }
1858 
1859  value_type
1860  fetch_and(value_type __i,
1861  memory_order __m = memory_order_seq_cst) const noexcept
1862  { return __atomic_impl::fetch_and(this->_M_ptr, __i, __m); }
1863 
1864  value_type
1865  fetch_or(value_type __i,
1866  memory_order __m = memory_order_seq_cst) const noexcept
1867  { return __atomic_impl::fetch_or(this->_M_ptr, __i, __m); }
1868 
1869  value_type
1870  fetch_xor(value_type __i,
1871  memory_order __m = memory_order_seq_cst) const noexcept
1872  { return __atomic_impl::fetch_xor(this->_M_ptr, __i, __m); }
1873 
1874 #if __glibcxx_atomic_min_max
1875  value_type
1876  fetch_min(value_type __i,
1877  memory_order __m = memory_order_seq_cst) const noexcept
1878  { return __atomic_impl::__fetch_min(this->_M_ptr, __i, __m); }
1879 
1880  value_type
1881  fetch_max(value_type __i,
1882  memory_order __m = memory_order_seq_cst) const noexcept
1883  { return __atomic_impl::__fetch_max(this->_M_ptr, __i, __m); }
1884 #endif
1885 
1886  _GLIBCXX_ALWAYS_INLINE value_type
1887  operator++(int) const noexcept
1888  { return fetch_add(1); }
1889 
1890  _GLIBCXX_ALWAYS_INLINE value_type
1891  operator--(int) const noexcept
1892  { return fetch_sub(1); }
1893 
1894  value_type
1895  operator++() const noexcept
1896  { return __atomic_impl::__add_fetch(this->_M_ptr, value_type(1)); }
1897 
1898  value_type
1899  operator--() const noexcept
1900  { return __atomic_impl::__sub_fetch(this->_M_ptr, value_type(1)); }
1901 
1902  value_type
1903  operator+=(value_type __i) const noexcept
1904  { return __atomic_impl::__add_fetch(this->_M_ptr, __i); }
1905 
1906  value_type
1907  operator-=(value_type __i) const noexcept
1908  { return __atomic_impl::__sub_fetch(this->_M_ptr, __i); }
1909 
1910  value_type
1911  operator&=(value_type __i) const noexcept
1912  { return __atomic_impl::__and_fetch(this->_M_ptr, __i); }
1913 
1914  value_type
1915  operator|=(value_type __i) const noexcept
1916  { return __atomic_impl::__or_fetch(this->_M_ptr, __i); }
1917 
1918  value_type
1919  operator^=(value_type __i) const noexcept
1920  { return __atomic_impl::__xor_fetch(this->_M_ptr, __i); }
1921  };
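  // Editorial example (added for this documentation page, not in the source):
  // the integral specialization above backs std::atomic_ref for non-bool
  // integer types.  A minimal sketch:
  //
  //   unsigned counter = 0;
  //   std::atomic_ref<unsigned> ref(counter);
  //   ref.fetch_add(2, std::memory_order_relaxed); // counter == 2
  //   ++ref;                                       // __add_fetch, counter == 3
  //   ref |= 0x10;                                 // __or_fetch, counter == 0x13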
1922 
1923  template<typename _Tp>
1924  struct __atomic_ref<const _Tp, true, false, false>
1925  : __atomic_ref_base<const _Tp>
1926  {
1927  using difference_type = typename __atomic_ref_base<const _Tp>::value_type;
1928  using __atomic_ref_base<const _Tp>::__atomic_ref_base;
1929  };
1930 
1931  // base class for atomic_ref<floating-point-type>
1932  template<typename _Fp>
1933  struct __atomic_ref<_Fp, false, true, false>
1934  : __atomic_ref_base<_Fp>
1935  {
1936  using value_type = typename __atomic_ref_base<_Fp>::value_type;
1937  using difference_type = value_type;
1938 
1939  using __atomic_ref_base<_Fp>::__atomic_ref_base;
1940  using __atomic_ref_base<_Fp>::operator=;
1941 
1942  value_type
1943  fetch_add(value_type __i,
1944  memory_order __m = memory_order_seq_cst) const noexcept
1945  { return __atomic_impl::__fetch_add_flt(this->_M_ptr, __i, __m); }
1946 
1947  value_type
1948  fetch_sub(value_type __i,
1949  memory_order __m = memory_order_seq_cst) const noexcept
1950  { return __atomic_impl::__fetch_sub_flt(this->_M_ptr, __i, __m); }
1951 
1952 #if __glibcxx_atomic_min_max
1953  value_type
1954  fetch_min(value_type __i,
1955  memory_order __m = memory_order_seq_cst) const noexcept
1956  { return __atomic_impl::__fetch_min(this->_M_ptr, __i, __m); }
1957 
1958  value_type
1959  fetch_max(value_type __i,
1960  memory_order __m = memory_order_seq_cst) const noexcept
1961  { return __atomic_impl::__fetch_max(this->_M_ptr, __i, __m); }
1962 #endif
1963 
1964  value_type
1965  operator+=(value_type __i) const noexcept
1966  { return __atomic_impl::__add_fetch_flt(this->_M_ptr, __i); }
1967 
1968  value_type
1969  operator-=(value_type __i) const noexcept
1970  { return __atomic_impl::__sub_fetch_flt(this->_M_ptr, __i); }
1971  };
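  // Editorial example (added for this documentation page, not in the source):
  // the floating-point specialization above provides fetch_add/fetch_sub via
  // the compare-exchange based helpers (__fetch_add_flt and __fetch_sub_flt).
  // A minimal sketch:
  //
  //   double total = 0.0;
  //   std::atomic_ref<double> ref(total);
  //   ref.fetch_add(1.5); // total == 1.5
  //   ref += 0.5;         // __add_fetch_flt, total == 2.0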
1972 
1973  template<typename _Fp>
1974  struct __atomic_ref<const _Fp, false, true, false>
1975  : __atomic_ref_base<const _Fp>
1976  {
1977  using difference_type = typename __atomic_ref_base<const _Fp>::value_type;
1978  using __atomic_ref_base<const _Fp>::__atomic_ref_base;
1979  };
1980 
1981  // base class for atomic_ref<pointer-type>
1982  template<typename _Pt>
1983  struct __atomic_ref<_Pt, false, false, true>
1984  : __atomic_ref_base<_Pt>
1985  {
1986  using value_type = typename __atomic_ref_base<_Pt>::value_type;
1987  using difference_type = ptrdiff_t;
1988 
1989  using __atomic_ref_base<_Pt>::__atomic_ref_base;
1990  using __atomic_ref_base<_Pt>::operator=;
1991  _GLIBCXX_ALWAYS_INLINE value_type
1992  fetch_add(difference_type __d,
1993  memory_order __m = memory_order_seq_cst) const noexcept
1994  { return __atomic_impl::fetch_add(this->_M_ptr, _S_type_size(__d), __m); }
1995 
1996  _GLIBCXX_ALWAYS_INLINE value_type
1997  fetch_sub(difference_type __d,
1998  memory_order __m = memory_order_seq_cst) const noexcept
1999  { return __atomic_impl::fetch_sub(this->_M_ptr, _S_type_size(__d), __m); }
2000 
2001 #if __glibcxx_atomic_min_max
2002  _GLIBCXX_ALWAYS_INLINE value_type
2003  fetch_min(value_type __i,
2004  memory_order __m = memory_order_seq_cst) const noexcept
2005  { return __atomic_impl::__fetch_min(this->_M_ptr, __i, __m); }
2006 
2007  _GLIBCXX_ALWAYS_INLINE value_type
2008  fetch_max(value_type __i,
2009  memory_order __m = memory_order_seq_cst) const noexcept
2010  { return __atomic_impl::__fetch_max(this->_M_ptr, __i, __m); }
2011 #endif
2012 
2013  value_type
2014  operator++(int) const noexcept
2015  { return fetch_add(1); }
2016 
2017  value_type
2018  operator--(int) const noexcept
2019  { return fetch_sub(1); }
2020 
2021  value_type
2022  operator++() const noexcept
2023  {
2024  return __atomic_impl::__add_fetch(this->_M_ptr, _S_type_size(1));
2025  }
2026 
2027  value_type
2028  operator--() const noexcept
2029  {
2030  return __atomic_impl::__sub_fetch(this->_M_ptr, _S_type_size(1));
2031  }
2032 
2033  value_type
2034  operator+=(difference_type __d) const noexcept
2035  {
2036  return __atomic_impl::__add_fetch(this->_M_ptr, _S_type_size(__d));
2037  }
2038 
2039  value_type
2040  operator-=(difference_type __d) const noexcept
2041  {
2042  return __atomic_impl::__sub_fetch(this->_M_ptr, _S_type_size(__d));
2043  }
2044 
2045  private:
2046  static constexpr ptrdiff_t
2047  _S_type_size(ptrdiff_t __d) noexcept
2048  {
2049  using _Et = remove_pointer_t<value_type>;
2050  static_assert(is_object_v<_Et>);
2051  return __d * sizeof(_Et);
2052  }
2053  };
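  // Editorial example (added for this documentation page, not in the source):
  // the pointer specialization above scales its difference_type argument by
  // the pointee size (via _S_type_size) before calling the byte-granular
  // __atomic built-ins.  A minimal sketch:
  //
  //   int array[4] = { };
  //   int* ptr = array;
  //   std::atomic_ref<int*> ref(ptr);
  //   ref.fetch_add(1); // ptr now points to array[1]
  //   ref += 2;         // ptr now points to array[3]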
2054 
2055  template<typename _Pt>
2056  struct __atomic_ref<const _Pt, false, false, true>
2057  : __atomic_ref_base<const _Pt>
2058  {
2059  using difference_type = ptrdiff_t;
2060  using __atomic_ref_base<const _Pt>::__atomic_ref_base;
2061  };
2062 #endif // C++2a
2063 
2064  /// @endcond
2065 
2066  /// @} group atomics
2067 
2068 _GLIBCXX_END_NAMESPACE_VERSION
2069 } // namespace std
2070 
2071 #endif