libstdc++: bits/shared_ptr_atomic.h
// shared_ptr atomic access -*- C++ -*-

// Copyright (C) 2014-2026 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/shared_ptr_atomic.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{memory}
 */

#ifndef _SHARED_PTR_ATOMIC_H
#define _SHARED_PTR_ATOMIC_H 1

#include <bits/atomic_base.h>
#include <bits/shared_ptr.h>

// Annotations for the custom locking in atomic<shared_ptr<T>>.
#if defined _GLIBCXX_TSAN && __has_include(<sanitizer/tsan_interface.h>)
#include <sanitizer/tsan_interface.h>
#define _GLIBCXX_TSAN_MUTEX_DESTROY(X) \
  __tsan_mutex_destroy(X, __tsan_mutex_not_static)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK(X) \
  __tsan_mutex_pre_lock(X, __tsan_mutex_not_static|__tsan_mutex_try_lock)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(X) __tsan_mutex_post_lock(X, \
    __tsan_mutex_not_static|__tsan_mutex_try_lock_failed, 0)
#define _GLIBCXX_TSAN_MUTEX_LOCKED(X) \
  __tsan_mutex_post_lock(X, __tsan_mutex_not_static, 0)
#define _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(X) __tsan_mutex_pre_unlock(X, 0)
#define _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(X) __tsan_mutex_post_unlock(X, 0)
#define _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(X) __tsan_mutex_pre_signal(X, 0)
#define _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(X) __tsan_mutex_post_signal(X, 0)
#else
#define _GLIBCXX_TSAN_MUTEX_DESTROY(X)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK(X)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(X)
#define _GLIBCXX_TSAN_MUTEX_LOCKED(X)
#define _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(X)
#define _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(X)
#define _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(X)
#define _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(X)
#endif
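
// Editorial note (not part of the original header): a sketch of the
// annotation protocol these macros implement, so ThreadSanitizer treats
// the hand-rolled spinlock in _Sp_atomic as a mutex.  The ordering is
// the part that matters:
//
//   _GLIBCXX_TSAN_MUTEX_TRY_LOCK(&word);          // about to attempt lock
//   if (/* CAS on the lock bit succeeded */)
//     _GLIBCXX_TSAN_MUTEX_LOCKED(&word);          // lock acquired
//   else
//     _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(&word); // attempt failed, retry
//   ...
//   _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&word);        // about to release
//   /* atomic store/exchange/fetch_sub clearing the lock bit */
//   _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&word);       // released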

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @addtogroup pointer_abstractions
   * @relates shared_ptr
   * @{
   */

  /// @cond undocumented

  struct _Sp_locker
  {
    _Sp_locker(const _Sp_locker&) = delete;
    _Sp_locker& operator=(const _Sp_locker&) = delete;

#ifdef __GTHREADS
    explicit
    _Sp_locker(const void*) noexcept;
    _Sp_locker(const void*, const void*) noexcept;
    ~_Sp_locker();

  private:
    unsigned char _M_key1;
    unsigned char _M_key2;
#else
    explicit _Sp_locker(const void*, const void* = nullptr) { }
#endif
  };

  /// @endcond
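
  // Editorial note (not part of the original header): _Sp_locker is an
  // RAII guard whose constructors and destructor are defined in the
  // library's compiled runtime (src/c++11/shared_ptr.cc), not here.  A
  // reasonable mental model, hedged because the pool size and hash are
  // implementation details, is that each key selects a mutex from a small
  // global pool by hashing the shared_ptr's address, and the two-pointer
  // form locks both selected mutexes in a consistent order so that two
  // threads locking the same pair cannot deadlock:
  //
  //   // hypothetical sketch, not the real implementation
  //   unsigned char key = hash(addr) & 0xf; // index into a mutex pool
  //   pool[key].lock();                     // unlocked in ~_Sp_locker()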

  /**
   *  @brief  Report whether shared_ptr atomic operations are lock-free.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @return True if atomic access to @c *__p is lock-free, false otherwise.
   *  @{
   */
  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline bool
    atomic_is_lock_free(const __shared_ptr<_Tp, _Lp>*)
    {
#ifdef __GTHREADS
      return __gthread_active_p() == 0;
#else
      return true;
#endif
    }

  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline bool
    atomic_is_lock_free(const shared_ptr<_Tp>* __p)
    { return std::atomic_is_lock_free<_Tp, __default_lock_policy>(__p); }

  /// @}
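
  // Example usage (not part of the original header).  These free functions
  // are deprecated since C++20 in favour of std::atomic<std::shared_ptr<T>>.
  // Access is reported lock-free only when no threads are active, i.e. the
  // per-object mutex pool is never contended:
  //
  //   #include <memory>
  //   std::shared_ptr<int> sp = std::make_shared<int>(42);
  //   bool lf = std::atomic_is_lock_free(&sp); // false in threaded programs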

  /**
   *  @brief  Atomic load for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @return @c *__p
   *
   *  The memory order shall not be `memory_order_release` or
   *  `memory_order_acq_rel`.
   *  @{
   */
  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline shared_ptr<_Tp>
    atomic_load_explicit(const shared_ptr<_Tp>* __p, memory_order)
    {
      _Sp_locker __lock{__p};
      return *__p;
    }

  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline shared_ptr<_Tp>
    atomic_load(const shared_ptr<_Tp>* __p)
    { return std::atomic_load_explicit(__p, memory_order_seq_cst); }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline __shared_ptr<_Tp, _Lp>
    atomic_load_explicit(const __shared_ptr<_Tp, _Lp>* __p, memory_order)
    {
      _Sp_locker __lock{__p};
      return *__p;
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline __shared_ptr<_Tp, _Lp>
    atomic_load(const __shared_ptr<_Tp, _Lp>* __p)
    { return std::atomic_load_explicit(__p, memory_order_seq_cst); }
  /// @}
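
  // Example (not part of the original header): a reader taking a consistent
  // snapshot of a shared global.  `Config` and `global_cfg` are hypothetical
  // names used only for illustration:
  //
  //   struct Config { /* ... */ };
  //   std::shared_ptr<const Config> global_cfg;
  //
  //   std::shared_ptr<const Config> snapshot()
  //   { return std::atomic_load(&global_cfg); } // deprecated in C++20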

  /**
   *  @brief  Atomic store for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @param  __r The value to store.
   *
   *  The memory order shall not be `memory_order_acquire` or
   *  `memory_order_acq_rel`.
   *  @{
   */
  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline void
    atomic_store_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r,
			  memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r); // use swap so that **__p not destroyed while lock held
    }

  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline void
    atomic_store(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
    { std::atomic_store_explicit(__p, std::move(__r), memory_order_seq_cst); }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline void
    atomic_store_explicit(__shared_ptr<_Tp, _Lp>* __p,
			  __shared_ptr<_Tp, _Lp> __r,
			  memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r); // use swap so that **__p not destroyed while lock held
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline void
    atomic_store(__shared_ptr<_Tp, _Lp>* __p, __shared_ptr<_Tp, _Lp> __r)
    { std::atomic_store_explicit(__p, std::move(__r), memory_order_seq_cst); }
  /// @}
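
  // Example (not part of the original header): the writer matching the
  // snapshot() sketch above.  Because the implementation swaps rather than
  // assigns, the old Config is destroyed only after the pool lock is
  // released, so a user-defined destructor can never run under the lock:
  //
  //   void publish(std::shared_ptr<const Config> c)
  //   { std::atomic_store(&global_cfg, std::move(c)); }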

  /**
   *  @brief  Atomic exchange for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @param  __r New value to store in `*__p`.
   *  @return The original value of `*__p`.
   *  @{
   */
  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline shared_ptr<_Tp>
    atomic_exchange_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r,
			     memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r);
      return __r;
    }

  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline shared_ptr<_Tp>
    atomic_exchange(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
    {
      return std::atomic_exchange_explicit(__p, std::move(__r),
					   memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline __shared_ptr<_Tp, _Lp>
    atomic_exchange_explicit(__shared_ptr<_Tp, _Lp>* __p,
			     __shared_ptr<_Tp, _Lp> __r,
			     memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r);
      return __r;
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline __shared_ptr<_Tp, _Lp>
    atomic_exchange(__shared_ptr<_Tp, _Lp>* __p, __shared_ptr<_Tp, _Lp> __r)
    {
      return std::atomic_exchange_explicit(__p, std::move(__r),
					   memory_order_seq_cst);
    }
  /// @}
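
  // Example (not part of the original header): atomically replace the
  // global and reclaim the previous value in one step, continuing the
  // hypothetical global_cfg sketch:
  //
  //   auto old = std::atomic_exchange(&global_cfg, std::move(next_cfg));
  //   // `old` now owns the previous Config; destroyed when it goes away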

  /**
   *  @brief  Atomic compare-and-swap for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @param  __v A non-null pointer to a shared_ptr object.
   *  @param  __w The new value to store in `*__p` on success.
   *  @return True if `*__p` was equivalent to `*__v`, false otherwise.
   *
   *  The memory order for failure shall not be `memory_order_release` or
   *  `memory_order_acq_rel`.
   *  @{
   */
  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    bool
    atomic_compare_exchange_strong_explicit(shared_ptr<_Tp>* __p,
					    shared_ptr<_Tp>* __v,
					    shared_ptr<_Tp> __w,
					    memory_order,
					    memory_order)
    {
      shared_ptr<_Tp> __x; // goes out of scope after __lock
      _Sp_locker __lock{__p, __v};
      owner_less<shared_ptr<_Tp>> __less;
      if (*__p == *__v && !__less(*__p, *__v) && !__less(*__v, *__p))
	{
	  __x = std::move(*__p);
	  *__p = std::move(__w);
	  return true;
	}
      __x = std::move(*__v);
      *__v = *__p;
      return false;
    }

  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline bool
    atomic_compare_exchange_strong(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v,
				   shared_ptr<_Tp> __w)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
	  std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline bool
    atomic_compare_exchange_weak_explicit(shared_ptr<_Tp>* __p,
					  shared_ptr<_Tp>* __v,
					  shared_ptr<_Tp> __w,
					  memory_order __success,
					  memory_order __failure)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
	  std::move(__w), __success, __failure);
    }

  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline bool
    atomic_compare_exchange_weak(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v,
				 shared_ptr<_Tp> __w)
    {
      return std::atomic_compare_exchange_weak_explicit(__p, __v,
	  std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    bool
    atomic_compare_exchange_strong_explicit(__shared_ptr<_Tp, _Lp>* __p,
					    __shared_ptr<_Tp, _Lp>* __v,
					    __shared_ptr<_Tp, _Lp> __w,
					    memory_order,
					    memory_order)
    {
      __shared_ptr<_Tp, _Lp> __x; // goes out of scope after __lock
      _Sp_locker __lock{__p, __v};
      owner_less<__shared_ptr<_Tp, _Lp>> __less;
      if (*__p == *__v && !__less(*__p, *__v) && !__less(*__v, *__p))
	{
	  __x = std::move(*__p);
	  *__p = std::move(__w);
	  return true;
	}
      __x = std::move(*__v);
      *__v = *__p;
      return false;
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline bool
    atomic_compare_exchange_strong(__shared_ptr<_Tp, _Lp>* __p,
				   __shared_ptr<_Tp, _Lp>* __v,
				   __shared_ptr<_Tp, _Lp> __w)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
	  std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline bool
    atomic_compare_exchange_weak_explicit(__shared_ptr<_Tp, _Lp>* __p,
					  __shared_ptr<_Tp, _Lp>* __v,
					  __shared_ptr<_Tp, _Lp> __w,
					  memory_order __success,
					  memory_order __failure)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
	  std::move(__w), __success, __failure);
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr<T>>")
    inline bool
    atomic_compare_exchange_weak(__shared_ptr<_Tp, _Lp>* __p,
				 __shared_ptr<_Tp, _Lp>* __v,
				 __shared_ptr<_Tp, _Lp> __w)
    {
      return std::atomic_compare_exchange_weak_explicit(__p, __v,
	  std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }
  /// @}
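
  // Example (not part of the original header): the classic compare-exchange
  // retry loop, applied to the deprecated free-function API.  On failure the
  // implementation writes the current value back into `expected`, so the
  // loop re-reads automatically.  `update()` is a hypothetical function:
  //
  //   auto expected = std::atomic_load(&global_cfg);
  //   std::shared_ptr<const Config> desired;
  //   do
  //     desired = std::make_shared<const Config>(update(*expected));
  //   while (!std::atomic_compare_exchange_weak(&global_cfg,
  //                                             &expected, desired));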

  /// @} group pointer_abstractions

#ifdef __glibcxx_atomic_shared_ptr // C++ >= 20 && HOSTED
  template<typename _Tp>
    struct atomic;

  /**
   * @addtogroup pointer_abstractions
   * @relates shared_ptr
   * @{
   */

  template<typename _Tp>
    class _Sp_atomic
    {
      using value_type = _Tp;
      using element_type = typename _Tp::element_type;

      friend struct atomic<_Tp>;

      // An atomic version of __shared_count<> and __weak_count<>.
      // Stores a _Sp_counted_base<>* but uses the LSB as a lock.
      struct _Atomic_count
      {
	// Either __shared_count<> or __weak_count<>
	using __count_type = decltype(_Tp::_M_refcount);
	using uintptr_t = __UINTPTR_TYPE__;

	// _Sp_counted_base<>*
	using pointer = decltype(__count_type::_M_pi);

	// Ensure we can use the LSB as the lock bit.
	static_assert(alignof(remove_pointer_t<pointer>) > 1);

	constexpr _Atomic_count() noexcept = default;

	explicit
	_Atomic_count(__count_type&& __c) noexcept
	: _M_val(reinterpret_cast<uintptr_t>(__c._M_pi))
	{
	  __c._M_pi = nullptr;
	}

	~_Atomic_count()
	{
	  auto __val = _AtomicRef(&_M_val).load(memory_order_relaxed);
	  _GLIBCXX_TSAN_MUTEX_DESTROY(&_M_val);
	  __glibcxx_assert(!(__val & _S_lock_bit));
	  if (auto __pi = reinterpret_cast<pointer>(__val))
	    {
	      if constexpr (__is_shared_ptr<_Tp>)
		__pi->_M_release();
	      else
		__pi->_M_weak_release();
	    }
	}

	_Atomic_count(const _Atomic_count&) = delete;
	_Atomic_count& operator=(const _Atomic_count&) = delete;

	// Precondition: caller does not hold the lock!
	// Returns the raw pointer value without the lock bit set.
	pointer
	lock(memory_order __o) const noexcept
	{
	  // To acquire the lock we flip the LSB from 0 to 1.

	  _AtomicRef __aref(&_M_val);
	  auto __current = __aref.load(memory_order_relaxed);
	  while (__current & _S_lock_bit)
	    {
#if __glibcxx_atomic_wait
	      __detail::__thread_relax();
#endif
	      __current = __aref.load(memory_order_relaxed);
	    }

	  _GLIBCXX_TSAN_MUTEX_TRY_LOCK(&_M_val);

	  while (!__aref.compare_exchange_strong(__current,
						 __current | _S_lock_bit,
						 __o,
						 memory_order_relaxed))
	    {
	      _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(&_M_val);
#if __glibcxx_atomic_wait
	      __detail::__thread_relax();
#endif
	      __current = __current & ~_S_lock_bit;
	      _GLIBCXX_TSAN_MUTEX_TRY_LOCK(&_M_val);
	    }
	  _GLIBCXX_TSAN_MUTEX_LOCKED(&_M_val);
	  return reinterpret_cast<pointer>(__current);
	}
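
	// Worked example (not part of the original header) of the LSB
	// tagging that lock()/unlock() rely on.  Because _Sp_counted_base
	// has alignment greater than 1, the low bit of any valid control
	// block pointer is always 0, so it is free to carry the lock state:
	//
	//   uintptr_t v = reinterpret_cast<uintptr_t>(pi); // ...xxxx0
	//   v |= 1;              // locked: pointer bits unchanged, LSB set
	//   v &= ~uintptr_t(1);  // unlocked: recovers the original pointer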

	// Precondition: caller holds the lock!
	void
	unlock(memory_order __o) const noexcept
	{
	  _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
	  _AtomicRef(&_M_val).fetch_sub(1, __o);
	  _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
	}

	// Swaps the values of *this and __c, and unlocks *this.
	// Precondition: caller holds the lock!
	void
	_M_swap_unlock(__count_type& __c, memory_order __o) noexcept
	{
	  if (__o != memory_order_seq_cst)
	    __o = memory_order_release;
	  auto __x = reinterpret_cast<uintptr_t>(__c._M_pi);
	  _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
	  __x = _AtomicRef(&_M_val).exchange(__x, __o);
	  _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
	  __c._M_pi = reinterpret_cast<pointer>(__x & ~_S_lock_bit);
	}

#if __glibcxx_atomic_wait
	// Precondition: caller holds the lock!
	void
	_M_wait_unlock(const element_type* const& __ptr,
		       memory_order __o) const noexcept
	{
	  auto __old_ptr = __ptr;
	  _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
	  uintptr_t __old_pi
	    = _AtomicRef(&_M_val).fetch_sub(1, memory_order_relaxed) - 1u;
	  _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);

	  // Ensure that the correct value of _M_ptr is visible after locking,
	  // by upgrading relaxed or consume to acquire.
	  auto __lo = __o;
	  if (__o != memory_order_seq_cst)
	    __lo = memory_order_acquire;

	  std::__atomic_wait_address(
	    &_M_val,
	    [=, &__ptr, this](uintptr_t __new_pi)
	    {
	      if (__old_pi != (__new_pi & ~_S_lock_bit))
		// The control block changed, so we can wake up.
		return true;

	      // The control block is the same, so check whether the stored
	      // pointer changed.  The lock must be taken first, and the
	      // value of pi may have been updated in the meantime, so
	      // reload it.
	      __new_pi = reinterpret_cast<uintptr_t>(this->lock(__lo));
	      auto __new_ptr = __ptr;
	      this->unlock(memory_order_relaxed);
	      // Wake up if either of the values changed.
	      return __new_pi != __old_pi || __new_ptr != __old_ptr;
	    },
	    [__o, this] { return _AtomicRef(&_M_val).load(__o); });
	}

	void
	notify_one() noexcept
	{
	  _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(&_M_val);
	  _AtomicRef(&_M_val).notify_one();
	  _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(&_M_val);
	}

	void
	notify_all() noexcept
	{
	  _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(&_M_val);
	  _AtomicRef(&_M_val).notify_all();
	  _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(&_M_val);
	}
#endif

      private:
	using _AtomicRef = __atomic_ref<uintptr_t>;
	alignas(_AtomicRef::required_alignment) mutable uintptr_t _M_val{0};
	static constexpr uintptr_t _S_lock_bit{1};
      };

      element_type* _M_ptr = nullptr;
      _Atomic_count _M_refcount;

      static typename _Atomic_count::pointer
      _S_add_ref(typename _Atomic_count::pointer __p)
      {
	if (__p)
	  {
	    if constexpr (__is_shared_ptr<_Tp>)
	      __p->_M_add_ref_copy();
	    else
	      __p->_M_weak_add_ref();
	  }
	return __p;
      }

      constexpr _Sp_atomic() noexcept = default;

      explicit
      _Sp_atomic(value_type __r) noexcept
      : _M_ptr(__r._M_ptr), _M_refcount(std::move(__r._M_refcount))
      { }

      ~_Sp_atomic() = default;

      _Sp_atomic(const _Sp_atomic&) = delete;
      void operator=(const _Sp_atomic&) = delete;

      value_type
      load(memory_order __o) const noexcept
      {
	__glibcxx_assert(__o != memory_order_release
			   && __o != memory_order_acq_rel);
	// Ensure that the correct value of _M_ptr is visible after locking,
	// by upgrading relaxed or consume to acquire.
	if (__o != memory_order_seq_cst)
	  __o = memory_order_acquire;

	value_type __ret;
	auto __pi = _M_refcount.lock(__o);
	__ret._M_ptr = _M_ptr;
	__ret._M_refcount._M_pi = _S_add_ref(__pi);
	_M_refcount.unlock(memory_order_relaxed);
	return __ret;
      }

      void
      swap(value_type& __r, memory_order __o) noexcept
      {
	_M_refcount.lock(memory_order_acquire);
	std::swap(_M_ptr, __r._M_ptr);
	_M_refcount._M_swap_unlock(__r._M_refcount, __o);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
			      memory_order __o, memory_order __o2) noexcept
      {
	bool __result = true;
	auto __pi = _M_refcount.lock(memory_order_acquire);
	if (_M_ptr == __expected._M_ptr
	      && __pi == __expected._M_refcount._M_pi)
	  {
	    _M_ptr = __desired._M_ptr;
	    _M_refcount._M_swap_unlock(__desired._M_refcount, __o);
	  }
	else
	  {
	    _Tp __sink = std::move(__expected);
	    __expected._M_ptr = _M_ptr;
	    __expected._M_refcount._M_pi = _S_add_ref(__pi);
	    _M_refcount.unlock(__o2);
	    __result = false;
	  }
	return __result;
      }

#if __glibcxx_atomic_wait
      void
      wait(value_type __old, memory_order __o) const noexcept
      {
	auto __pi = _M_refcount.lock(memory_order_acquire);
	if (_M_ptr == __old._M_ptr && __pi == __old._M_refcount._M_pi)
	  _M_refcount._M_wait_unlock(_M_ptr, __o);
	else
	  _M_refcount.unlock(memory_order_relaxed);
      }

      void
      notify_one() noexcept
      {
	_M_refcount.notify_one();
      }

      void
      notify_all() noexcept
      {
	_M_refcount.notify_all();
      }
#endif
    };

  template<typename _Tp>
    struct atomic<shared_ptr<_Tp>>
    {
    public:
      using value_type = shared_ptr<_Tp>;

      static constexpr bool is_always_lock_free = false;

      bool
      is_lock_free() const noexcept
      { return false; }

      constexpr atomic() noexcept = default;

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 3661. constinit atomic<shared_ptr<T>> a(nullptr); should work
      constexpr atomic(nullptr_t) noexcept : atomic() { }

      atomic(shared_ptr<_Tp> __r) noexcept
      : _M_impl(std::move(__r))
      { }

      atomic(const atomic&) = delete;
      void operator=(const atomic&) = delete;

      shared_ptr<_Tp>
      load(memory_order __o = memory_order_seq_cst) const noexcept
      { return _M_impl.load(__o); }

      operator shared_ptr<_Tp>() const noexcept
      { return _M_impl.load(memory_order_seq_cst); }

      void
      store(shared_ptr<_Tp> __desired,
	    memory_order __o = memory_order_seq_cst) noexcept
      { _M_impl.swap(__desired, __o); }

      void
      operator=(shared_ptr<_Tp> __desired) noexcept
      { _M_impl.swap(__desired, memory_order_seq_cst); }

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 3893. LWG 3661 broke atomic<shared_ptr<T>> a; a = nullptr;
      void
      operator=(nullptr_t) noexcept
      { store(nullptr); }

      shared_ptr<_Tp>
      exchange(shared_ptr<_Tp> __desired,
	       memory_order __o = memory_order_seq_cst) noexcept
      {
	_M_impl.swap(__desired, __o);
	return __desired;
      }

      bool
      compare_exchange_strong(shared_ptr<_Tp>& __expected,
			      shared_ptr<_Tp> __desired,
			      memory_order __o, memory_order __o2) noexcept
      {
	return _M_impl.compare_exchange_strong(__expected, __desired,
					       __o, __o2);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
			      memory_order __o = memory_order_seq_cst) noexcept
      {
	memory_order __o2;
	switch (__o)
	  {
	  case memory_order_acq_rel:
	    __o2 = memory_order_acquire;
	    break;
	  case memory_order_release:
	    __o2 = memory_order_relaxed;
	    break;
	  default:
	    __o2 = __o;
	  }
	return compare_exchange_strong(__expected, std::move(__desired),
				       __o, __o2);
      }
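
      // Editorial note (not part of the original header): the switch above
      // follows the standard rule for compare-exchange overloads that take
      // a single memory order: the failure order is derived from the
      // success order, replacing acq_rel with acquire and release with
      // relaxed ([atomics.types.operations]).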

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
			    memory_order __o, memory_order __o2) noexcept
      {
	return compare_exchange_strong(__expected, std::move(__desired),
				       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
			    memory_order __o = memory_order_seq_cst) noexcept
      {
	return compare_exchange_strong(__expected, std::move(__desired), __o);
      }

#if __glibcxx_atomic_wait
      void
      wait(value_type __old,
	   memory_order __o = memory_order_seq_cst) const noexcept
      {
	_M_impl.wait(std::move(__old), __o);
      }

      void
      notify_one() noexcept
      {
	_M_impl.notify_one();
      }

      void
      notify_all() noexcept
      {
	_M_impl.notify_all();
      }
#endif

    private:
      _Sp_atomic<shared_ptr<_Tp>> _M_impl;
    };
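
  // Example usage (not part of the original header): the C++20 replacement
  // for the deprecated free functions above.  A minimal sketch, assuming a
  // hypothetical Config type:
  //
  //   #include <atomic>
  //   #include <memory>
  //
  //   struct Config { int verbosity = 0; };
  //   std::atomic<std::shared_ptr<Config>> cfg = std::make_shared<Config>();
  //
  //   void writer()
  //   { cfg.store(std::make_shared<Config>(Config{1})); }
  //
  //   void reader()
  //   {
  //     std::shared_ptr<Config> snap = cfg.load(); // ref-counted copy
  //     // *snap stays alive while snap exists, even if writer() runs
  //   }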

  template<typename _Tp>
    struct atomic<weak_ptr<_Tp>>
    {
    public:
      using value_type = weak_ptr<_Tp>;

      static constexpr bool is_always_lock_free = false;

      bool
      is_lock_free() const noexcept
      { return false; }

      constexpr atomic() noexcept = default;

      atomic(weak_ptr<_Tp> __r) noexcept
      : _M_impl(std::move(__r))
      { }

      atomic(const atomic&) = delete;
      void operator=(const atomic&) = delete;

      weak_ptr<_Tp>
      load(memory_order __o = memory_order_seq_cst) const noexcept
      { return _M_impl.load(__o); }

      operator weak_ptr<_Tp>() const noexcept
      { return _M_impl.load(memory_order_seq_cst); }

      void
      store(weak_ptr<_Tp> __desired,
	    memory_order __o = memory_order_seq_cst) noexcept
      { _M_impl.swap(__desired, __o); }

      void
      operator=(weak_ptr<_Tp> __desired) noexcept
      { _M_impl.swap(__desired, memory_order_seq_cst); }

      weak_ptr<_Tp>
      exchange(weak_ptr<_Tp> __desired,
	       memory_order __o = memory_order_seq_cst) noexcept
      {
	_M_impl.swap(__desired, __o);
	return __desired;
      }

      bool
      compare_exchange_strong(weak_ptr<_Tp>& __expected,
			      weak_ptr<_Tp> __desired,
			      memory_order __o, memory_order __o2) noexcept
      {
	return _M_impl.compare_exchange_strong(__expected, __desired,
					       __o, __o2);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
			      memory_order __o = memory_order_seq_cst) noexcept
      {
	memory_order __o2;
	switch (__o)
	  {
	  case memory_order_acq_rel:
	    __o2 = memory_order_acquire;
	    break;
	  case memory_order_release:
	    __o2 = memory_order_relaxed;
	    break;
	  default:
	    __o2 = __o;
	  }
	return compare_exchange_strong(__expected, std::move(__desired),
				       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
			    memory_order __o, memory_order __o2) noexcept
      {
	return compare_exchange_strong(__expected, std::move(__desired),
				       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
			    memory_order __o = memory_order_seq_cst) noexcept
      {
	return compare_exchange_strong(__expected, std::move(__desired), __o);
      }

#if __glibcxx_atomic_wait
      void
      wait(value_type __old,
	   memory_order __o = memory_order_seq_cst) const noexcept
      {
	_M_impl.wait(std::move(__old), __o);
      }

      void
      notify_one() noexcept
      {
	_M_impl.notify_one();
      }

      void
      notify_all() noexcept
      {
	_M_impl.notify_all();
      }
#endif

    private:
      _Sp_atomic<weak_ptr<_Tp>> _M_impl;
    };
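
  // Example usage (not part of the original header): an atomic weak
  // reference to a cached object, reusing the hypothetical Config type.
  // weak_ptr::lock() yields an owning shared_ptr, or an empty one if the
  // object has already expired:
  //
  //   std::atomic<std::weak_ptr<Config>> cached;
  //
  //   std::shared_ptr<Config> get()
  //   {
  //     if (auto sp = cached.load().lock())
  //       return sp;                       // still alive, reuse it
  //     auto sp = std::make_shared<Config>();
  //     cached.store(sp);                  // publish the new weak reference
  //     return sp;
  //   }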
  /// @} group pointer_abstractions
#endif // C++20

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // _SHARED_PTR_ATOMIC_H