libstdc++
bits/hashtable.h
Go to the documentation of this file.
1 // hashtable.h header -*- C++ -*-
2 
3 // Copyright (C) 2007-2026 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10 
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15 
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19 
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24 
25 /** @file bits/hashtable.h
26  * This is an internal header file, included by other library headers.
27  * Do not attempt to use it directly. @headername{unordered_map, unordered_set}
28  */
29 
30 #ifndef _HASHTABLE_H
31 #define _HASHTABLE_H 1
32 
33 #ifdef _GLIBCXX_SYSHDR
34 #pragma GCC system_header
35 #endif
36 
37 #include <bits/hashtable_policy.h>
38 #include <bits/enable_special_members.h> // _Enable_default_constructor — line lost in extraction; verify against upstream
39 #include <bits/stl_algobase.h> // fill_n, is_permutation
40 #include <bits/stl_function.h> // __has_is_transparent_t
41 #ifdef __glibcxx_node_extract // >= C++17 && HOSTED
42 # include <bits/node_handle.h>
43 #endif
44 
45 #pragma GCC diagnostic push
46 #pragma GCC diagnostic ignored "-Wc++11-extensions"
47 
48 namespace std _GLIBCXX_VISIBILITY(default)
49 {
50 _GLIBCXX_BEGIN_NAMESPACE_VERSION
51 /// @cond undocumented
52 
// Default for the __cache_hash_code trait: cache the hash code in each
// node unless the hasher is both fast to invoke and nothrow-invocable.
// The nothrow requirement is mandatory because rehashing recomputes
// uncached hash codes and must not throw.
53  template<typename _Tp, typename _Hash>
54  using __cache_default
55  = __not_<__and_<// Do not cache for fast hasher.
56  __is_fast_hash<_Hash>,
57  // Mandatory for the rehash process.
58  __is_nothrow_invocable<const _Hash&, const _Tp&>>>;
59 
60  // Helper to conditionally delete the default constructor.
61  // The _Hash_node_base type is used to distinguish this specialization
62  // from any other potentially-overlapping subobjects of the hashtable.
// The hashtable's default constructor is enabled only when _Equal, _Hash
// and _Allocator are all themselves default constructible; otherwise this
// base deletes it.
63  template<typename _Equal, typename _Hash, typename _Allocator>
64  using _Hashtable_enable_default_ctor
65  = _Enable_default_constructor<__and_<is_default_constructible<_Equal>,
66  is_default_constructible<_Hash>,
67  is_default_constructible<_Allocator>>{},
68  __detail::_Hash_node_base>;
69 
70  /**
71  * Primary class template _Hashtable.
72  *
73  * @ingroup hashtable-detail
74  *
75  * @tparam _Value CopyConstructible type.
76  *
77  * @tparam _Key CopyConstructible type.
78  *
79  * @tparam _Alloc An allocator type
80  * ([lib.allocator.requirements]) whose _Alloc::value_type is
81  * _Value. As a conforming extension, we allow for
82  * _Alloc::value_type != _Value.
83  *
84  * @tparam _ExtractKey Function object that takes an object of type
85  * _Value and returns a value of type _Key.
86  *
87  * @tparam _Equal Function object that takes two objects of type k
88  * and returns a bool-like value that is true if the two objects
89  * are considered equal.
90  *
91  * @tparam _Hash The hash function. A unary function object with
92  * argument type _Key and result type size_t. Return values should
93  * be distributed over the entire range [0, numeric_limits<size_t>::max()].
94  *
95  * @tparam _RangeHash The range-hashing function (in the terminology of
96  * Tavori and Dreizin). A binary function object whose argument
97  * types and result type are all size_t. Given arguments r and N,
98  * the return value is in the range [0, N).
99  *
100  * @tparam _Unused Not used.
101  *
102  * @tparam _RehashPolicy Policy class with three members, all of
103  * which govern the bucket count. _M_next_bkt(n) returns a bucket
104  * count no smaller than n. _M_bkt_for_elements(n) returns a
105  * bucket count appropriate for an element count of n.
106  * _M_need_rehash(n_bkt, n_elt, n_ins) determines whether, if the
107  * current bucket count is n_bkt and the current element count is
108  * n_elt, we need to increase the bucket count for n_ins insertions.
109  * If so, returns make_pair(true, n), where n is the new bucket count. If
110  * not, returns make_pair(false, <anything>)
111  *
112  * @tparam _Traits Compile-time class with three boolean
113  * std::integral_constant members: __cache_hash_code, __constant_iterators,
114  * __unique_keys.
115  *
116  * Each _Hashtable data structure has:
117  *
118  * - _Bucket[] _M_buckets
119  * - _Hash_node_base _M_before_begin
120  * - size_type _M_bucket_count
121  * - size_type _M_element_count
122  *
123  * with _Bucket being _Hash_node_base* and _Hash_node containing:
124  *
125  * - _Hash_node* _M_next
126  * - Tp _M_value
127  * - size_t _M_hash_code if cache_hash_code is true
128  *
129  * In terms of Standard containers the hashtable is like the aggregation of:
130  *
131  * - std::forward_list<_Node> containing the elements
132  * - std::vector<std::forward_list<_Node>::iterator> representing the buckets
133  *
134  * The non-empty buckets contain the node before the first node in the
135  * bucket. This design makes it possible to implement something like a
136  * std::forward_list::insert_after on container insertion and
137  * std::forward_list::erase_after on container erase
138  * calls. _M_before_begin is equivalent to
139  * std::forward_list::before_begin. Empty buckets contain
140  * nullptr. Note that one of the non-empty buckets contains
141  * &_M_before_begin which is not a dereferenceable node so the
142  * node pointer in a bucket shall never be dereferenced, only its
143  * next node can be.
144  *
145  * Walking through a bucket's nodes requires a check on the hash code to
146  * see if each node is still in the bucket. Such a design assumes a
147  * quite efficient hash functor and is one of the reasons it is
148  * highly advisable to set __cache_hash_code to true.
149  *
150  * The container iterators are simply built from nodes. This way
151  * incrementing the iterator is perfectly efficient independent of
152  * how many empty buckets there are in the container.
153  *
154  * On insert we compute the element's hash code and use it to find the
155  * bucket index. If the element must be inserted in an empty bucket
156  * we add it at the beginning of the singly linked list and make the
157  * bucket point to _M_before_begin. The bucket that used to point to
158  * _M_before_begin, if any, is updated to point to its new before
159  * begin node.
160  *
161  * Note that all equivalent values, if any, are next to each other, if
162  * we find a non-equivalent value after an equivalent one it means that
163  * we won't find any new equivalent value.
164  *
165  * On erase, the simple iterator design requires using the hash
166  * functor to get the index of the bucket to update. For this
167  * reason, when __cache_hash_code is set to false the hash functor must
168  * not throw and this is enforced by a static assertion.
169  *
170  * Functionality is implemented by decomposition into base classes,
171  * where the derived _Hashtable class is used in _Map_base and
172  * _Rehash_base base classes to access the
173  * "this" pointer. _Hashtable_base is used in the base classes as a
174  * non-recursive, fully-completed-type so that detailed nested type
175  * information, such as iterator type and node type, can be
176  * used. This is similar to the "Curiously Recurring Template
177  * Pattern" (CRTP) technique, but uses a reconstructed, not
178  * explicitly passed, template pattern.
179  *
180  * Base class templates are:
181  * - __detail::_Hashtable_base
182  * - __detail::_Map_base
183  * - __detail::_Rehash_base
184  */
185  template<typename _Key, typename _Value, typename _Alloc,
186  typename _ExtractKey, typename _Equal,
187  typename _Hash, typename _RangeHash, typename _Unused,
188  typename _RehashPolicy, typename _Traits>
189  class _Hashtable
190  : public __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal,
191  _Hash, _RangeHash, _Unused, _Traits>,
192  public __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
193  _Hash, _RangeHash, _Unused,
194  _RehashPolicy, _Traits>,
195  public __detail::_Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
196  _Hash, _RangeHash, _Unused,
197  _RehashPolicy, _Traits>,
198  private __detail::_Hashtable_alloc<
199  __alloc_rebind<_Alloc,
200  __detail::_Hash_node<_Value,
201  _Traits::__hash_cached::value>>>,
202  private _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>
203  {
204  static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
205  "unordered container must have a non-const, non-volatile value_type");
206 #if __cplusplus > 201703L || defined __STRICT_ANSI__
207  static_assert(is_same<typename _Alloc::value_type, _Value>{},
208  "unordered container must have the same value_type as its allocator");
209 #endif
210  static_assert(is_copy_constructible<_Hash>::value,
211  "hash function must be copy constructible");
212 
213  using __traits_type = _Traits;
214  using __hash_cached = typename __traits_type::__hash_cached;
215  using __constant_iterators = typename __traits_type::__constant_iterators;
216  using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
217  using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
218 
219  using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;
220 
221  using __node_value_type =
222  __detail::_Hash_node_value<_Value, __hash_cached::value>;
223  using __node_ptr = typename __hashtable_alloc::__node_ptr;
224  using __value_alloc_traits =
225  typename __hashtable_alloc::__value_alloc_traits;
226  using __node_alloc_traits =
227  typename __hashtable_alloc::__node_alloc_traits;
228  using __node_base = typename __hashtable_alloc::__node_base;
229  using __node_base_ptr = typename __hashtable_alloc::__node_base_ptr;
230  using __buckets_ptr = typename __hashtable_alloc::__buckets_ptr;
231 
232  using __enable_default_ctor
233  = _Hashtable_enable_default_ctor<_Equal, _Hash, _Alloc>;
234  using __rehash_guard_t
235  = __detail::_RehashStateGuard<_RehashPolicy>;
236 
237  public:
238  typedef _Key key_type;
239  typedef _Value value_type;
240  typedef _Alloc allocator_type;
241  typedef _Equal key_equal;
242 
243  // mapped_type, if present, comes from _Map_base.
244  // hasher, if present, comes from _Hash_code_base/_Hashtable_base.
245  typedef typename __value_alloc_traits::pointer pointer;
246  typedef typename __value_alloc_traits::const_pointer const_pointer;
247  typedef value_type& reference;
248  typedef const value_type& const_reference;
249 
250  using iterator
251  = __detail::_Node_iterator<_Value, __constant_iterators::value,
252  __hash_cached::value>;
253 
254  using const_iterator
255  = __detail::_Node_const_iterator<_Value, __constant_iterators::value,
256  __hash_cached::value>;
257 
258  using local_iterator = __detail::_Local_iterator<key_type, _Value,
259  _ExtractKey, _Hash, _RangeHash, _Unused,
260  __constant_iterators::value,
261  __hash_cached::value>;
262 
263  using const_local_iterator = __detail::_Local_const_iterator<
264  key_type, _Value,
265  _ExtractKey, _Hash, _RangeHash, _Unused,
266  __constant_iterators::value, __hash_cached::value>;
267 
268  private:
269  using __rehash_type = _RehashPolicy;
270 
271  using __unique_keys = typename __traits_type::__unique_keys;
272 
273  using __hashtable_base = __detail::
274  _Hashtable_base<_Key, _Value, _ExtractKey,
275  _Equal, _Hash, _RangeHash, _Unused, _Traits>;
276 
277  using __hash_code_base = typename __hashtable_base::__hash_code_base;
278  using __hash_code = typename __hashtable_base::__hash_code;
279  using __ireturn_type = __conditional_t<__unique_keys::value,
280  std::pair<iterator, bool>,
281  iterator>;
282 
283  using __map_base = __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey,
284  _Equal, _Hash, _RangeHash, _Unused,
285  _RehashPolicy, _Traits>;
286 
287  using __rehash_base = __detail::_Rehash_base<_Key, _Value, _Alloc,
288  _ExtractKey, _Equal,
289  _Hash, _RangeHash, _Unused,
290  _RehashPolicy, _Traits>;
291 
292  using __node_builder_t = __detail::_NodeBuilder<_ExtractKey>;
293 
294  // Simple RAII type for managing a node containing an element
295  struct _Scoped_node
296  {
297  // Take ownership of a node with a constructed element.
298  _Scoped_node(__node_ptr __n, __hashtable_alloc* __h)
299  : _M_h(__h), _M_node(__n) { }
300 
301  // Allocate a node and construct an element within it.
302  template<typename... _Args>
303  _Scoped_node(__hashtable_alloc* __h, _Args&&... __args)
304  : _M_h(__h),
305  _M_node(__h->_M_allocate_node(std::forward<_Args>(__args)...))
306  { }
307 
308  // Destroy element and deallocate node.
309  ~_Scoped_node() { if (_M_node) _M_h->_M_deallocate_node(_M_node); };
310 
311  _Scoped_node(const _Scoped_node&) = delete;
312  _Scoped_node& operator=(const _Scoped_node&) = delete;
313 
314  __hashtable_alloc* _M_h;
315  __node_ptr _M_node;
316  };
317 
318  // Compile-time diagnostics.
319 
320  // _Hash_code_base has everything protected, so use this derived type to
321  // access it.
322  struct __hash_code_base_access : __hash_code_base
323  { using __hash_code_base::_M_bucket_index; };
324 
325  // To get bucket index we need _RangeHash to be non-throwing.
326  static_assert(is_nothrow_default_constructible<_RangeHash>::value,
327  "Functor used to map hash code to bucket index"
328  " must be nothrow default constructible");
329  static_assert(noexcept(
330  std::declval<const _RangeHash&>()((std::size_t)0, (std::size_t)0)),
331  "Functor used to map hash code to bucket index must be"
332  " noexcept");
333 
334  // To compute bucket index we also need _ExtractKey to be non-throwing.
335  static_assert(is_nothrow_default_constructible<_ExtractKey>::value,
336  "_ExtractKey must be nothrow default constructible");
337  static_assert(noexcept(
338  std::declval<const _ExtractKey&>()(std::declval<_Value>())),
339  "_ExtractKey functor must be noexcept invocable");
340 
341  template<typename _Keya, typename _Valuea, typename _Alloca,
342  typename _ExtractKeya, typename _Equala,
343  typename _Hasha, typename _RangeHasha, typename _Unuseda,
344  typename _RehashPolicya, typename _Traitsa,
345  bool _Unique_keysa>
346  friend struct __detail::_Map_base;
347 
348  public:
349  using size_type = typename __hashtable_base::size_type;
350  using difference_type = typename __hashtable_base::difference_type;
351 
352 #ifdef __glibcxx_node_extract // >= C++17 && HOSTED
353  using node_type = _Node_handle<_Key, _Value, __node_alloc_type>;
354  using insert_return_type = _Node_insert_return<iterator, node_type>;
355 #endif
356 
357  private:
358  __buckets_ptr _M_buckets = &_M_single_bucket;
359  size_type _M_bucket_count = 1;
360  __node_base _M_before_begin;
361  size_type _M_element_count = 0;
362  _RehashPolicy _M_rehash_policy;
363 
364  // A single bucket used when only need for 1 bucket. Especially
365  // interesting in move semantic to leave hashtable with only 1 bucket
366  // which is not allocated so that we can have those operations noexcept
367  // qualified.
368  // Note that we can't leave hashtable with 0 bucket without adding
369  // numerous checks in the code to avoid 0 modulus.
370  __node_base_ptr _M_single_bucket = nullptr;
371 
372  void
373  _M_update_bbegin()
374  {
375  if (auto __begin = _M_begin())
376  _M_buckets[_M_bucket_index(*__begin)] = &_M_before_begin;
377  }
378 
379  void
380  _M_update_bbegin(__node_ptr __n)
381  {
382  _M_before_begin._M_nxt = __n;
383  _M_update_bbegin();
384  }
385 
386  bool
387  _M_uses_single_bucket(__buckets_ptr __bkts) const
388  { return __builtin_expect(__bkts == &_M_single_bucket, false); }
389 
390  bool
391  _M_uses_single_bucket() const
392  { return _M_uses_single_bucket(_M_buckets); }
393 
394  static constexpr size_t
395  __small_size_threshold() noexcept
396  {
397  return
398  __detail::_Hashtable_hash_traits<_Hash>::__small_size_threshold();
399  }
400 
401  __hashtable_alloc&
402  _M_base_alloc() { return *this; }
403 
404  __buckets_ptr
405  _M_allocate_buckets(size_type __bkt_count)
406  {
407  if (__builtin_expect(__bkt_count == 1, false))
408  {
409  _M_single_bucket = nullptr;
410  return &_M_single_bucket;
411  }
412 
413  return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
414  }
415 
416  void
417  _M_deallocate_buckets(__buckets_ptr __bkts, size_type __bkt_count)
418  {
419  if (_M_uses_single_bucket(__bkts))
420  return;
421 
422  __hashtable_alloc::_M_deallocate_buckets(__bkts, __bkt_count);
423  }
424 
425  void
426  _M_deallocate_buckets()
427  { _M_deallocate_buckets(_M_buckets, _M_bucket_count); }
428 
429  // Gets bucket begin, deals with the fact that non-empty buckets contain
430  // their before begin node.
431  __node_ptr
432  _M_bucket_begin(size_type __bkt) const
433  {
434  __node_base_ptr __n = _M_buckets[__bkt];
435  return __n ? static_cast<__node_ptr>(__n->_M_nxt) : nullptr;
436  }
437 
438  __node_ptr
439  _M_begin() const
440  { return static_cast<__node_ptr>(_M_before_begin._M_nxt); }
441 
442  // Assign *this using another _Hashtable instance. Whether elements
443  // are copied or moved depends on the _Ht reference.
444  template<typename _Ht>
445  void
446  _M_assign_elements(_Ht&&);
447 
448  template<typename _Ht>
449  void
450  _M_assign(_Ht&& __ht)
451  {
452  __detail::_AllocNode<__node_alloc_type> __alloc_node_gen(*this);
453  _M_assign(std::forward<_Ht>(__ht), __alloc_node_gen);
454  }
455 
456  template<typename _Ht, typename _NodeGenerator>
457  void
458  _M_assign(_Ht&&, _NodeGenerator&);
459 
460  void
461  _M_move_assign(_Hashtable&&, true_type);
462 
463  void
464  _M_move_assign(_Hashtable&&, false_type);
465 
466  void
467  _M_reset() noexcept;
468 
469  _Hashtable(const _Hash& __h, const _Equal& __eq,
470  const allocator_type& __a)
471  : __hashtable_base(__h, __eq),
472  __hashtable_alloc(__node_alloc_type(__a)),
473  __enable_default_ctor(_Enable_default_constructor_tag{})
474  { }
475 
476  template<bool _No_realloc = true>
477  static constexpr bool
478  _S_nothrow_move()
479  {
480 #if __cpp_constexpr >= 201304 // >= C++14
481 # pragma GCC diagnostic push
482 # pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
483  if constexpr (_No_realloc)
484  if constexpr (is_nothrow_copy_constructible<_Hash>::value)
485  return is_nothrow_copy_constructible<_Equal>::value;
486  return false;
487 # pragma GCC diagnostic pop
488 #else // In C++11 a constexpr function must be a single statement.
489  return __and_<__bool_constant<_No_realloc>,
490  is_nothrow_copy_constructible<_Hash>,
491  is_nothrow_copy_constructible<_Equal>>::value;
492 #endif
493  }
494 
495  _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
496  true_type /* alloc always equal */)
497  noexcept(_S_nothrow_move());
498 
499  _Hashtable(_Hashtable&&, __node_alloc_type&&,
500  false_type /* alloc always equal */);
501 
502  template<typename _InputIterator>
503  _Hashtable(_InputIterator __first, _InputIterator __last,
504  size_type __bkt_count_hint,
505  const _Hash&, const _Equal&, const allocator_type&,
506  true_type __uks);
507 
508  template<typename _InputIterator>
509  _Hashtable(_InputIterator __first, _InputIterator __last,
510  size_type __bkt_count_hint,
511  const _Hash&, const _Equal&, const allocator_type&,
512  false_type __uks);
513 
514  public:
515  // Constructor, destructor, assignment, swap
516  _Hashtable() = default;
517 
518  _Hashtable(const _Hashtable&);
519 
520  _Hashtable(const _Hashtable&, const allocator_type&);
521 
522  explicit
523  _Hashtable(size_type __bkt_count_hint,
524  const _Hash& __hf = _Hash(),
525  const key_equal& __eql = key_equal(),
526  const allocator_type& __a = allocator_type());
527 
528  // Use delegating constructors.
529  _Hashtable(_Hashtable&& __ht)
530  noexcept(_S_nothrow_move())
531  : _Hashtable(std::move(__ht), std::move(__ht._M_node_allocator()),
532  true_type{})
533  { }
534 
535  _Hashtable(_Hashtable&& __ht, const allocator_type& __a)
536  noexcept(_S_nothrow_move<__node_alloc_traits::_S_always_equal()>())
537  : _Hashtable(std::move(__ht), __node_alloc_type(__a),
538  typename __node_alloc_traits::is_always_equal{})
539  { }
540 
541  explicit
542  _Hashtable(const allocator_type& __a)
543  : __hashtable_alloc(__node_alloc_type(__a)),
544  __enable_default_ctor(_Enable_default_constructor_tag{})
545  { }
546 
547  template<typename _InputIterator>
548  _Hashtable(_InputIterator __f, _InputIterator __l,
549  size_type __bkt_count_hint = 0,
550  const _Hash& __hf = _Hash(),
551  const key_equal& __eql = key_equal(),
552  const allocator_type& __a = allocator_type())
553  : _Hashtable(__f, __l, __bkt_count_hint, __hf, __eql, __a,
554  __unique_keys{})
555  { }
556 
557  _Hashtable(initializer_list<value_type> __l,
558  size_type __bkt_count_hint = 0,
559  const _Hash& __hf = _Hash(),
560  const key_equal& __eql = key_equal(),
561  const allocator_type& __a = allocator_type())
562  : _Hashtable(__l.begin(), __l.end(), __bkt_count_hint,
563  __hf, __eql, __a, __unique_keys{})
564  { }
565 
566  _Hashtable&
567  operator=(const _Hashtable& __ht);
568 
569  _Hashtable&
570  operator=(_Hashtable&& __ht)
571  noexcept(__node_alloc_traits::_S_nothrow_move()
572  && is_nothrow_move_assignable<_Hash>::value
573  && is_nothrow_move_assignable<_Equal>::value)
574  {
575  constexpr bool __move_storage =
576  __node_alloc_traits::_S_propagate_on_move_assign()
577  || __node_alloc_traits::_S_always_equal();
578  _M_move_assign(std::move(__ht), __bool_constant<__move_storage>());
579  return *this;
580  }
581 
582 #pragma GCC diagnostic push
583 #pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
584  _Hashtable&
585  operator=(initializer_list<value_type> __l)
586  {
587  using __reuse_or_alloc_node_gen_t =
588  __detail::_ReuseOrAllocNode<__node_alloc_type>;
589 
590  __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
591  _M_before_begin._M_nxt = nullptr;
592  clear();
593 
594  // We assume that all elements of __l are likely to be inserted.
595  auto __l_bkt_count = _M_rehash_policy._M_bkt_for_elements(__l.size());
596 
597  // Excess buckets might have been intentionally reserved by the user,
598  // so rehash if we need to grow, but don't shrink.
599  if (_M_bucket_count < __l_bkt_count)
600  rehash(__l_bkt_count);
601 
602  __hash_code __code;
603  size_type __bkt;
604  for (auto& __e : __l)
605  {
606  const key_type& __k = _ExtractKey{}(__e);
607 
608  if constexpr (__unique_keys::value)
609  {
610  if (auto __loc = _M_locate(__k))
611  continue; // Found existing element with equivalent key
612  else
613  {
614  __code = __loc._M_hash_code;
615  __bkt = __loc._M_bucket_index;
616  }
617  }
618  else
619  {
620  __code = this->_M_hash_code(__k);
621  __bkt = _M_bucket_index(__code);
622  }
623 
624  _M_insert_unique_node(__bkt, __code, __roan(__e));
625  }
626 
627  return *this;
628  }
629 #pragma GCC diagnostic pop
630 
631  ~_Hashtable() noexcept;
632 
633  void
634  swap(_Hashtable&)
635  noexcept(__and_<__is_nothrow_swappable<_Hash>,
636  __is_nothrow_swappable<_Equal>>::value);
637 
638  // Basic container operations
639  iterator
640  begin() noexcept
641  { return iterator(_M_begin()); }
642 
643  const_iterator
644  begin() const noexcept
645  { return const_iterator(_M_begin()); }
646 
647  iterator
648  end() noexcept
649  { return iterator(nullptr); }
650 
651  const_iterator
652  end() const noexcept
653  { return const_iterator(nullptr); }
654 
655  const_iterator
656  cbegin() const noexcept
657  { return const_iterator(_M_begin()); }
658 
659  const_iterator
660  cend() const noexcept
661  { return const_iterator(nullptr); }
662 
663  size_type
664  size() const noexcept
665  { return _M_element_count; }
666 
667  _GLIBCXX_NODISCARD bool
668  empty() const noexcept
669  { return size() == 0; }
670 
671  allocator_type
672  get_allocator() const noexcept
673  { return allocator_type(this->_M_node_allocator()); }
674 
675  size_type
676  max_size() const noexcept
677  { return __node_alloc_traits::max_size(this->_M_node_allocator()); }
678 
679  // Observers
680  key_equal
681  key_eq() const
682  { return this->_M_eq(); }
683 
684  // hash_function, if present, comes from _Hash_code_base.
685 
686  // Bucket operations
687  size_type
688  bucket_count() const noexcept
689  { return _M_bucket_count; }
690 
691  size_type
692  max_bucket_count() const noexcept
693  { return max_size(); }
694 
695  size_type
696  bucket_size(size_type __bkt) const
697  { return std::distance(begin(__bkt), end(__bkt)); }
698 
699  size_type
700  bucket(const key_type& __k) const
701  { return _M_bucket_index(this->_M_hash_code(__k)); }
702 
703 #ifdef __glibcxx_associative_heterogeneous_insertion // C++26
704  template <typename _Kt>
705  size_type
706  _M_bucket_tr(const _Kt& __k) const
707  { return _M_bucket_index(this->_M_hash_code_tr(__k)); }
708 #endif
709 
710  local_iterator
711  begin(size_type __bkt)
712  {
713  return local_iterator(*this, _M_bucket_begin(__bkt),
714  __bkt, _M_bucket_count);
715  }
716 
717  local_iterator
718  end(size_type __bkt)
719  { return local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
720 
721  const_local_iterator
722  begin(size_type __bkt) const
723  {
724  return const_local_iterator(*this, _M_bucket_begin(__bkt),
725  __bkt, _M_bucket_count);
726  }
727 
728  const_local_iterator
729  end(size_type __bkt) const
730  { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
731 
732  // DR 691.
733  const_local_iterator
734  cbegin(size_type __bkt) const
735  {
736  return const_local_iterator(*this, _M_bucket_begin(__bkt),
737  __bkt, _M_bucket_count);
738  }
739 
740  const_local_iterator
741  cend(size_type __bkt) const
742  { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
743 
744  float
745  load_factor() const noexcept
746  {
747  return static_cast<float>(size()) / static_cast<float>(bucket_count());
748  }
749 
750  // max_load_factor, if present, comes from _Rehash_base.
751 
752  // Generalization of max_load_factor. Extension, not found in
753  // TR1. Only useful if _RehashPolicy is something other than
754  // the default.
755  const _RehashPolicy&
756  __rehash_policy() const
757  { return _M_rehash_policy; }
758 
759  void
760  __rehash_policy(const _RehashPolicy& __pol)
761  { _M_rehash_policy = __pol; }
762 
763  // Lookup.
764  iterator
765  find(const key_type& __k);
766 
767  const_iterator
768  find(const key_type& __k) const;
769 
770  size_type
771  count(const key_type& __k) const;
772 
773  std::pair<iterator, iterator>
774  equal_range(const key_type& __k);
775 
776  std::pair<const_iterator, const_iterator>
777  equal_range(const key_type& __k) const;
778 
779 #ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED
780  template<typename _Kt,
781  typename = __has_is_transparent_t<_Hash, _Kt>,
782  typename = __has_is_transparent_t<_Equal, _Kt>>
783  iterator
784  _M_find_tr(const _Kt& __k);
785 
786  template<typename _Kt,
787  typename = __has_is_transparent_t<_Hash, _Kt>,
788  typename = __has_is_transparent_t<_Equal, _Kt>>
789  const_iterator
790  _M_find_tr(const _Kt& __k) const;
791 
792  template<typename _Kt,
793  typename = __has_is_transparent_t<_Hash, _Kt>,
794  typename = __has_is_transparent_t<_Equal, _Kt>>
795  size_type
796  _M_count_tr(const _Kt& __k) const;
797 
798  template<typename _Kt,
799  typename = __has_is_transparent_t<_Hash, _Kt>,
800  typename = __has_is_transparent_t<_Equal, _Kt>>
801  pair<iterator, iterator>
802  _M_equal_range_tr(const _Kt& __k);
803 
804  template<typename _Kt,
805  typename = __has_is_transparent_t<_Hash, _Kt>,
806  typename = __has_is_transparent_t<_Equal, _Kt>>
807  pair<const_iterator, const_iterator>
808  _M_equal_range_tr(const _Kt& __k) const;
809 #endif // __glibcxx_generic_unordered_lookup
810 
811  void _M_rehash_insert(size_type __n);
812 
813  private:
814  // Bucket index computation helpers.
815  size_type
816  _M_bucket_index(const __node_value_type& __n) const noexcept
817  { return __hash_code_base::_M_bucket_index(__n, _M_bucket_count); }
818 
819  size_type
820  _M_bucket_index(__hash_code __c) const
821  { return __hash_code_base::_M_bucket_index(__c, _M_bucket_count); }
822 
823 #pragma GCC diagnostic push
824 #pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
825  // Get hash code for a node that comes from another _Hashtable.
826  // Reuse a cached hash code if the hash function is stateless,
827  // otherwise recalculate it using our own hash function.
828  __hash_code
829  _M_hash_code_ext(const __node_value_type& __from) const
830  {
831  if constexpr (__and_<__hash_cached, is_empty<_Hash>>::value)
832  return __from._M_hash_code;
833  else
834  return this->_M_hash_code(_ExtractKey{}(__from._M_v()));
835  }
836 
837  // Like _M_bucket_index but when the node is coming from another
838  // container instance.
839  size_type
840  _M_bucket_index_ext(const __node_value_type& __from) const
841  { return _RangeHash{}(_M_hash_code_ext(__from), _M_bucket_count); }
842 
843  void
844  _M_copy_code(__node_value_type& __to,
845  const __node_value_type& __from) const
846  {
847  if constexpr (__hash_cached::value)
848  __to._M_hash_code = _M_hash_code_ext(__from);
849  }
850 
851  void
852  _M_store_code(__node_value_type& __to, __hash_code __code) const
853  {
854  if constexpr (__hash_cached::value)
855  __to._M_hash_code = __code;
856  }
857 #pragma GCC diagnostic pop
858 
859  // Find and insert helper functions and types
860 
861  // Find the node before the one matching the criteria.
862  __node_base_ptr
863  _M_find_before_node(
864  size_type __bkt, const key_type& __k, __hash_code __code) const
865  { return _M_find_before_node_tr<key_type>(__bkt, __k, __code); }
866 
867  template<typename _Kt>
868  __node_base_ptr
869  _M_find_before_node_tr(size_type, const _Kt&, __hash_code) const;
870 
871  // A pointer to a particular node and/or a hash code and bucket index
872  // where such a node would be found in the container.
873  struct __location_type
874  {
875  // True if _M_node() is a valid node pointer.
876  explicit operator bool() const noexcept
877  { return static_cast<bool>(_M_before); }
878 
879  // An iterator that refers to the node, or end().
880  explicit operator iterator() const noexcept
881  { return iterator(_M_node()); }
882 
883  // A const_iterator that refers to the node, or cend().
884  explicit operator const_iterator() const noexcept
885  { return const_iterator(_M_node()); }
886 
887  // A pointer to the node, or null.
888  __node_ptr _M_node() const
889  {
890  if (_M_before)
891  return static_cast<__node_ptr>(_M_before->_M_nxt);
892  return __node_ptr();
893  }
894 
895  __node_base_ptr _M_before{}; // Must only be used to get _M_nxt
896  __hash_code _M_hash_code{}; // Only valid if _M_bucket_index != -1
897  size_type _M_bucket_index = size_type(-1);
898  };
899 
900  // Adaptive lookup to find key, or which bucket it would be in.
901  // For a container smaller than the small size threshold use a linear
902  // search through the whole container, just testing for equality.
903  // Otherwise, calculate the hash code and bucket index for the key,
904  // and search in that bucket.
905  // The return value will have a pointer to the node _before_ the first
906  // node matching the key, if any such node exists. Returning the node
907  // before the desired one allows the result to be used for erasure.
908  // If no matching element is present, the hash code and bucket for the
909  // key will be set, allowing a new node to be inserted at that location.
910  // (The hash code and bucket might also be set when a node is found.)
911  // The _M_before pointer might point to _M_before_begin, so must not be
912  // cast to __node_ptr, and it must not be used to modify *_M_before
913  // except in non-const member functions, such as erase.
914 
      // Locate key __k (of the container's own key_type); see the comment
      // block above for the contract of the returned __location_type.
      __location_type
      _M_locate(const key_type& __k) const
      { return _M_locate_tr<key_type>(__k); }

      // Heterogeneous-key version of _M_locate, defined out of line.
      template <typename _Kt>
	__location_type
	_M_locate_tr(const _Kt& __k) const;
922 
923  __node_ptr
924  _M_find_node(size_type __bkt, const key_type& __key,
925  __hash_code __c) const
926  {
927  if (__node_base_ptr __before_n = _M_find_before_node(__bkt, __key, __c))
928  return static_cast<__node_ptr>(__before_n->_M_nxt);
929  return nullptr;
930  }
931 
932  template<typename _Kt>
933  __node_ptr
934  _M_find_node_tr(size_type __bkt, const _Kt& __key,
935  __hash_code __c) const
936  {
937  if (auto __before_n = _M_find_before_node_tr(__bkt, __key, __c))
938  return static_cast<__node_ptr>(__before_n->_M_nxt);
939  return nullptr;
940  }
941 
      // Insert a node at the beginning of a bucket.
      // Invariant maintained here: a non-empty bucket stores a pointer to
      // the node *before* its first node (possibly &_M_before_begin).
      void
      _M_insert_bucket_begin(size_type __bkt, __node_ptr __node)
      {
	if (_M_buckets[__bkt])
	  {
	    // Bucket is not empty, we just need to insert the new node
	    // after the bucket before begin.
	    __node->_M_nxt = _M_buckets[__bkt]->_M_nxt;
	    _M_buckets[__bkt]->_M_nxt = __node;
	  }
	else
	  {
	    // The bucket is empty, the new node is inserted at the
	    // beginning of the singly-linked list and the bucket will
	    // contain _M_before_begin pointer.
	    __node->_M_nxt = _M_before_begin._M_nxt;
	    _M_before_begin._M_nxt = __node;

	    if (__node->_M_nxt)
	      // We must update former begin bucket that is pointing to
	      // _M_before_begin.
	      _M_buckets[_M_bucket_index(*__node->_M_next())] = __node;

	    _M_buckets[__bkt] = &_M_before_begin;
	  }
      }
969 
970  // Remove the bucket first node
971  void
972  _M_remove_bucket_begin(size_type __bkt, __node_ptr __next_n,
973  size_type __next_bkt)
974  {
975  if (!__next_n)
976  _M_buckets[__bkt] = nullptr;
977  else if (__next_bkt != __bkt)
978  {
979  _M_buckets[__next_bkt] = _M_buckets[__bkt];
980  _M_buckets[__bkt] = nullptr;
981  }
982  }
983 
      // Get the node before __n in the bucket __bkt
      __node_base_ptr
      _M_get_previous_node(size_type __bkt, __node_ptr __n);

      // For equivalent-key insertion: compute the hash code for __k,
      // possibly reusing information from the __hint node.  Defined out
      // of line.
      pair<__node_ptr, __hash_code>
      _M_compute_hash_code(__node_ptr __hint, const key_type& __k) const;

      // Insert node __n with hash code __code, in bucket __bkt (or another
      // bucket if rehashing is needed).
      // Assumes no element with equivalent key is already present.
      // Takes ownership of __n if insertion succeeds, throws otherwise.
      // __n_elt is an estimated number of elements we expect to insert,
      // used as a hint for rehashing when inserting a range.
      iterator
      _M_insert_unique_node(size_type __bkt, __hash_code,
			    __node_ptr __n, size_type __n_elt = 1);

      // Insert node __n with key __k and hash code __code.
      // Takes ownership of __n if insertion succeeds, throws otherwise.
      iterator
      _M_insert_multi_node(__node_ptr __hint,
			   __hash_code __code, __node_ptr __n);

      // Emplace helper for unique-key containers; constructs the element
      // from __args and inserts it if no equivalent key is present.
      template<typename... _Args>
	std::pair<iterator, bool>
	_M_emplace_uniq(_Args&&... __args);
1010 
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wc++14-extensions" // variable templates
      // Trait: true when an emplace argument of type _Arg is (or directly
      // yields) the container's key_type.  Primary template: false.
      // NOTE(review): presumably lets _M_emplace_uniq locate the key
      // without first constructing a value_type — confirm at definition.
      template<typename _Arg, typename _DArg = __remove_cvref_t<_Arg>,
	       typename = _ExtractKey>
	static constexpr bool __is_key_type = false;

      // Sets (_Identity extractor): the argument is the key itself.
      template<typename _Arg>
	static constexpr bool
	__is_key_type<_Arg, key_type, __detail::_Identity> = true;

      // Maps (_Select1st extractor): a pair whose first member is the key.
      template<typename _Arg, typename _Arg1, typename _Arg2>
	static constexpr bool
	__is_key_type<_Arg, pair<_Arg1, _Arg2>, __detail::_Select1st>
	  = is_same<__remove_cvref_t<_Arg1>, key_type>::value;
#pragma GCC diagnostic pop

      // Emplace helper for equivalent-key containers; the const_iterator
      // is an insertion hint.
      template<typename... _Args>
	iterator
	_M_emplace_multi(const_iterator, _Args&&... __args);

      // Erase the node __n (preceded by __prev_n) from bucket __bkt.
      iterator
	_M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n);

      // Erase a run of equivalent nodes starting at __n; returns the
      // number of elements erased.
      size_type
	_M_erase_some(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n);

      // Insert the range [__first, __last) into an equivalent-key
      // container.
      template<typename _InputIterator>
	void
	_M_insert_range_multi(_InputIterator __first, _InputIterator __last);
1040 
1041  public:
1042 #pragma GCC diagnostic push
1043 #pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
1044  // Emplace
1045  template<typename... _Args>
1046  __ireturn_type
1047  emplace(_Args&&... __args)
1048  {
1049  if constexpr (__unique_keys::value)
1050  return _M_emplace_uniq(std::forward<_Args>(__args)...);
1051  else
1052  return _M_emplace_multi(cend(), std::forward<_Args>(__args)...);
1053  }
1054 
1055  template<typename... _Args>
1056  iterator
1057  emplace_hint(const_iterator __hint, _Args&&... __args)
1058  {
1059  if constexpr (__unique_keys::value)
1060  return _M_emplace_uniq(std::forward<_Args>(__args)...).first;
1061  else
1062  return _M_emplace_multi(__hint, std::forward<_Args>(__args)...);
1063  }
1064 
1065  // Insert
1066  __ireturn_type
1067  insert(const value_type& __v)
1068  {
1069  if constexpr (__unique_keys::value)
1070  return _M_emplace_uniq(__v);
1071  else
1072  return _M_emplace_multi(cend(), __v);
1073  }
1074 
1075  iterator
1076  insert(const_iterator __hint, const value_type& __v)
1077  {
1078  if constexpr (__unique_keys::value)
1079  return _M_emplace_uniq(__v).first;
1080  else
1081  return _M_emplace_multi(__hint, __v);
1082  }
1083 
1084  __ireturn_type
1085  insert(value_type&& __v)
1086  {
1087  if constexpr (__unique_keys::value)
1088  return _M_emplace_uniq(std::move(__v));
1089  else
1090  return _M_emplace_multi(cend(), std::move(__v));
1091  }
1092 
1093  iterator
1094  insert(const_iterator __hint, value_type&& __v)
1095  {
1096  if constexpr (__unique_keys::value)
1097  return _M_emplace_uniq(std::move(__v)).first;
1098  else
1099  return _M_emplace_multi(__hint, std::move(__v));
1100  }
1101 
1102 #ifdef __glibcxx_unordered_map_try_emplace // C++ >= 17 && HOSTED
1103  template<typename _KType, typename... _Args>
1104  std::pair<iterator, bool>
1105  try_emplace(const_iterator, _KType&& __k, _Args&&... __args)
1106  {
1107  // Note we ignore the hint argument.
1108  __hash_code __code;
1109  size_type __bkt;
1110  if (auto __loc = _M_locate(__k))
1111  return { iterator(__loc), false };
1112  else
1113  {
1114  __code = __loc._M_hash_code;
1115  __bkt = __loc._M_bucket_index;
1116  }
1117 
1118  _Scoped_node __node {
1119  this,
1121  std::forward_as_tuple(std::forward<_KType>(__k)),
1122  std::forward_as_tuple(std::forward<_Args>(__args)...)
1123  };
1124  auto __it = _M_insert_unique_node(__bkt, __code, __node._M_node);
1125  __node._M_node = nullptr;
1126  return { __it, true };
1127  }
1128 
1129 #ifdef __glibcxx_associative_heterogeneous_insertion // C++26
      // Heterogeneous insert (C++26): construct a node directly from __k
      // only when no equivalent key is already present.
      template<typename _Kt>
	std::pair<iterator, bool>
	_M_insert_tr(_Kt&& __k)
	{
	  auto __loc = _M_locate_tr(__k);
	  if (__loc)
	    // Key already present, no insertion.
	    return { iterator(__loc), false };

	  // Not found: __loc carries the precomputed hash code and bucket.
	  _Scoped_node __node(
	    this->_M_allocate_node(std::forward<_Kt>(__k)), this);
	  auto __it = _M_insert_unique_node(
	    __loc._M_bucket_index, __loc._M_hash_code, __node._M_node);
	  // Ownership transferred to the container; disarm the guard.
	  __node._M_node = nullptr;
	  return { __it, true };
	}
1145 #endif
1146 #endif
1147 
      // Insert every element of the initializer list.
      void
      insert(initializer_list<value_type> __l)
      { this->insert(__l.begin(), __l.end()); }

      // Insert the range [__first, __last); for equivalent keys defer to
      // _M_insert_range_multi which can pre-reserve.
      template<typename _InputIterator>
	void
	insert(_InputIterator __first, _InputIterator __last)
	{
	  if constexpr (__unique_keys::value)
	    for (; __first != __last; ++__first)
	      _M_emplace_uniq(*__first);
	  else
	    return _M_insert_range_multi(__first, __last);
	}
1162 
      // This overload is only defined for maps, not sets.
      // Insert from anything convertible to value_type (e.g. a pair of
      // compatible types).
      template<typename _Pair,
	       typename = _Require<__not_<is_same<_Key, _Value>>,
				   is_constructible<value_type, _Pair&&>>>
	__ireturn_type
	insert(_Pair&& __v)
	{
	  if constexpr (__unique_keys::value)
	    return _M_emplace_uniq(std::forward<_Pair>(__v));
	  else
	    return _M_emplace_multi(cend(), std::forward<_Pair>(__v));
	}
1175 
1176  // This overload is only defined for maps, not sets.
1177  template<typename _Pair,
1178  typename = _Require<__not_<is_same<_Key, _Value>>,
1179  is_constructible<value_type, _Pair&&>>>
1180  iterator
1181  insert(const_iterator __hint, _Pair&& __v)
1182  {
1183  if constexpr (__unique_keys::value)
1184  return _M_emplace_uniq(std::forward<_Pair>(__v));
1185  else
1186  return _M_emplace_multi(__hint, std::forward<_Pair>(__v));
1187  }
1188 #pragma GCC diagnostic pop
1189 
      // Erase
      // Erase the element at the given position.
      iterator
      erase(const_iterator);

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 2059. C++0x ambiguity problem with map::erase
      iterator
      erase(iterator __it)
      { return erase(const_iterator(__it)); }

      // Erase all elements with key __k; returns the number erased.
      size_type
      erase(const key_type& __k);

      // Heterogeneous-key erase.
      template <typename _Kt>
	size_type
	_M_erase_tr(const _Kt& __k);

      // Erase the range [first, last).
      iterator
      erase(const_iterator, const_iterator);

      // Destroy all elements.
      void
      clear() noexcept;

      // Set number of buckets keeping it appropriate for container's number
      // of elements.
      void rehash(size_type __bkt_count);

      // DR 1189.
      // reserve, if present, comes from _Rehash_base.
1219 
1220 #if __glibcxx_node_extract // >= C++17 && HOSTED
      /// Re-insert an extracted node into a container with unique keys.
      /// If the key is already present the handle keeps ownership and is
      /// returned in __ret.node; otherwise the node is adopted.
      insert_return_type
      _M_reinsert_node(node_type&& __nh)
      {
	insert_return_type __ret;
	if (__nh.empty())
	  __ret.position = end();
	else
	  {
	    // A node can only be inserted if its allocator matches.
	    __glibcxx_assert(get_allocator() == __nh.get_allocator());

	    if (auto __loc = _M_locate(__nh._M_key()))
	      {
		// Key exists: report the blocking element and hand the
		// node back to the caller inside the result.
		__ret.node = std::move(__nh);
		__ret.position = iterator(__loc);
		__ret.inserted = false;
	      }
	    else
	      {
		// Key absent: __loc carries the hash code and bucket.
		auto __code = __loc._M_hash_code;
		auto __bkt = __loc._M_bucket_index;
		__ret.position
		  = _M_insert_unique_node(__bkt, __code, __nh._M_ptr);
		__ret.inserted = true;
		// Container now owns the node.
		__nh.release();
	      }
	  }
	return __ret;
      }
1250 
      /// Re-insert an extracted node into a container with equivalent keys.
      /// Always succeeds for a non-empty handle; __hint may speed up
      /// placement next to equivalent elements.
      iterator
      _M_reinsert_node_multi(const_iterator __hint, node_type&& __nh)
      {
	if (__nh.empty())
	  return end();

	// A node can only be inserted if its allocator matches.
	__glibcxx_assert(get_allocator() == __nh.get_allocator());

	const key_type& __k = __nh._M_key();
	auto __code = this->_M_hash_code(__k);
	auto __ret
	  = _M_insert_multi_node(__hint._M_cur, __code, __nh._M_ptr);
	// Container now owns the node.
	__nh.release();
	return __ret;
      }
1267 
1268  private:
      // Unlink the node after __prev_n from bucket __bkt and return it as
      // a node handle, fixing up the bucket array so no bucket is left
      // pointing at the extracted node.
      node_type
      _M_extract_node(size_t __bkt, __node_base_ptr __prev_n)
      {
	__node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
	if (__prev_n == _M_buckets[__bkt])
	  // __n is the first node of its bucket; the 0 passed for the next
	  // bucket index is unused when there is no next node.
	  _M_remove_bucket_begin(__bkt, __n->_M_next(),
	    __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
	else if (__n->_M_nxt)
	  {
	    // If the successor starts a different bucket, that bucket's
	    // before-begin pointer becomes __prev_n.
	    size_type __next_bkt = _M_bucket_index(*__n->_M_next());
	    if (__next_bkt != __bkt)
	      _M_buckets[__next_bkt] = __prev_n;
	  }

	// Unlink and detach the node, then wrap it in a handle that owns
	// it via the node allocator.
	__prev_n->_M_nxt = __n->_M_nxt;
	__n->_M_nxt = nullptr;
	--_M_element_count;
	return { __n, this->_M_node_allocator() };
      }
1288 
      // Hash code for node __src_n with key __k, using this->hash_function().
      // Will use a hash code cached in the node if safe to do so. This is
      // for use in _M_merge_multi where the node comes from another container
      // with a hash function that might not match this->hash_function().
      template<typename _H2>
	__hash_code
	_M_src_hash_code(const _H2&, const __node_value_type& __src_n) const
	{
	  if constexpr (__and_<__hash_cached,
			       is_same<_H2, _Hash>, is_empty<_Hash>>::value)
	    // Same (stateless) hash type in both containers, so the
	    // cached code is what this container would compute anyway.
	    return __src_n._M_hash_code;
	  else
	    // Otherwise recompute from the key.
	    return this->_M_hash_code(_ExtractKey{}(__src_n._M_v()));
	}
1304 
1305  public:
      // Extract a node.
      node_type
      extract(const_iterator __pos)
      {
	size_t __bkt = _M_bucket_index(*__pos._M_cur);
	// _M_extract_node needs the *preceding* node, found by a walk
	// through the bucket.
	return _M_extract_node(__bkt,
			       _M_get_previous_node(__bkt, __pos._M_cur));
      }

      /// Extract a node.  Returns an empty handle if __k is not present.
      node_type
      extract(const _Key& __k)
      { return _M_extract_tr<_Key>(__k); }

      // Heterogeneous-key extract.
      template <typename _Kt>
	node_type
	_M_extract_tr(const _Kt& __k)
	{
	  node_type __nh;
	  __hash_code __code = this->_M_hash_code_tr(__k);
	  std::size_t __bkt = _M_bucket_index(__code);
	  if (__node_base_ptr __prev_node =
	      _M_find_before_node_tr(__bkt, __k, __code))
	    __nh = _M_extract_node(__bkt, __prev_node);
	  return __nh;
	}
1332 
      /// Merge from another container of the same type.
      /// Nodes whose key already exists in *this stay in __src.
      void
      _M_merge_unique(_Hashtable& __src)
      {
	__glibcxx_assert(get_allocator() == __src.get_allocator());

	using _PTr = pointer_traits<__node_base_ptr>;

	auto __n_elt = __src.size();
	// __first is 1 only for the first insertion, so the rehash hint
	// (__first * __n_elt + 1) covers the whole remaining range once.
	size_type __first = 1;
	// For a container of identical type we can use its private members,
	// __src._M_before_begin, __src._M_bucket_index etc.
	auto __prev = _PTr::pointer_to(__src._M_before_begin);
	while (__n_elt--)
	  {
	    const auto __next = __prev->_M_nxt;
	    const auto& __node = static_cast<__node_type&>(*__next);
	    const key_type& __k = _ExtractKey{}(__node._M_v());
	    const auto __loc = _M_locate(__k);
	    if (__loc)
	      {
		// Key already in *this: leave the node in __src and
		// advance past it.
		__prev = __next;
		continue;
	      }

	    // Unlink the node from __src and insert it here using the
	    // hash code and bucket _M_locate already computed.
	    auto __src_bkt = __src._M_bucket_index(__node);
	    auto __nh = __src._M_extract_node(__src_bkt, __prev);
	    _M_insert_unique_node(__loc._M_bucket_index, __loc._M_hash_code,
				  __nh._M_ptr, __first * __n_elt + 1);
	    __nh.release();
	    __first = 0;
	  }
      }
1366 
      /// Merge from a compatible container into one with unique keys.
      /// Same semantics as above but restricted to the public API of
      /// __src, since it is not the exact same type.
      template<typename _Compatible_Hashtable>
	void
	_M_merge_unique(_Compatible_Hashtable& __src)
	{
	  static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
				  node_type>, "Node types are compatible");
	  __glibcxx_assert(get_allocator() == __src.get_allocator());

	  auto __n_elt = __src.size();
	  // __first is 1 only for the first insertion (rehash hint).
	  size_type __first = 1;
	  // For a compatible container we can only use the public API,
	  // so cbegin(), cend(), hash_function(), and extract(iterator).
	  for (auto __i = __src.cbegin(), __end = __src.cend(); __i != __end;)
	    {
	      --__n_elt;
	      // Advance before extraction invalidates __pos.
	      auto __pos = __i++;
	      const key_type& __k = _ExtractKey{}(*__pos);
	      const auto __loc = _M_locate(__k);
	      if (__loc)
		// Key already in *this: leave the element in __src.
		continue;

	      auto __nh = __src.extract(__pos);
	      _M_insert_unique_node(__loc._M_bucket_index,
				    __loc._M_hash_code, __nh._M_ptr,
				    __first * __n_elt + 1);
	      __nh.release();
	      __first = 0;
	    }
	}
1397 
      /// Merge from another container of the same type.
      /// Moves every node of __src into *this (keys need not be unique).
      void
      _M_merge_multi(_Hashtable& __src)
      {
	__glibcxx_assert(get_allocator() == __src.get_allocator());

	if (__src.size() == 0) [[__unlikely__]]
	  return;

	using _PTr = pointer_traits<__node_base_ptr>;

	__node_ptr __hint = nullptr;
	// Reserve up front so insertions below do not rehash one by one.
	this->reserve(size() + __src.size());
	// For a container of identical type we can use its private members,
	// __src._M_before_begin, __src._M_bucket_index etc.
	auto __prev = _PTr::pointer_to(__src._M_before_begin);
	do
	  {
	    const auto& __node = static_cast<__node_type&>(*__prev->_M_nxt);
	    // Hash code from this:
	    auto __code = _M_hash_code_ext(__node);
	    // Bucket index in __src, using code from __src.hash_function():
	    size_type __src_bkt = __src._M_bucket_index(__node);
	    auto __nh = __src._M_extract_node(__src_bkt, __prev);
	    // Use the last inserted node as the hint for the next one.
	    __hint = _M_insert_multi_node(__hint, __code, __nh._M_ptr)._M_cur;
	    __nh.release();
	  }
	while (__prev->_M_nxt != nullptr);
      }
1427 
      /// Merge from a compatible container into one with equivalent keys.
      /// Moves every node of __src into *this via the public API only.
      template<typename _Compatible_Hashtable>
	void
	_M_merge_multi(_Compatible_Hashtable& __src)
	{
	  static_assert(is_same_v<typename _Compatible_Hashtable::node_type,
				  node_type>, "Node types are compatible");
	  __glibcxx_assert(get_allocator() == __src.get_allocator());

	  __node_ptr __hint = nullptr;
	  // Reserve up front so insertions below do not rehash one by one.
	  this->reserve(size() + __src.size());
	  // For a compatible container we can only use the public API,
	  // so cbegin(), cend(), hash_function(), and extract(iterator).
	  for (auto __i = __src.cbegin(), __end = __src.cend(); __i != __end;)
	    {
	      // Advance before extraction invalidates __pos.
	      auto __pos = __i++;
	      // Reuse the cached hash code only when provably identical
	      // to what this container would compute.
	      __hash_code __code
		= _M_src_hash_code(__src.hash_function(), *__pos._M_cur);
	      auto __nh = __src.extract(__pos);
	      __hint = _M_insert_multi_node(__hint, __code, __nh._M_ptr)._M_cur;
	      __nh.release();
	    }
	}
1451 #endif // C++17 __glibcxx_node_extract
1452 
      // Element-wise comparison with another hashtable (used to implement
      // operator== for the unordered containers).
      bool
      _M_equal(const _Hashtable& __other) const;

    private:
      // Helper rehash method used when keys are unique.
      void _M_rehash(size_type __bkt_count, true_type __uks);

      // Helper rehash method used when keys can be non-unique.
      void _M_rehash(size_type __bkt_count, false_type __uks);
1462  };
1463 
1464  // Definitions of class template _Hashtable's out-of-line member functions.
  // Construct an empty table with at least __bkt_count_hint buckets
  // (rounded up by the rehash policy).
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(size_type __bkt_count_hint,
	       const _Hash& __h, const _Equal& __eq, const allocator_type& __a)
    : _Hashtable(__h, __eq, __a)
    {
      // Only allocate a bucket array if the hint asks for more buckets
      // than the delegated-to constructor provided.
      auto __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count_hint);
      if (__bkt_count > _M_bucket_count)
	{
	  _M_buckets = _M_allocate_buckets(__bkt_count);
	  _M_bucket_count = __bkt_count;
	}
    }
1482 
  // Range constructor for unique keys: size the table from the hint only,
  // then insert (duplicates in the range would make a distance-based
  // estimate an overestimate anyway).
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _InputIterator>
      inline
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _Hashtable(_InputIterator __f, _InputIterator __l,
		 size_type __bkt_count_hint,
		 const _Hash& __h, const _Equal& __eq,
		 const allocator_type& __a, true_type /* __uks */)
      : _Hashtable(__bkt_count_hint, __h, __eq, __a)
      { this->insert(__f, __l); }
1497 
  // Range constructor for equivalent keys: pre-size the bucket array for
  // the larger of the range length and the hint, then insert each element.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _InputIterator>
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _Hashtable(_InputIterator __f, _InputIterator __l,
		 size_type __bkt_count_hint,
		 const _Hash& __h, const _Equal& __eq,
		 const allocator_type& __a, false_type __uks)
      : _Hashtable(__h, __eq, __a)
      {
	// __distance_fw is 0 for pure input iterators, so the hint still
	// applies in that case.
	auto __nb_elems = __detail::__distance_fw(__f, __l);
	auto __bkt_count =
	  _M_rehash_policy._M_next_bkt(
	    std::max(_M_rehash_policy._M_bkt_for_elements(__nb_elems),
		     __bkt_count_hint));

	if (__bkt_count > _M_bucket_count)
	  {
	    _M_buckets = _M_allocate_buckets(__bkt_count);
	    _M_bucket_count = __bkt_count;
	  }

	for (; __f != __l; ++__f)
	  _M_emplace_multi(cend(), *__f);
      }
1526 
  // Copy assignment.  Handles allocator propagation (POCCA): when the
  // replacement allocator is unequal, all storage must be released with
  // the old allocator before copying; otherwise existing buckets and
  // nodes are reused.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    operator=(const _Hashtable& __ht)
    -> _Hashtable&
    {
      if (&__ht == this)
	return *this;

      if (__node_alloc_traits::_S_propagate_on_copy_assign())
	{
	  auto& __this_alloc = this->_M_node_allocator();
	  auto& __that_alloc = __ht._M_node_allocator();
	  if (!__node_alloc_traits::_S_always_equal()
	      && __this_alloc != __that_alloc)
	    {
	      // Replacement allocator cannot free existing storage.
	      this->_M_deallocate_nodes(_M_begin());
	      _M_before_begin._M_nxt = nullptr;
	      _M_deallocate_buckets();
	      _M_buckets = nullptr;
	      std::__alloc_on_copy(__this_alloc, __that_alloc);
	      __hashtable_base::operator=(__ht);
	      _M_bucket_count = __ht._M_bucket_count;
	      _M_element_count = __ht._M_element_count;
	      _M_rehash_policy = __ht._M_rehash_policy;

	      struct _Guard
	      {
		~_Guard() { if (_M_ht) _M_ht->_M_reset(); }
		_Hashtable* _M_ht;
	      };
	      // If _M_assign exits via an exception it will have deallocated
	      // all memory. This guard will ensure *this is in a usable state.
	      _Guard __guard{this};
	      _M_assign(__ht);
	      // Success: disarm the guard.
	      __guard._M_ht = nullptr;
	      return *this;
	    }
	  std::__alloc_on_copy(__this_alloc, __that_alloc);
	}

      // Reuse allocated buckets and nodes.
      _M_assign_elements(__ht);
      return *this;
    }
1577 
  // Assign the elements of __ht (an lvalue for copy assignment, an rvalue
  // for element-wise move assignment), reusing this container's existing
  // nodes and, when the bucket counts match, its bucket array.  Provides
  // the strong guarantee with respect to the bucket array.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Ht>
      void
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_assign_elements(_Ht&& __ht)
      {
	using __reuse_or_alloc_node_gen_t =
	  __detail::_ReuseOrAllocNode<__node_alloc_type>;

	__buckets_ptr __former_buckets = nullptr;
	std::size_t __former_bucket_count = _M_bucket_count;
	// Restores the rehash policy on exception.
	__rehash_guard_t __rehash_guard(_M_rehash_policy);

	if (_M_bucket_count != __ht._M_bucket_count)
	  {
	    // Keep the old array so it can be restored on exception.
	    __former_buckets = _M_buckets;
	    _M_buckets = _M_allocate_buckets(__ht._M_bucket_count);
	    _M_bucket_count = __ht._M_bucket_count;
	  }
	else
	  std::fill_n(_M_buckets, _M_bucket_count, nullptr);

	__try
	  {
	    __hashtable_base::operator=(std::forward<_Ht>(__ht));
	    _M_element_count = __ht._M_element_count;
	    _M_rehash_policy = __ht._M_rehash_policy;
	    // __roan takes custody of the old node list so nodes can be
	    // recycled one by one during _M_assign.
	    __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
	    _M_before_begin._M_nxt = nullptr;
	    _M_assign(std::forward<_Ht>(__ht), __roan);
	    if (__former_buckets)
	      _M_deallocate_buckets(__former_buckets, __former_bucket_count);
	    // Success: keep the new rehash policy.
	    __rehash_guard._M_guarded_obj = nullptr;
	  }
	__catch(...)
	  {
	    if (__former_buckets)
	      {
		// Restore previous buckets.
		_M_deallocate_buckets();
		_M_buckets = __former_buckets;
		_M_bucket_count = __former_bucket_count;
	      }
	    // The container is left empty but usable.
	    std::fill_n(_M_buckets, _M_bucket_count, nullptr);
	    __throw_exception_again;
	  }
      }
1629 
  // Populate *this with copies (or moves, when _Ht is an rvalue) of the
  // elements of __ht, producing nodes via __node_gen.  Assumes *this holds
  // no elements.  On exception the guard clears the partially-built list
  // (and frees the bucket array if it was allocated here).
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Ht, typename _NodeGenerator>
      void
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_assign(_Ht&& __ht, _NodeGenerator& __node_gen)
      {
	struct _Guard
	{
	  ~_Guard()
	  {
	    if (_M_ht)
	      {
		_M_ht->clear();
		if (_M_dealloc_buckets)
		  _M_ht->_M_deallocate_buckets();
	      }
	  }
	  _Hashtable* _M_ht = nullptr;
	  bool _M_dealloc_buckets = false;
	};
	_Guard __guard;

	if (!_M_buckets)
	  {
	    _M_buckets = _M_allocate_buckets(_M_bucket_count);
	    __guard._M_dealloc_buckets = true;
	  }

	// Nothing to copy from an empty source.
	if (!__ht._M_before_begin._M_nxt)
	  return;

	// Arm the guard now that nodes will be created.
	__guard._M_ht = this;

	// Move elements when assigning from an rvalue container.
	using _FromVal = __conditional_t<is_lvalue_reference<_Ht>::value,
					 const value_type&, value_type&&>;

	// First deal with the special first node pointed to by
	// _M_before_begin.
	__node_ptr __ht_n = __ht._M_begin();
	__node_ptr __this_n
	  = __node_gen(static_cast<_FromVal>(__ht_n->_M_v()));
	_M_copy_code(*__this_n, *__ht_n);
	_M_update_bbegin(__this_n);

	// Then deal with other nodes.  Nodes are appended in the source's
	// order, so each bucket's before-begin pointer is the previous
	// node at the time the bucket is first seen.
	__node_ptr __prev_n = __this_n;
	for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
	  {
	    __this_n = __node_gen(static_cast<_FromVal>(__ht_n->_M_v()));
	    __prev_n->_M_nxt = __this_n;
	    _M_copy_code(*__this_n, *__ht_n);
	    size_type __bkt = _M_bucket_index(*__this_n);
	    if (!_M_buckets[__bkt])
	      _M_buckets[__bkt] = __prev_n;
	    __prev_n = __this_n;
	  }
	// Success: disarm the guard.
	__guard._M_ht = nullptr;
      }
1692 
  // Return the container to its freshly-constructed empty state: a single
  // embedded bucket, no elements, default rehash policy.  Does not free
  // any nodes or bucket arrays; callers do that first.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_reset() noexcept
    {
      _M_rehash_policy._M_reset();
      _M_bucket_count = 1;
      _M_single_bucket = nullptr;
      _M_buckets = &_M_single_bucket;
      _M_before_begin._M_nxt = nullptr;
      _M_element_count = 0;
    }
1709 
  // Move assignment when memory can be stolen (equal or propagating
  // allocators): free this container's storage and take over __ht's
  // buckets, nodes and bookkeeping, then reset __ht.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_move_assign(_Hashtable&& __ht, true_type)
    {
      // Self-move: leave the container unchanged.
      if (__builtin_expect(std::__addressof(__ht) == this, false))
	return;

      this->_M_deallocate_nodes(_M_begin());
      _M_deallocate_buckets();
      __hashtable_base::operator=(std::move(__ht));
      _M_rehash_policy = __ht._M_rehash_policy;
      // The embedded single bucket cannot be stolen; copy its content
      // and point at our own.
      if (!__ht._M_uses_single_bucket())
	_M_buckets = __ht._M_buckets;
      else
	{
	  _M_buckets = &_M_single_bucket;
	  _M_single_bucket = __ht._M_single_bucket;
	}

      _M_bucket_count = __ht._M_bucket_count;
      _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
      _M_element_count = __ht._M_element_count;
      std::__alloc_on_move(this->_M_node_allocator(), __ht._M_node_allocator());

      // Fix bucket containing the _M_before_begin pointer that can't be moved.
      _M_update_bbegin();
      __ht._M_reset();
    }
1743 
1744  template<typename _Key, typename _Value, typename _Alloc,
1745  typename _ExtractKey, typename _Equal,
1746  typename _Hash, typename _RangeHash, typename _Unused,
1747  typename _RehashPolicy, typename _Traits>
1748  void
1749  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1750  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1751  _M_move_assign(_Hashtable&& __ht, false_type)
1752  {
1753  if (__ht._M_node_allocator() == this->_M_node_allocator())
1754  _M_move_assign(std::move(__ht), true_type{});
1755  else
1756  {
1757  // Can't move memory, move elements then.
1758  _M_assign_elements(std::move(__ht));
1759  __ht.clear();
1760  }
1761  }
1762 
  // Copy constructor.  The node allocator is obtained via
  // select_on_container_copy_construction; elements are deep-copied by
  // _M_assign.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    inline
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(const _Hashtable& __ht)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(
	__node_alloc_traits::_S_select_on_copy(__ht._M_node_allocator())),
      __enable_default_ctor(__ht),
      _M_buckets(nullptr), // _M_assign allocates the bucket array
      _M_bucket_count(__ht._M_bucket_count),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      _M_assign(__ht);
    }
1784 
  // Move constructor with allocator, for always-equal allocators: steal
  // __ht's storage wholesale and leave __ht empty.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
	       true_type /* alloc always equal */)
    noexcept(_S_nothrow_move())
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(std::move(__a)),
      __enable_default_ctor(__ht),
      _M_buckets(__ht._M_buckets),
      _M_bucket_count(__ht._M_bucket_count),
      _M_before_begin(__ht._M_before_begin._M_nxt),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      // Update buckets if __ht is using its single bucket.
      if (__ht._M_uses_single_bucket())
	{
	  _M_buckets = &_M_single_bucket;
	  _M_single_bucket = __ht._M_single_bucket;
	}

      // Fix bucket containing the _M_before_begin pointer that can't be moved.
      _M_update_bbegin();

      __ht._M_reset();
    }
1817 
  // Extended copy constructor: copy the elements of __ht but use the
  // supplied allocator __a.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    inline
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(const _Hashtable& __ht, const allocator_type& __a)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(__node_alloc_type(__a)),
      __enable_default_ctor(__ht),
      _M_buckets(), // _M_assign allocates the bucket array
      _M_bucket_count(__ht._M_bucket_count),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      _M_assign(__ht);
    }
1838 
  // Extended move constructor for allocators that are not always equal:
  // steal the storage when the allocators happen to compare equal,
  // otherwise move (or copy, when moving could throw) element-wise.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _Hashtable(_Hashtable&& __ht, __node_alloc_type&& __a,
	       false_type /* alloc always equal */)
    : __hashtable_base(__ht),
      __map_base(__ht),
      __rehash_base(__ht),
      __hashtable_alloc(std::move(__a)),
      __enable_default_ctor(__ht),
      _M_buckets(nullptr),
      _M_bucket_count(__ht._M_bucket_count),
      _M_element_count(__ht._M_element_count),
      _M_rehash_policy(__ht._M_rehash_policy)
    {
      if (__ht._M_node_allocator() == this->_M_node_allocator())
	{
	  // Equal allocators: take over __ht's storage (the embedded
	  // single bucket must be copied, not stolen).
	  if (__ht._M_uses_single_bucket())
	    {
	      _M_buckets = &_M_single_bucket;
	      _M_single_bucket = __ht._M_single_bucket;
	    }
	  else
	    _M_buckets = __ht._M_buckets;

	  // Fix bucket containing the _M_before_begin pointer that can't be
	  // moved.
	  _M_update_bbegin(__ht._M_begin());

	  __ht._M_reset();
	}
      else
	{
	  // Unequal allocators: element-wise transfer.  Move elements
	  // only if that cannot throw, otherwise copy them.
	  using _Fwd_Ht = __conditional_t<
	    __move_if_noexcept_cond<value_type>::value,
	    const _Hashtable&, _Hashtable&&>;
	  _M_assign(std::forward<_Fwd_Ht>(__ht));
	  __ht.clear();
	}
    }
1882 
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    ~_Hashtable() noexcept
    {
      // Getting a bucket index from a node shall not throw because it is used
      // during the rehash process. This static_assert purpose is limited to usage
      // of _Hashtable with _Hashtable_traits requesting non-cached hash code.
      // Need a complete type to check this, so do it in the destructor not at
      // class scope.
      static_assert(noexcept(declval<const __hash_code_base_access&>()
			     ._M_bucket_index(declval<const __node_value_type&>(),
					      (std::size_t)0)),
		    "Cache the hash code or qualify your functors involved"
		    " in hash code and bucket index computation with noexcept");

      // Destroy and free every node, then release the bucket array.
      this->_M_deallocate_nodes(_M_begin());
      _M_deallocate_buckets();
    }
1905 
  // Swap all state with __x: functors, allocator (when propagation is
  // enabled), rehash policy, bucket storage and bookkeeping. Special care is
  // needed because a table using its internal single bucket stores a pointer
  // into itself, which cannot simply be swapped.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    swap(_Hashtable& __x)
    noexcept(__and_<__is_nothrow_swappable<_Hash>,
		    __is_nothrow_swappable<_Equal>>::value)
    {
      using std::swap;
      swap(__hash_code_base::_M_hash._M_obj,
	   __x.__hash_code_base::_M_hash._M_obj);
      swap(__hashtable_base::_M_equal._M_obj,
	   __x.__hashtable_base::_M_equal._M_obj);

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
      if constexpr (__node_alloc_traits::propagate_on_container_swap::value)
	swap(this->_M_node_allocator(), __x._M_node_allocator());
#pragma GCC diagnostic pop

      std::swap(_M_rehash_policy, __x._M_rehash_policy);

      // Deal properly with potentially moved instances.
      // If either side uses its single bucket, its bucket pointer must end
      // up pointing at the *other* object's _M_single_bucket member.
      if (this->_M_uses_single_bucket())
	{
	  if (!__x._M_uses_single_bucket())
	    {
	      _M_buckets = __x._M_buckets;
	      __x._M_buckets = &__x._M_single_bucket;
	    }
	}
      else if (__x._M_uses_single_bucket())
	{
	  __x._M_buckets = _M_buckets;
	  _M_buckets = &_M_single_bucket;
	}
      else
	std::swap(_M_buckets, __x._M_buckets);

      std::swap(_M_bucket_count, __x._M_bucket_count);
      std::swap(_M_before_begin._M_nxt, __x._M_before_begin._M_nxt);
      std::swap(_M_element_count, __x._M_element_count);
      std::swap(_M_single_bucket, __x._M_single_bucket);

      // Fix buckets containing the _M_before_begin pointers that can't be
      // swapped.
      _M_update_bbegin();
      __x._M_update_bbegin();
    }
1958 
1959  template<typename _Key, typename _Value, typename _Alloc,
1960  typename _ExtractKey, typename _Equal,
1961  typename _Hash, typename _RangeHash, typename _Unused,
1962  typename _RehashPolicy, typename _Traits>
1963  auto
1964  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1965  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1966  find(const key_type& __k)
1967  -> iterator
1968  { return iterator(_M_locate(__k)); }
1969 
1970  template<typename _Key, typename _Value, typename _Alloc,
1971  typename _ExtractKey, typename _Equal,
1972  typename _Hash, typename _RangeHash, typename _Unused,
1973  typename _RehashPolicy, typename _Traits>
1974  auto
1975  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
1976  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
1977  find(const key_type& __k) const
1978  -> const_iterator
1979  { return const_iterator(_M_locate(__k)); }
1980 
1981 #ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED
  // Heterogeneous (transparent) lookup of a key convertible via _Kt.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_find_tr(const _Kt& __k)
      -> iterator
      {
	// For small tables a linear scan of all elements avoids computing
	// the hash code at all.
	if (size() <= __small_size_threshold())
	  {
	    for (auto __n = _M_begin(); __n; __n = __n->_M_next())
	      if (this->_M_key_equals_tr(__k, *__n))
		return iterator(__n);
	    return end();
	  }

	// Otherwise hash the key and search only its bucket.
	__hash_code __code = this->_M_hash_code_tr(__k);
	std::size_t __bkt = _M_bucket_index(__code);
	return iterator(_M_find_node_tr(__bkt, __k, __code));
      }
2005 
  // Const overload of the heterogeneous lookup; same strategy as the
  // non-const version.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_find_tr(const _Kt& __k) const
      -> const_iterator
      {
	// Small table: linear scan, no hashing.
	if (size() <= __small_size_threshold())
	  {
	    for (auto __n = _M_begin(); __n; __n = __n->_M_next())
	      if (this->_M_key_equals_tr(__k, *__n))
		return const_iterator(__n);
	    return end();
	  }

	// Large table: hash and search the key's bucket.
	__hash_code __code = this->_M_hash_code_tr(__k);
	std::size_t __bkt = _M_bucket_index(__code);
	return const_iterator(_M_find_node_tr(__bkt, __k, __code));
      }
2029 #endif // C++20 __glibcxx_generic_unordered_lookup
2030 
  // Return the number of elements with key equivalent to __k.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    count(const key_type& __k) const
    -> size_type
    {
      auto __it = find(__k);
      if (!__it._M_cur)
	return 0;

      // With unique keys there can be at most one match.
      if (__unique_keys::value)
	return 1;

      // Equivalent elements are kept adjacent, so count the run of nodes
      // following the first match that compare equal to it.
      size_type __result = 1;
      for (auto __ref = __it++;
	   __it._M_cur && this->_M_node_equals(*__ref._M_cur, *__it._M_cur);
	   ++__it)
	++__result;

      return __result;
    }
2056 
2057 #ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED
  // Heterogeneous count: number of elements matching __k.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_count_tr(const _Kt& __k) const
      -> size_type
      {
	// Small table: linear scan. Equivalent elements are adjacent, so
	// once matches have been seen a non-match ends the run.
	if (size() <= __small_size_threshold())
	  {
	    size_type __result = 0;
	    for (auto __n = _M_begin(); __n; __n = __n->_M_next())
	      {
		if (this->_M_key_equals_tr(__k, *__n))
		  {
		    ++__result;
		    continue;
		  }

		// First non-match after a run of matches: done.
		if (__result)
		  break;
	      }

	    return __result;
	  }

	// Large table: hash the key and count within its bucket.
	__hash_code __code = this->_M_hash_code_tr(__k);
	std::size_t __bkt = _M_bucket_index(__code);
	auto __n = _M_find_node_tr(__bkt, __k, __code);
	if (!__n)
	  return 0;

	iterator __it(__n);
	size_type __result = 1;
	for (++__it;
	     __it._M_cur && this->_M_equals_tr(__k, __code, *__it._M_cur);
	     ++__it)
	  ++__result;

	return __result;
      }
2102 #endif // C++20 __glibcxx_generic_unordered_lookup
2103 
  // Return the range of elements with key equivalent to __k as a pair of
  // iterators [first, last).
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    equal_range(const key_type& __k)
    -> pair<iterator, iterator>
    {
      auto __ite = find(__k);
      if (!__ite._M_cur)
	return { __ite, __ite };

      // Unique keys: the range is exactly one element.
      auto __beg = __ite++;
      if (__unique_keys::value)
	return { __beg, __ite };

      // Multi keys: extend the range over the adjacent equivalent nodes.
      while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
	++__ite;

      return { __beg, __ite };
    }
2127 
  // Const overload of equal_range; same strategy as the non-const version.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    equal_range(const key_type& __k) const
    -> pair<const_iterator, const_iterator>
    {
      auto __ite = find(__k);
      if (!__ite._M_cur)
	return { __ite, __ite };

      // Unique keys: the range is exactly one element.
      auto __beg = __ite++;
      if (__unique_keys::value)
	return { __beg, __ite };

      // Multi keys: extend the range over the adjacent equivalent nodes.
      while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
	++__ite;

      return { __beg, __ite };
    }
2151 
2152 #ifdef __glibcxx_generic_unordered_lookup // C++ >= 20 && HOSTED
  // Heterogeneous equal_range.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_equal_range_tr(const _Kt& __k)
      -> pair<iterator, iterator>
      {
	// Small table: one linear scan finds both ends of the run of
	// adjacent equivalent elements.
	if (size() <= __small_size_threshold())
	  {
	    __node_ptr __n, __beg = nullptr;
	    for (__n = _M_begin(); __n; __n = __n->_M_next())
	      {
		if (this->_M_key_equals_tr(__k, *__n))
		  {
		    if (!__beg)
		      __beg = __n;  // First match starts the range.
		    continue;
		  }

		// First non-match after the run ends the range at __n.
		if (__beg)
		  break;
	      }

	    return { iterator(__beg), iterator(__n) };
	  }

	// Large table: hash and locate the first match in its bucket.
	__hash_code __code = this->_M_hash_code_tr(__k);
	std::size_t __bkt = _M_bucket_index(__code);
	auto __n = _M_find_node_tr(__bkt, __k, __code);
	iterator __ite(__n);
	if (!__n)
	  return { __ite, __ite };

	// Advance past the adjacent equivalent nodes.
	auto __beg = __ite++;
	while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
	  ++__ite;

	return { __beg, __ite };
      }
2196 
  // Const overload of the heterogeneous equal_range.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt, typename, typename>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_equal_range_tr(const _Kt& __k) const
      -> pair<const_iterator, const_iterator>
      {
	// Small table: one linear scan finds both ends of the run of
	// adjacent equivalent elements.
	if (size() <= __small_size_threshold())
	  {
	    __node_ptr __n, __beg = nullptr;
	    for (__n = _M_begin(); __n; __n = __n->_M_next())
	      {
		if (this->_M_key_equals_tr(__k, *__n))
		  {
		    if (!__beg)
		      __beg = __n;  // First match starts the range.
		    continue;
		  }

		// First non-match after the run ends the range at __n.
		if (__beg)
		  break;
	      }

	    return { const_iterator(__beg), const_iterator(__n) };
	  }

	// Large table: hash and locate the first match in its bucket.
	__hash_code __code = this->_M_hash_code_tr(__k);
	std::size_t __bkt = _M_bucket_index(__code);
	auto __n = _M_find_node_tr(__bkt, __k, __code);
	const_iterator __ite(__n);
	if (!__n)
	  return { __ite, __ite };

	// Advance past the adjacent equivalent nodes.
	auto __beg = __ite++;
	while (__ite._M_cur && this->_M_equals_tr(__k, __code, *__ite._M_cur))
	  ++__ite;

	return { __beg, __ite };
      }
2240 #endif // C++20 __glibcxx_generic_unordered_lookup
2241 
  // Find the node before the one whose key compares equal to k in the bucket
  // bkt. Return nullptr if no node is found.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename _Kt>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_find_before_node_tr(size_type __bkt, const _Kt& __k,
			     __hash_code __code) const
      -> __node_base_ptr
      {
	// Buckets store the node *before* their first element (possibly
	// &_M_before_begin), so the walk starts one node early.
	__node_base_ptr __prev_p = _M_buckets[__bkt];
	if (!__prev_p)
	  return nullptr;

	for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
	     __p = __p->_M_next())
	  {
	    if (this->_M_equals_tr(__k, __code, *__p))
	      return __prev_p;

	    // Stop at the end of the chain or when the next node belongs
	    // to a different bucket.
	    if (__builtin_expect (
		  !__p->_M_nxt || _M_bucket_index(*__p->_M_next()) != __bkt, 0))
	      break;
	    __prev_p = __p;
	  }

	return nullptr;
      }
2274 
  // Heterogeneous locate: return a __location_type describing where an
  // element equivalent to __k lives (or where it would be inserted).
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template <typename _Kt>
      inline auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_locate_tr(const _Kt& __k) const
      -> __location_type
      {
	__location_type __loc;
	const auto __size = size();

	// Small table: linear scan over the whole element list, tracking
	// the node *before* the candidate (as buckets do).
	if (__size <= __small_size_threshold())
	  {
	    __loc._M_before = pointer_traits<__node_base_ptr>::
	      pointer_to(const_cast<__node_base&>(_M_before_begin));
	    while (__loc._M_before->_M_nxt)
	      {
		if (this->_M_key_equals_tr(__k, *__loc._M_node()))
		  return __loc;
		__loc._M_before = __loc._M_before->_M_nxt;
	      }
	    __loc._M_before = nullptr; // Didn't find it.
	  }

	// Hash code and bucket index are filled in even on a miss, so
	// callers can reuse them (e.g. for a subsequent insertion).
	__loc._M_hash_code = this->_M_hash_code_tr(__k);
	__loc._M_bucket_index = _M_bucket_index(__loc._M_hash_code);

	// Large table: search only within the key's bucket.
	if (__size > __small_size_threshold())
	  __loc._M_before = _M_find_before_node_tr(
	    __loc._M_bucket_index, __k, __loc._M_hash_code);

	return __loc;
      }
2311 
2312  template<typename _Key, typename _Value, typename _Alloc,
2313  typename _ExtractKey, typename _Equal,
2314  typename _Hash, typename _RangeHash, typename _Unused,
2315  typename _RehashPolicy, typename _Traits>
2316  auto
2317  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2318  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2319  _M_get_previous_node(size_type __bkt, __node_ptr __n)
2320  -> __node_base_ptr
2321  {
2322  __node_base_ptr __prev_n = _M_buckets[__bkt];
2323  while (__prev_n->_M_nxt != __n)
2324  __prev_n = __prev_n->_M_nxt;
2325  return __prev_n;
2326  }
2327 
2328 #pragma GCC diagnostic push
2329 #pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
  // Emplace into a unique-keys table. Tries to obtain the key directly
  // from the arguments so the node allocation can be skipped entirely when
  // an equivalent element already exists.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template<typename... _Args>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_emplace_uniq(_Args&&... __args)
      -> pair<iterator, bool>
      {
	// Points at the key to look up, wherever it came from.
	const key_type* __kp = nullptr;

	if constexpr (sizeof...(_Args) == 1)
	  {
	    // A single argument that already is (or contains) the key type.
	    if constexpr (__is_key_type<_Args...>)
	      {
		const auto& __key = _ExtractKey{}(__args...);
		__kp = std::__addressof(__key);
	      }
	  }
	else if constexpr (sizeof...(_Args) == 2)
	  {
	    // Two arguments that would form a key/value pair: bind them by
	    // reference to extract the key without constructing a value_type.
	    if constexpr (__is_key_type<pair<const _Args&...>>)
	      {
		pair<const _Args&...> __refs(__args...);
		const auto& __key = _ExtractKey{}(__refs);
		__kp = std::__addressof(__key);
	      }
	  }

	_Scoped_node __node { __node_ptr(), this }; // Do not create node yet.
	__hash_code __code = 0;
	size_type __bkt = 0;

	if (__kp == nullptr)
	  {
	    // Didn't extract a key from the args, so build the node.
	    __node._M_node
	      = this->_M_allocate_node(std::forward<_Args>(__args)...);
	    const key_type& __key = _ExtractKey{}(__node._M_node->_M_v());
	    __kp = std::__addressof(__key);
	  }

	// __loc is still in scope in the else branch: reuse the hash code
	// and bucket it computed so they aren't recomputed below.
	if (auto __loc = _M_locate(*__kp))
	  // There is already an equivalent node, no insertion.
	  return { iterator(__loc), false };
	else
	  {
	    __code = __loc._M_hash_code;
	    __bkt = __loc._M_bucket_index;
	  }

	// Allocate the node now if lookup was done straight from the args.
	if (!__node._M_node)
	  __node._M_node
	    = this->_M_allocate_node(std::forward<_Args>(__args)...);

	// Insert the node; on success ownership passes to the table, so
	// clear the scoped guard.
	auto __pos = _M_insert_unique_node(__bkt, __code, __node._M_node);
	__node._M_node = nullptr;
	return { __pos, true };
      }
2392 
2393 #pragma GCC diagnostic pop
2394 
2395  template<typename _Key, typename _Value, typename _Alloc,
2396  typename _ExtractKey, typename _Equal,
2397  typename _Hash, typename _RangeHash, typename _Unused,
2398  typename _RehashPolicy, typename _Traits>
2399  template<typename... _Args>
2400  auto
2401  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2402  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2403  _M_emplace_multi(const_iterator __hint, _Args&&... __args)
2404  -> iterator
2405  {
2406  // First build the node to get its hash code.
2407  _Scoped_node __node { this, std::forward<_Args>(__args)... };
2408  const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
2409 
2410  auto __res = this->_M_compute_hash_code(__hint._M_cur, __k);
2411  auto __pos
2412  = _M_insert_multi_node(__res.first, __res.second, __node._M_node);
2413  __node._M_node = nullptr;
2414  return __pos;
2415  }
2416 
2417  template<typename _Key, typename _Value, typename _Alloc,
2418  typename _ExtractKey, typename _Equal,
2419  typename _Hash, typename _RangeHash, typename _Unused,
2420  typename _RehashPolicy, typename _Traits>
2421  void
2422  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2423  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2424  _M_rehash_insert(size_type __n)
2425  {
2426  using __pair_type = std::pair<bool, std::size_t>;
2427  if (__n == 0)
2428  return;
2429 
2430  __rehash_guard_t __rehash_guard(_M_rehash_policy);
2431  __pair_type __do_rehash
2432  = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, __n);
2433 
2434  if (__do_rehash.first)
2435  _M_rehash(__do_rehash.second, false_type{});
2436 
2437  __rehash_guard._M_guarded_obj = nullptr;
2438  }
2439 
2440 
2441  template<typename _Key, typename _Value, typename _Alloc,
2442  typename _ExtractKey, typename _Equal,
2443  typename _Hash, typename _RangeHash, typename _Unused,
2444  typename _RehashPolicy, typename _Traits>
2445  template<typename _InputIterator>
2446  void
2447  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2448  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2449  _M_insert_range_multi(_InputIterator __first, _InputIterator __last)
2450  {
2451  _M_rehash_insert(__detail::__distance_fw(__first, __last));
2452  for (; __first != __last; ++__first)
2453  _M_emplace_multi(cend(), *__first);
2454  }
2455 
  // Compute the hash code for __k, or recover it from an existing
  // equivalent node when the table is small enough to scan linearly.
  // Returns { node to use as insertion hint (or null), hash code }.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_compute_hash_code(__node_ptr __hint, const key_type& __k) const
    -> pair<__node_ptr, __hash_code>
    {
      if (size() <= __small_size_threshold())
	{
	  // Scan from the hint to the end first...
	  if (__hint)
	    {
	      for (auto __it = __hint; __it; __it = __it->_M_next())
		if (this->_M_key_equals(__k, *__it))
		  return { __it, this->_M_hash_code(*__it) };
	    }

	  // ...then from the beginning up to the hint.
	  for (auto __it = _M_begin(); __it != __hint; __it = __it->_M_next())
	    if (this->_M_key_equals(__k, *__it))
	      return { __it, this->_M_hash_code(*__it) };

	  // No equivalent element: the hint is useless, drop it.
	  __hint = nullptr;
	}

      return { __hint, this->_M_hash_code(__k) };
    }
2484 
  // Insert an already-allocated node with the given hash code into a
  // unique-keys table (the caller has established no equivalent element
  // exists). May trigger a rehash.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_insert_unique_node(size_type __bkt, __hash_code __code,
			  __node_ptr __node, size_type __n_elt)
    -> iterator
    {
      // Guard restores the rehash policy state if _M_rehash throws.
      __rehash_guard_t __rehash_guard(_M_rehash_policy);
      std::pair<bool, std::size_t> __do_rehash
	= _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count,
					  __n_elt);

      if (__do_rehash.first)
	{
	  _M_rehash(__do_rehash.second, true_type{});
	  // The bucket count changed, so the target bucket must be
	  // recomputed from the (unchanged) hash code.
	  __bkt = _M_bucket_index(__code);
	}

      __rehash_guard._M_guarded_obj = nullptr;
      _M_store_code(*__node, __code);

      // Always insert at the beginning of the bucket.
      _M_insert_bucket_begin(__bkt, __node);
      ++_M_element_count;
      return iterator(__node);
    }
2515 
  // Insert an already-allocated node into an equivalent-keys table,
  // keeping equivalent elements adjacent. __hint, when equivalent, gives
  // the node to insert after. May trigger a rehash.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_insert_multi_node(__node_ptr __hint,
			 __hash_code __code, __node_ptr __node)
    -> iterator
    {
      // Guard restores the rehash policy state if _M_rehash throws.
      __rehash_guard_t __rehash_guard(_M_rehash_policy);
      std::pair<bool, std::size_t> __do_rehash
	= _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1);

      if (__do_rehash.first)
	_M_rehash(__do_rehash.second, false_type{});

      __rehash_guard._M_guarded_obj = nullptr;
      _M_store_code(*__node, __code);
      const key_type& __k = _ExtractKey{}(__node->_M_v());
      size_type __bkt = _M_bucket_index(__code);

      // Find the node before an equivalent one or use hint if it exists and
      // if it is equivalent.
      __node_base_ptr __prev
	= __builtin_expect(__hint != nullptr, false)
	  && this->_M_equals(__k, __code, *__hint)
	  ? __hint
	  : _M_find_before_node(__bkt, __k, __code);

      if (__prev)
	{
	  // Insert after the node before the equivalent one.
	  __node->_M_nxt = __prev->_M_nxt;
	  __prev->_M_nxt = __node;
	  if (__builtin_expect(__prev == __hint, false))
	    // hint might be the last bucket node, in this case we need to
	    // update next bucket.
	    if (__node->_M_nxt
		&& !this->_M_equals(__k, __code, *__node->_M_next()))
	      {
		size_type __next_bkt = _M_bucket_index(*__node->_M_next());
		if (__next_bkt != __bkt)
		  _M_buckets[__next_bkt] = __node;
	      }
	}
      else
	// The inserted node has no equivalent in the hashtable. We must
	// insert the new node at the beginning of the bucket to preserve
	// equivalent elements' relative positions.
	_M_insert_bucket_begin(__bkt, __node);
      ++_M_element_count;
      return iterator(__node);
    }
2571 
2572  template<typename _Key, typename _Value, typename _Alloc,
2573  typename _ExtractKey, typename _Equal,
2574  typename _Hash, typename _RangeHash, typename _Unused,
2575  typename _RehashPolicy, typename _Traits>
2576  auto
2577  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2578  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2579  erase(const_iterator __it)
2580  -> iterator
2581  {
2582  __node_ptr __n = __it._M_cur;
2583  std::size_t __bkt = _M_bucket_index(*__n);
2584 
2585  // Look for previous node to unlink it from the erased one, this
2586  // is why we need buckets to contain the before begin to make
2587  // this search fast.
2588  __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
2589  return _M_erase(__bkt, __prev_n, __n);
2590  }
2591 
  // Unlink and deallocate node __n, whose predecessor __prev_n and bucket
  // __bkt are already known. Returns an iterator to the following node.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n)
    -> iterator
    {
      // Removing the bucket's first node: the bucket's stored
      // before-begin pointer must be fixed up.
      if (__prev_n == _M_buckets[__bkt])
	_M_remove_bucket_begin(__bkt, __n->_M_next(),
	   __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
      else if (__n->_M_nxt)
	{
	  // If the next node starts another bucket, that bucket's
	  // before-begin pointer was __n and must now be __prev_n.
	  size_type __next_bkt = _M_bucket_index(*__n->_M_next());
	  if (__next_bkt != __bkt)
	    _M_buckets[__next_bkt] = __prev_n;
	}

      __prev_n->_M_nxt = __n->_M_nxt;
      iterator __result(__n->_M_next());
      this->_M_deallocate_node(__n);
      --_M_element_count;

      return __result;
    }
2619 
2620 #pragma GCC diagnostic push
2621 #pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr
2622 
  // Erase all elements with key equivalent to __k; returns how many were
  // removed.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    erase(const key_type& __k)
    -> size_type
    {
      auto __loc = _M_locate(__k);
      if (!__loc)
	return 0;

      __node_base_ptr __prev_n = __loc._M_before;
      __node_ptr __n = __loc._M_node();
      auto __bkt = __loc._M_bucket_index;
      // size_type(-1) is the sentinel for "bucket index not computed by
      // _M_locate"; derive it from the node in that case.
      if (__bkt == size_type(-1))
	__bkt = _M_bucket_index(*__n);
      if constexpr (__unique_keys::value)
	{
	  _M_erase(__bkt, __prev_n, __n);
	  return 1;
	}
      else
	// Equivalent keys: remove the whole run of matching nodes.
	return _M_erase_some(__bkt, __prev_n, __n);
    }
2650 
  // Erase the run of nodes equivalent to *__n starting at __n, whose
  // predecessor is __prev_n; returns the number of nodes removed.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_erase_some(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n)
    -> size_type
    {
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 526. Is it undefined if a function in the standard changes
      // in parameters?
      // We use one loop to find all matching nodes and another to
      // deallocate them so that the key stays valid during the first loop.
      // It might be invalidated indirectly when destroying nodes.
      __node_ptr __n_last = __n->_M_next();
      while (__n_last && this->_M_node_equals(*__n, *__n_last))
	__n_last = __n_last->_M_next();

      // Bucket of the first surviving node, needed for the fixups below.
      std::size_t __n_last_bkt
	= __n_last ? _M_bucket_index(*__n_last) : __bkt;

      // Deallocate nodes.
      size_type __result = 0;
      do
	{
	  __node_ptr __p = __n->_M_next();
	  this->_M_deallocate_node(__n);
	  __n = __p;
	  ++__result;
	}
      while (__n != __n_last);

      // Re-link the list and fix the affected before-begin pointers.
      _M_element_count -= __result;
      if (__prev_n == _M_buckets[__bkt])
	_M_remove_bucket_begin(__bkt, __n_last, __n_last_bkt);
      else if (__n_last_bkt != __bkt)
	_M_buckets[__n_last_bkt] = __prev_n;
      __prev_n->_M_nxt = __n_last;
      return __result;
    }
2693 
  // Heterogeneous erase: same logic as erase(const key_type&) but keyed by
  // a transparent comparator/hasher via _Kt.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    template <typename _Kt>
      auto
      _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
      _M_erase_tr(const _Kt& __k)
      -> size_type
      {
	auto __loc = _M_locate_tr(__k);
	if (!__loc)
	  return 0;

	__node_base_ptr __prev_n = __loc._M_before;
	__node_ptr __n = __loc._M_node();
	auto __bkt = __loc._M_bucket_index;
	// size_type(-1) signals the bucket index wasn't computed; derive
	// it from the node in that case.
	if (__bkt == size_type(-1))
	  __bkt = _M_bucket_index(*__n);
	if constexpr (__unique_keys::value)
	  {
	    _M_erase(__bkt, __prev_n, __n);
	    return 1;
	  }
	else
	  // Equivalent keys: remove the whole run of matching nodes.
	  return _M_erase_some(__bkt, __prev_n, __n);
      }
2722 
2723 #pragma GCC diagnostic pop
2724 
  // Erase the range [__first, __last), which may span several buckets.
  // Returns an iterator to the first surviving node.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    auto
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    erase(const_iterator __first, const_iterator __last)
    -> iterator
    {
      __node_ptr __n = __first._M_cur;
      __node_ptr __last_n = __last._M_cur;
      if (__n == __last_n)
	return iterator(__n);

      std::size_t __bkt = _M_bucket_index(*__n);

      // Predecessor of the first erased node; the whole range will be
      // re-linked to it at the end.
      __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
      bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
      std::size_t __n_bkt = __bkt;
      // Outer loop: one iteration per bucket touched by the range.
      for (;;)
	{
	  // Inner loop: deallocate the range's nodes within one bucket.
	  do
	    {
	      __node_ptr __tmp = __n;
	      __n = __n->_M_next();
	      this->_M_deallocate_node(__tmp);
	      --_M_element_count;
	      if (!__n)
		break;
	      __n_bkt = _M_bucket_index(*__n);
	    }
	  while (__n != __last_n && __n_bkt == __bkt);
	  if (__is_bucket_begin)
	    _M_remove_bucket_begin(__bkt, __n, __n_bkt);
	  if (__n == __last_n)
	    break;
	  // Every subsequent bucket is erased from its beginning.
	  __is_bucket_begin = true;
	  __bkt = __n_bkt;
	}

      // If the surviving node starts a different bucket (or its bucket
      // lost its first node), its before-begin pointer becomes __prev_n.
      if (__n && (__n_bkt != __bkt || __is_bucket_begin))
	_M_buckets[__n_bkt] = __prev_n;
      __prev_n->_M_nxt = __n;
      return iterator(__n);
    }
2771 
2772  template<typename _Key, typename _Value, typename _Alloc,
2773  typename _ExtractKey, typename _Equal,
2774  typename _Hash, typename _RangeHash, typename _Unused,
2775  typename _RehashPolicy, typename _Traits>
2776  void
2777  _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
2778  _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
2779  clear() noexcept
2780  {
2781  this->_M_deallocate_nodes(_M_begin());
2782  std::fill_n(_M_buckets, _M_bucket_count, nullptr);
2783  _M_element_count = 0;
2784  _M_before_begin._M_nxt = nullptr;
2785  }
2786 
  // Public rehash: grow (never shrink below what the policy requires for
  // the current element count) to at least __bkt_count buckets.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    rehash(size_type __bkt_count)
    {
      // Guard restores the rehash policy state if _M_rehash throws.
      __rehash_guard_t __rehash_guard(_M_rehash_policy);
      // Never go below the bucket count the policy wants for the current
      // number of elements (plus one pending insertion).
      __bkt_count
	= std::max(_M_rehash_policy._M_bkt_for_elements(_M_element_count + 1),
		   __bkt_count);
      __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count);

      if (__bkt_count != _M_bucket_count)
	{
	  _M_rehash(__bkt_count, __unique_keys{});
	  // Disarm the guard only after a successful rehash.
	  __rehash_guard._M_guarded_obj = nullptr;
	}
    }
2808 
  // Rehash when there are no equivalent elements: nodes may be relinked
  // into the new buckets in any order, so a single forward pass suffices.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_rehash(size_type __bkt_count, true_type /* __uks */)
    {
      __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
      __node_ptr __p = _M_begin();
      _M_before_begin._M_nxt = nullptr;
      // Bucket of the node currently at the front of the rebuilt list.
      std::size_t __bbegin_bkt = 0;
      while (__p)
	{
	  __node_ptr __next = __p->_M_next();
	  std::size_t __bkt
	    = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
	  if (!__new_buckets[__bkt])
	    {
	      // First node seen for this bucket: prepend it to the global
	      // list.  A bucket slot stores the node *preceding* the
	      // bucket's first node, so this bucket now stores the
	      // before-begin sentinel...
	      __p->_M_nxt = _M_before_begin._M_nxt;
	      _M_before_begin._M_nxt = __p;
	      __new_buckets[__bkt] = &_M_before_begin;
	      // ...and the bucket that was previously at the front must be
	      // repointed at __p, its new preceding node.
	      if (__p->_M_nxt)
		__new_buckets[__bbegin_bkt] = __p;
	      __bbegin_bkt = __bkt;
	    }
	  else
	    {
	      // Bucket already populated: splice __p in right after the
	      // bucket's before-begin node, leaving all bucket slots valid.
	      __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
	      __new_buckets[__bkt]->_M_nxt = __p;
	    }

	  __p = __next;
	}

      // Only now that relinking succeeded, swap in the new bucket array.
      _M_deallocate_buckets();
      _M_bucket_count = __bkt_count;
      _M_buckets = __new_buckets;
    }
2850 
  // Rehash when there can be equivalent elements, preserving their relative
  // order: runs of equivalent nodes are kept contiguous and in sequence.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    void
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_rehash(size_type __bkt_count, false_type /* __uks */)
    {
      __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
      __node_ptr __p = _M_begin();
      _M_before_begin._M_nxt = nullptr;
      // Bucket of the node currently at the front of the rebuilt list.
      std::size_t __bbegin_bkt = 0;
      // Bucket and node of the previous iteration's insertion.
      std::size_t __prev_bkt = 0;
      __node_ptr __prev_p = nullptr;
      // True while a deferred "did we displace a bucket's first node?"
      // check is pending (see below).
      bool __check_bucket = false;

      while (__p)
	{
	  __node_ptr __next = __p->_M_next();
	  std::size_t __bkt
	    = __hash_code_base::_M_bucket_index(*__p, __bkt_count);

	  if (__prev_p && __prev_bkt == __bkt)
	    {
	      // The previous node went into this same bucket; insert after
	      // it so equivalent elements keep their relative order.
	      __p->_M_nxt = __prev_p->_M_nxt;
	      __prev_p->_M_nxt = __p;

	      // Inserting after a node within a bucket requires checking
	      // that we haven't changed the bucket's last node: if we have,
	      // the next bucket's slot (which stores its before-begin node)
	      // must be updated.  We defer that check until we leave this
	      // run of equivalent nodes, to limit the number of checks.
	      __check_bucket = true;
	    }
	  else
	    {
	      if (__check_bucket)
		{
		  // Perform the deferred check: if insertions into
		  // __prev_bkt pushed new nodes in front of another
		  // bucket's first node, repoint that bucket at __prev_p.
		  if (__prev_p->_M_nxt)
		    {
		      std::size_t __next_bkt
			= __hash_code_base::_M_bucket_index(
			  *__prev_p->_M_next(), __bkt_count);
		      if (__next_bkt != __prev_bkt)
			__new_buckets[__next_bkt] = __prev_p;
		    }
		  __check_bucket = false;
		}

	      if (!__new_buckets[__bkt])
		{
		  // First node for this bucket: prepend to the global list.
		  // Bucket slots store the node preceding the bucket's
		  // first node, hence &_M_before_begin here, and the old
		  // front bucket gets repointed at __p below.
		  __p->_M_nxt = _M_before_begin._M_nxt;
		  _M_before_begin._M_nxt = __p;
		  __new_buckets[__bkt] = &_M_before_begin;
		  if (__p->_M_nxt)
		    __new_buckets[__bbegin_bkt] = __p;
		  __bbegin_bkt = __bkt;
		}
	      else
		{
		  // Bucket already populated: splice in right after its
		  // before-begin node.
		  __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
		  __new_buckets[__bkt]->_M_nxt = __p;
		}
	    }
	  __prev_p = __p;
	  __prev_bkt = __bkt;
	  __p = __next;
	}

      // A check may still be pending for the final run of equivalents.
      if (__check_bucket && __prev_p->_M_nxt)
	{
	  std::size_t __next_bkt
	    = __hash_code_base::_M_bucket_index(*__prev_p->_M_next(),
						__bkt_count);
	  if (__next_bkt != __prev_bkt)
	    __new_buckets[__next_bkt] = __prev_p;
	}

      // Only now that relinking succeeded, swap in the new bucket array.
      _M_deallocate_buckets();
      _M_bucket_count = __bkt_count;
      _M_buckets = __new_buckets;
    }
2941 
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wc++17-extensions" // if constexpr

  // This is for implementing equality comparison for unordered containers,
  // per N3068, by John Lakos and Pablo Halpern.
  // Algorithmically, we follow closely the reference implementations therein.
  // Unique keys: each element of *this must appear (by operator==) in the
  // corresponding bucket of __other.  Non-unique keys: each maximal run of
  // equivalent elements in *this must be a permutation of the matching run
  // in __other.
  template<typename _Key, typename _Value, typename _Alloc,
	   typename _ExtractKey, typename _Equal,
	   typename _Hash, typename _RangeHash, typename _Unused,
	   typename _RehashPolicy, typename _Traits>
    bool
    _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
    _M_equal(const _Hashtable& __other) const
    {
      // Equal containers must agree on size; this also lets the loops below
      // avoid re-checking totals.
      if (size() != __other.size())
	return false;

      if constexpr (__unique_keys::value)
	for (auto __x_n = _M_begin(); __x_n; __x_n = __x_n->_M_next())
	  {
	    // Locate the bucket in __other that would hold this element.
	    // Note: __other may hash differently, hence _ext (external
	    // hashing of *our* node against *its* bucket count).
	    std::size_t __ybkt = __other._M_bucket_index_ext(*__x_n);
	    auto __prev_n = __other._M_buckets[__ybkt];
	    if (!__prev_n)
	      return false;

	    // Scan that bucket for a value-equal node; stop when the chain
	    // leaves the bucket.
	    for (__node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);;
		 __n = __n->_M_next())
	      {
		if (__n->_M_v() == __x_n->_M_v())
		  break;

		if (!__n->_M_nxt
		    || __other._M_bucket_index(*__n->_M_next()) != __ybkt)
		  return false;
	      }
	  }
      else // non-unique keys
	for (auto __x_n = _M_begin(); __x_n;)
	  {
	    // Measure the run of elements in *this equivalent to __x_n;
	    // equivalent elements are contiguous in the node list.
	    std::size_t __x_count = 1;
	    auto __x_n_end = __x_n->_M_next();
	    for (; __x_n_end
		   && key_eq()(_ExtractKey{}(__x_n->_M_v()),
			       _ExtractKey{}(__x_n_end->_M_v()));
		 __x_n_end = __x_n_end->_M_next())
	      ++__x_count;

	    // Find the corresponding bucket in __other.
	    std::size_t __ybkt = __other._M_bucket_index_ext(*__x_n);
	    auto __y_prev_n = __other._M_buckets[__ybkt];
	    if (!__y_prev_n)
	      return false;

	    // Find the start of the equivalent run in __other's bucket,
	    // skipping over whole runs of non-matching equivalents.
	    __node_ptr __y_n = static_cast<__node_ptr>(__y_prev_n->_M_nxt);
	    for (;;)
	      {
		if (key_eq()(_ExtractKey{}(__y_n->_M_v()),
			     _ExtractKey{}(__x_n->_M_v())))
		  break;

		auto __y_ref_n = __y_n;
		for (__y_n = __y_n->_M_next(); __y_n; __y_n = __y_n->_M_next())
		  if (!__other._M_node_equals(*__y_ref_n, *__y_n))
		    break;

		if (!__y_n || __other._M_bucket_index(*__y_n) != __ybkt)
		  return false;
	      }

	    // Advance __x_count nodes in __other to delimit its run; if the
	    // chain is shorter than our run, the runs can't match.
	    auto __y_n_end = __y_n;
	    for (; __y_n_end; __y_n_end = __y_n_end->_M_next())
	      if (--__x_count == 0)
		break;

	    if (__x_count != 0)
	      return false;

	    // Same length: the runs must be permutations of each other.
	    const_iterator __itx(__x_n), __itx_end(__x_n_end);
	    const_iterator __ity(__y_n);
	    if (!std::is_permutation(__itx, __itx_end, __ity))
	      return false;

	    __x_n = __x_n_end;
	  }

      return true;
    }
#pragma GCC diagnostic pop
3030 
#ifdef __glibcxx_node_extract // >= C++17 && HOSTED
  // Empty primary template; presumably specialized elsewhere (by the
  // unordered containers) to grant access for merge() between compatible
  // container types — verify against the unordered_{map,set}.h headers.
  template<typename, typename, typename> class _Hash_merge_helper { };
#endif // C++17
3034 
#if __cpp_deduction_guides >= 201606
  // Used to constrain deduction guides: well-formed only when _Hash is
  // neither an integral type nor an allocator, removing guide candidates
  // where the argument in the hasher position is really a bucket count or
  // an allocator.
  template<typename _Hash>
    using _RequireNotAllocatorOrIntegral
      = __enable_if_t<!__or_<is_integral<_Hash>, __is_allocator<_Hash>>::value>;
#endif
3041 
#ifdef __glibcxx_associative_heterogeneous_erasure // C++ >= 23
// Satisfied when _Kt can be used as a heterogeneous lookup/erasure key for
// _Container: both the container's hasher and its key_equal must be
// transparent, and _Kt must satisfy __heterogeneous_key for the container.
template <typename _Kt, typename _Container>
  concept __heterogeneous_hash_key =
    __transparent_comparator<typename _Container::hasher> &&
    __transparent_comparator<typename _Container::key_equal> &&
    __heterogeneous_key<_Kt, _Container>;
#endif
3049 
3050 /// @endcond
3051 _GLIBCXX_END_NAMESPACE_VERSION
3052 } // namespace std
3053 
3054 #pragma GCC diagnostic pop
3055 
3056 #endif // _HASHTABLE_H
constexpr const _Tp & max(const _Tp &, const _Tp &)
This does what you think it does.
Definition: stl_algobase.h:257
constexpr auto size(const _Container &__cont) noexcept(noexcept(__cont.size())) -> decltype(__cont.size())
Return the size of a container.
Definition: range_access.h:274
constexpr tuple< _Elements &&... > forward_as_tuple(_Elements &&... __args) noexcept
Create a tuple of lvalue or rvalue references to the arguments.
Definition: tuple:2733
constexpr iterator_traits< _InputIterator >::difference_type distance(_InputIterator __first, _InputIterator __last)
A generalization of pointer arithmetic.
__bool_constant< true > true_type
The type used as a compile-time boolean with true value.
Definition: type_traits:119
constexpr _OI fill_n(_OI __first, _Size __n, const _Tp &__value)
Fills the range [first,first+n) with copies of value.
constexpr auto empty(const _Container &__cont) noexcept(noexcept(__cont.empty())) -> decltype(__cont.empty())
Return whether a container is empty.
Definition: range_access.h:294
The top-level namespace for ISO C++ entities is std.
constexpr auto cbegin(const _Container &__cont) noexcept(noexcept(std::begin(__cont))) -> decltype(std::begin(__cont))
Return an iterator pointing to the first element of the const container.
Definition: range_access.h:132
__bool_constant< false > false_type
The type used as a compile-time boolean with false value.
Definition: type_traits:122
_Tp * end(valarray< _Tp > &__va) noexcept
Return an iterator pointing to one past the last element of the valarray.
Definition: valarray:1251
Struct holding two objects of arbitrary type.
constexpr auto cend(const _Container &__cont) noexcept(noexcept(std::end(__cont))) -> decltype(std::end(__cont))
Return an iterator pointing to one past the last element of the const container.
Definition: range_access.h:144
constexpr std::remove_reference< _Tp >::type && move(_Tp &&__t) noexcept
Convert a value to an rvalue.
Definition: move.h:138
constexpr _Tp * __addressof(_Tp &__r) noexcept
Same as C++11 std::addressof.
Definition: move.h:52
constexpr piecewise_construct_t piecewise_construct
Tag for piecewise construction of std::pair objects.
Definition: stl_pair.h:82
constexpr _Tp && forward(typename std::remove_reference< _Tp >::type &__t) noexcept
Forward an lvalue.
Definition: move.h:72
_T2 second
The second member.
Definition: stl_pair.h:309
_T1 first
The first member.
Definition: stl_pair.h:308
_Tp * begin(valarray< _Tp > &__va) noexcept
Return an iterator pointing to the first element of the valarray.
Definition: valarray:1229