PMDK C++ bindings  1.9
This is the C++ bindings documentation for PMDK's libpmemobj.
concurrent_hash_map.hpp
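Below is a minimal usage sketch (not part of the header itself). It assumes a pool file path and layout name of your choosing ("/path/to/poolfile" and "concurrent_hash_map_example" are placeholders) and a root structure that owns the map. On the first run the map is allocated inside a transaction; on every later run runtime_initialize() restores the volatile state (bucket mask and element count) before the map is used. Elements are reached through accessor (read-write) and const_accessor (read-only) objects, which keep the element locked for as long as they exist.

#include <libpmemobj++/container/concurrent_hash_map.hpp>
#include <libpmemobj++/make_persistent.hpp>
#include <libpmemobj++/p.hpp>
#include <libpmemobj++/persistent_ptr.hpp>
#include <libpmemobj++/pool.hpp>
#include <libpmemobj++/transaction.hpp>

#include <iostream>

using map_type =
	pmem::obj::concurrent_hash_map<pmem::obj::p<int>, pmem::obj::p<int>>;

struct root {
	pmem::obj::persistent_ptr<map_type> map;
};

int main()
{
	/* Open an existing pool; path and layout name are placeholders. */
	auto pop = pmem::obj::pool<root>::open("/path/to/poolfile",
					       "concurrent_hash_map_example");
	auto r = pop.root();

	if (r->map == nullptr) {
		/* First run: allocate the map transactionally. */
		pmem::obj::transaction::run(pop, [&] {
			r->map = pmem::obj::make_persistent<map_type>();
		});
	} else {
		/* Later runs: restore the map's volatile state. */
		r->map->runtime_initialize();
	}

	map_type::accessor acc;
	r->map->insert(acc, 42); /* inserts key 42 if absent and write-locks it */
	acc->second = 1;
	acc.release();

	map_type::const_accessor cacc;
	if (r->map->find(cacc, 42))
		std::cout << cacc->second.get_ro() << std::endl;

	pop.close();
	return 0;
}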
1 /*
2  * Copyright 2019-2020, Intel Corporation
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  *
8  * * Redistributions of source code must retain the above copyright
9  * notice, this list of conditions and the following disclaimer.
10  *
11  * * Redistributions in binary form must reproduce the above copyright
12  * notice, this list of conditions and the following disclaimer in
13  * the documentation and/or other materials provided with the
14  * distribution.
15  *
16  * * Neither the name of the copyright holder nor the names of its
17  * contributors may be used to endorse or promote products derived
18  * from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
39 #ifndef PMEMOBJ_CONCURRENT_HASH_MAP_HPP
40 #define PMEMOBJ_CONCURRENT_HASH_MAP_HPP
41 
42 #include <libpmemobj++/detail/enumerable_thread_specific.hpp>
43 #include <libpmemobj++/detail/template_helpers.hpp>
44 #include <libpmemobj++/detail/pair.hpp>
46 
47 #include <libpmemobj++/defrag.hpp>
48 #include <libpmemobj++/make_persistent.hpp>
49 #include <libpmemobj++/mutex.hpp>
50 #include <libpmemobj++/p.hpp>
51 #include <libpmemobj++/persistent_ptr.hpp>
52 #include <libpmemobj++/transaction.hpp>
53 
54 #include <libpmemobj++/detail/persistent_pool_ptr.hpp>
55 #include <libpmemobj++/shared_mutex.hpp>
56 
58 
59 #include <atomic>
60 #include <cassert>
61 #include <functional>
62 #include <initializer_list>
63 #include <iterator> // for std::distance
64 #include <memory>
65 #include <mutex>
66 #include <thread>
67 #include <type_traits>
68 #include <utility>
69 #include <vector>
70 
71 namespace std
72 {
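/* Allow pmem::obj::p<T> to be used directly as a key type: the hash of a
 * p<T> is the hash of the read-only value it wraps. */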
76 template <typename T>
77 struct hash<pmem::obj::p<T>> {
78  size_t
79  operator()(const pmem::obj::p<T> &x) const
80  {
81  return hash<T>()(x.get_ro());
82  }
83 };
84 } /* namespace std */
85 
86 namespace pmem
87 {
88 namespace obj
89 {
90 
91 namespace concurrent_hash_map_internal
92 {
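/* RAII scoped lock for SharedMutexT: acquires the mutex in exclusive (write)
 * or shared (read) mode and releases it in the destructor. */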
93 template <typename SharedMutexT>
94 class shared_mutex_scoped_lock {
95  using rw_mutex_type = SharedMutexT;
96 
97 public:
98  shared_mutex_scoped_lock(const shared_mutex_scoped_lock &) = delete;
99  shared_mutex_scoped_lock &
100  operator=(const shared_mutex_scoped_lock &) = delete;
101 
103  shared_mutex_scoped_lock() : mutex(nullptr), is_writer(false)
104  {
105  }
106 
108  shared_mutex_scoped_lock(rw_mutex_type &m, bool write = true)
109  : mutex(nullptr)
110  {
111  acquire(m, write);
112  }
113 
115  ~shared_mutex_scoped_lock()
116  {
117  if (mutex)
118  release();
119  }
120 
122  void
123  acquire(rw_mutex_type &m, bool write = true)
124  {
125  is_writer = write;
126  mutex = &m;
127  if (write)
128  mutex->lock();
129  else
130  mutex->lock_shared();
131  }
132 
136  void
137  release()
138  {
139  assert(mutex);
140  rw_mutex_type *m = mutex;
141  mutex = nullptr;
142  if (is_writer) {
143  m->unlock();
144  } else {
145  m->unlock_shared();
146  }
147  }
148 
153  bool
154  try_acquire(rw_mutex_type &m, bool write = true)
155  {
156  assert(!mutex);
157  bool result;
158  is_writer = write;
159  result = write ? m.try_lock() : m.try_lock_shared();
160  if (result)
161  mutex = &m;
162  return result;
163  }
164 
165 protected:
170  rw_mutex_type *mutex;
175  bool is_writer;
176 }; /* class shared_mutex_scoped_lock */
177 
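/* Compile-time detection (via detail::supports) of whether ScopedLockType
 * provides upgrade_to_writer()/downgrade_to_reader(); the result selects the
 * proper scoped_lock_traits specialization below. */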
178 template <typename ScopedLockType>
179 using scoped_lock_upgrade_to_writer =
180  decltype(std::declval<ScopedLockType>().upgrade_to_writer());
181 
182 template <typename ScopedLockType>
183 using scoped_lock_has_upgrade_to_writer =
184  detail::supports<ScopedLockType, scoped_lock_upgrade_to_writer>;
185 
186 template <typename ScopedLockType>
187 using scoped_lock_downgrade_to_reader =
188  decltype(std::declval<ScopedLockType>().downgrade_to_reader());
189 
190 template <typename ScopedLockType>
191 using scoped_lock_has_downgrade_to_reader =
192  detail::supports<ScopedLockType, scoped_lock_downgrade_to_reader>;
193 
194 template <typename ScopedLockType,
195  bool = scoped_lock_has_upgrade_to_writer<ScopedLockType>::value
196  &&scoped_lock_has_downgrade_to_reader<ScopedLockType>::value>
197 class scoped_lock_traits {
198 public:
199  using scope_lock_type = ScopedLockType;
200 
201  static bool
202  initial_rw_state(bool write)
203  {
204  /* For upgradeable locks, initial state is always read */
205  return false;
206  }
207 
208  static bool
209  upgrade_to_writer(scope_lock_type &lock)
210  {
211  return lock.upgrade_to_writer();
212  }
213 
214  static bool
215  downgrade_to_reader(scope_lock_type &lock)
216  {
217  return lock.downgrade_to_reader();
218  }
219 };
220 
221 template <typename ScopedLockType>
222 class scoped_lock_traits<ScopedLockType, false> {
223 public:
224  using scope_lock_type = ScopedLockType;
225 
226  static bool
227  initial_rw_state(bool write)
228  {
229  /* For non-upgradeable locks, we take lock in required mode
230  * immediately */
231  return write;
232  }
233 
234  static bool
235  upgrade_to_writer(scope_lock_type &lock)
236  {
237  /* This overload is for locks which do not support upgrade
238  * operation. For those locks, upgrade_to_writer should not be
239  * called when holding a read lock */
240  return true;
241  }
242 
243  static bool
244  downgrade_to_reader(scope_lock_type &lock)
245  {
246  /* This overload is for locks which do not support downgrade
247  * operation. For those locks, downgrade_to_reader should never
248  * be called */
249  assert(false);
250 
251  return false;
252  }
253 };
254 }
255 
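/* Forward declaration of the container with its default template parameters;
 * the definition appears further down in this file. */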
256 template <typename Key, typename T, typename Hash = std::hash<Key>,
257  typename KeyEqual = std::equal_to<Key>,
258  typename MutexType = pmem::obj::shared_mutex,
259  typename ScopedLockType = concurrent_hash_map_internal::
260  shared_mutex_scoped_lock<MutexType>>
261 class concurrent_hash_map;
262 
264 namespace concurrent_hash_map_internal
265 {
266 /* Helper method which throws an exception when called in a tx */
267 static inline void
268 check_outside_tx()
269 {
270  if (pmemobj_tx_stage() != TX_STAGE_NONE)
271  throw pmem::transaction_scope_error(
272  "Function called inside transaction scope.");
273 }
274 
275 template <typename Hash>
276 using transparent_key_equal = typename Hash::transparent_key_equal;
277 
278 template <typename Hash>
279 using has_transparent_key_equal = detail::supports<Hash, transparent_key_equal>;
280 
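/* key_equal_type: use Hash::transparent_key_equal when the hasher provides
 * it, otherwise fall back to the Pred (KeyEqual) parameter. */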
281 template <typename Hash, typename Pred,
282  bool = has_transparent_key_equal<Hash>::value>
283 struct key_equal_type {
284  using type = typename Hash::transparent_key_equal;
285 };
286 
287 template <typename Hash, typename Pred>
288 struct key_equal_type<Hash, Pred, false> {
289  using type = Pred;
290 };
291 
292 template <typename Mutex, typename ScopedLockType>
293 void
294 assert_not_locked(Mutex &mtx)
295 {
296 #ifndef NDEBUG
297  ScopedLockType scoped_lock;
298  assert(scoped_lock.try_acquire(mtx));
299  scoped_lock.release();
300 #else
301  (void)mtx;
302 #endif
303 }
304 
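/* A single node of a bucket's singly-linked list: a pointer to the next
 * node, a per-node mutex and the stored key/value pair. */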
305 template <typename Key, typename T, typename MutexType, typename ScopedLockType>
306 struct hash_map_node {
308  using mutex_t = MutexType;
309 
311  using scoped_t = ScopedLockType;
312 
313  using value_type = detail::pair<const Key, T>;
314 
316  using node_ptr_t = detail::persistent_pool_ptr<
317  hash_map_node<Key, T, mutex_t, scoped_t>>;
318 
320  node_ptr_t next;
321 
323  mutex_t mutex;
324 
326  value_type item;
327 
328  hash_map_node(const node_ptr_t &_next, const Key &key)
329  : next(_next),
330  item(std::piecewise_construct, std::forward_as_tuple(key),
331  std::forward_as_tuple())
332  {
333  }
334 
335  hash_map_node(const node_ptr_t &_next, const Key &key, const T &t)
336  : next(_next), item(key, t)
337  {
338  }
339 
340  hash_map_node(const node_ptr_t &_next, value_type &&i)
341  : next(_next), item(std::move(i))
342  {
343  }
344 
345  template <typename... Args>
346  hash_map_node(const node_ptr_t &_next, Args &&... args)
347  : next(_next), item(std::forward<Args>(args)...)
348  {
349  }
350 
351  hash_map_node(const node_ptr_t &_next, const value_type &i)
352  : next(_next), item(i)
353  {
354  }
355 
357  hash_map_node(const hash_map_node &) = delete;
358 
360  hash_map_node &operator=(const hash_map_node &) = delete;
361 }; /* struct node */
362 
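/* segment_traits: compile-time geometry of the bucket table, i.e. how
 * buckets are grouped into segments and how segments map onto allocation
 * blocks. */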
367 template <typename Bucket>
368 class segment_traits {
369 public:
371  using segment_index_t = size_t;
372  using size_type = size_t;
373  using bucket_type = Bucket;
374 
375 protected:
377  constexpr static size_type max_allocation_size = PMEMOBJ_MAX_ALLOC_SIZE;
378 
380  constexpr static segment_index_t first_big_block = 27;
381  /* TODO: avoid hardcoded value; need constexpr similar to:
382  * Log2(max_allocation_size / sizeof(bucket_type)) */
383 
385  constexpr static size_type big_block_size = size_type(1)
386  << first_big_block;
387 
388  /* Block size in bytes cannot exceed max_allocation_size */
389  static_assert((big_block_size * sizeof(bucket_type)) <
390  max_allocation_size,
391  "Block size exceeds max_allocation_size");
392 
394  constexpr static segment_index_t
395  first_block_in_segment(segment_index_t seg)
396  {
397  return seg < first_big_block
398  ? seg
399  : (first_big_block +
400  (segment_index_t(1) << (seg - first_big_block)) - 1);
401  }
402 
404  constexpr static size_type
405  blocks_in_segment(segment_index_t seg)
406  {
407  return seg < first_big_block
408  ? segment_index_t(1)
409  : segment_index_t(1) << (seg - first_big_block);
410  }
411 
413  constexpr static size_type
414  block_size(segment_index_t b)
415  {
416  return b < first_big_block ? segment_size(b ? b : 1)
417  : big_block_size;
418  }
419 
420 public:
422  constexpr static segment_index_t embedded_segments = 1;
423 
425  constexpr static size_type embedded_buckets = 1 << embedded_segments;
426 
428  constexpr static segment_index_t number_of_segments = 32;
429 
431  static const size_type first_block = 8;
432 
434  constexpr static segment_index_t
435  number_of_blocks()
436  {
437  return first_block_in_segment(number_of_segments);
438  }
439 
441  static segment_index_t
442  segment_index_of(size_type index)
443  {
444  return segment_index_t(detail::Log2(index | 1));
445  }
446 
448  constexpr static segment_index_t
449  segment_base(segment_index_t k)
450  {
451  return (segment_index_t(1) << k) & ~segment_index_t(1);
452  }
453 
455  constexpr static size_type
456  segment_size(segment_index_t k)
457  {
458  return size_type(1) << k; // fake value for k == 0
459  }
460  static_assert(
461  embedded_segments < first_big_block,
462  "Number of embedded segments cannot exceed max_allocation_size");
463 }; /* End of class segment_traits */
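/* Worked example of the index math above (illustrative): for global bucket
 * index 6, segment_index_of(6) = Log2(6 | 1) = 2, segment_base(2) =
 * (1 << 2) & ~1 = 4 and segment_size(2) = 4, so bucket 6 is bucket 2 of the
 * four buckets that make up segment 2. */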
464 
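/* segment_facade_impl: a (possibly const) view of one segment of the block
 * table; it indexes buckets within the segment and can allocate (enable) or
 * free (disable) the segment's blocks. */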
481 template <typename BlockTable, typename SegmentTraits, bool is_const>
482 class segment_facade_impl : public SegmentTraits {
483 private:
484  using traits_type = SegmentTraits;
485  using traits_type::block_size;
486  using traits_type::blocks_in_segment;
487  using traits_type::embedded_buckets;
488  using traits_type::embedded_segments;
489  using traits_type::first_block;
490  using traits_type::first_block_in_segment;
491  using traits_type::segment_base;
492  using traits_type::segment_size;
493 
494 public:
495  using table_reference =
496  typename std::conditional<is_const, const BlockTable &,
497  BlockTable &>::type;
498 
499  using table_pointer =
500  typename std::conditional<is_const, const BlockTable *,
501  BlockTable *>::type;
502 
503  using bucket_type = typename traits_type::bucket_type;
504  using segment_index_t = typename traits_type::segment_index_t;
505  using size_type = typename traits_type::size_type;
506 
508  segment_facade_impl(table_reference table, segment_index_t s)
509  : my_table(&table), my_seg(s)
510  {
511  assert(my_seg < traits_type::number_of_segments);
512  }
513 
515  segment_facade_impl(const segment_facade_impl &src)
516  : my_table(src.my_table), my_seg(src.my_seg)
517  {
518  }
519 
520  segment_facade_impl(segment_facade_impl &&src) = default;
521 
523  segment_facade_impl &
524  operator=(const segment_facade_impl &src)
525  {
526  my_table = src.my_table;
527  my_seg = src.my_seg;
528  return *this;
529  }
530 
532  segment_facade_impl &
533  operator=(segment_facade_impl &&src)
534  {
535  my_table = src.my_table;
536  my_seg = src.my_seg;
537  return *this;
538  }
539 
546  bucket_type &operator[](size_type i) const
547  {
548  assert(i < size());
549 
550  segment_index_t table_block = first_block_in_segment(my_seg);
551  size_type b_size = block_size(table_block);
552 
553  table_block += i / b_size;
554  i = i % b_size;
555 
556  return (*my_table)[table_block][static_cast<std::ptrdiff_t>(i)];
557  }
558 
562  segment_facade_impl &
563  operator++()
564  {
565  ++my_seg;
566  return *this;
567  }
568 
572  segment_facade_impl
573  operator++(int)
574  {
575  segment_facade_impl tmp = *this;
576  ++(*this);
577  return tmp;
578  }
579 
583  segment_facade_impl &
584  operator--()
585  {
586  --my_seg;
587  return *this;
588  }
589 
593  segment_facade_impl
594  operator--(int)
595  {
596  segment_facade_impl tmp = *this;
597  --(*this);
598  return tmp;
599  }
600 
604  segment_facade_impl &
605  operator+=(segment_index_t off)
606  {
607  my_seg += off;
608  return *this;
609  }
610 
614  segment_facade_impl &
615  operator-=(segment_index_t off)
616  {
617  my_seg -= off;
618  return *this;
619  }
620 
624  segment_facade_impl
625  operator+(segment_index_t off) const
626  {
627  return segment_facade_impl(*(this->my_table),
628  this->my_seg + off);
629  }
630 
634  segment_facade_impl
635  operator-(segment_index_t off) const
636  {
637  return segment_facade_impl(*(this->my_table),
638  this->my_seg - off);
639  }
640 
644  void
645  enable(pool_base &pop)
646  {
647  assert(my_seg >= embedded_segments);
648 
649  if (my_seg < first_block) {
650  enable_first_block(pop);
651  } else {
652  enable_big_segment(pop);
653  }
654  }
655 
659  void
660  disable()
661  {
662  assert(my_seg >= embedded_segments);
663 
664  if (my_seg < first_block) {
665  if (my_seg == embedded_segments) {
666  size_type sz = segment_size(first_block) -
667  embedded_buckets;
668  delete_persistent<bucket_type[]>(
669  (*my_table)[my_seg], sz);
670  }
671  (*my_table)[my_seg] = nullptr;
672  } else {
673  block_range blocks = segment_blocks(my_seg);
674 
675  for (segment_index_t b = blocks.first;
676  b < blocks.second; ++b) {
677  if ((*my_table)[b] != nullptr) {
678  delete_persistent<bucket_type[]>(
679  (*my_table)[b], block_size(b));
680  (*my_table)[b] = nullptr;
681  }
682  }
683  }
684  }
685 
689  constexpr size_type
690  size() const
691  {
692  return segment_size(my_seg ? my_seg : 1);
693  }
694 
700  bool
701  is_valid() const
702  {
703  block_range blocks = segment_blocks(my_seg);
704 
705  for (segment_index_t b = blocks.first; b < blocks.second; ++b) {
706  if ((*my_table)[b] == nullptr)
707  return false;
708  }
709 
710  return true;
711  }
712 
713 private:
714  using block_range = std::pair<segment_index_t, segment_index_t>;
715 
719  static block_range
720  segment_blocks(segment_index_t seg)
721  {
722  segment_index_t begin = first_block_in_segment(seg);
723 
724  return block_range(begin, begin + blocks_in_segment(seg));
725  }
726 
727  void
728  enable_first_block(pool_base &pop)
729  {
730  assert(my_seg == embedded_segments);
731  {
732  transaction::manual tx(pop);
733 
734  size_type sz =
735  segment_size(first_block) - embedded_buckets;
736  (*my_table)[my_seg] =
737  make_persistent<bucket_type[]>(sz);
738 
739  persistent_ptr<bucket_type> base =
740  (*my_table)[embedded_segments].raw();
741 
742  for (segment_index_t s = my_seg + 1; s < first_block;
743  ++s) {
744  std::ptrdiff_t off =
745  static_cast<std::ptrdiff_t>(
746  segment_base(s) -
747  segment_base(my_seg));
748 
749  (*my_table)[s] = (base + off).raw();
750  }
751 
752  transaction::commit();
753  }
754  }
755 
756  void
757  enable_big_segment(pool_base &pop)
758  {
759  block_range blocks = segment_blocks(my_seg);
760  {
761  transaction::manual tx(pop);
762 
763  for (segment_index_t b = blocks.first;
764  b < blocks.second; ++b) {
765  assert((*my_table)[b] == nullptr);
766  (*my_table)[b] = make_persistent<bucket_type[]>(
767  block_size(b));
768  }
769 
770  transaction::commit();
771  }
772  }
773 
775  table_pointer my_table;
776 
778  segment_index_t my_seg;
779 }; /* End of class segment_facade_impl */
780 
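/* hash_map_base: the persistent layout shared by concurrent_hash_map
 * (feature flags, bucket mask, block table, embedded buckets, per-thread
 * size counters) together with helpers for growth, rehash marking, size
 * recovery and swapping. */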
787 template <typename Key, typename T, typename MutexType, typename ScopedLockType>
788 class hash_map_base {
789 public:
790  using mutex_t = MutexType;
791  using scoped_t = ScopedLockType;
792 
794  using size_type = size_t;
795 
797  using hashcode_type = size_t;
798 
800  using node = hash_map_node<Key, T, mutex_t, scoped_t>;
801 
803  using node_ptr_t = detail::persistent_pool_ptr<node>;
804 
806  struct bucket {
807  using mutex_t = MutexType;
808  using scoped_t = ScopedLockType;
809 
811  mutex_t mutex;
812 
814  p<std::atomic<uint64_t>> rehashed;
815 
817  node_ptr_t node_list;
818 
820  bucket() : node_list(nullptr)
821  {
822 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
823  VALGRIND_HG_DISABLE_CHECKING(&rehashed,
824  sizeof(rehashed));
825 #endif
826  rehashed.get_rw() = false;
827  }
828 
834  bool
835  is_rehashed(std::memory_order order)
836  {
837  return rehashed.get_ro().load(order);
838  }
839 
840  void
841  set_rehashed(std::memory_order order)
842  {
843  rehashed.get_rw().store(true, order);
844  }
845 
847  bucket(const bucket &) = delete;
848 
850  bucket &operator=(const bucket &) = delete;
851  }; /* End of struct bucket */
852 
854  using segment_traits_t = segment_traits<bucket>;
855 
857  using segment_index_t = typename segment_traits_t::segment_index_t;
858 
860  static const size_type embedded_buckets =
861  segment_traits_t::embedded_buckets;
862 
864  static const size_type first_block = segment_traits_t::first_block;
865 
867  constexpr static size_type block_table_size =
868  segment_traits_t::number_of_blocks();
869 
871  using segment_ptr_t = persistent_ptr<bucket[]>;
872 
874  using bucket_ptr_t = persistent_ptr<bucket>;
875 
877  using blocks_table_t = segment_ptr_t[block_table_size];
878 
880  using segment_enable_mutex_t = pmem::obj::mutex;
881 
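/* Per-thread size delta, padded to a full cache line (8 + 56 bytes);
 * tls_restore() folds these deltas back into on_init_size after a restart. */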
883  struct tls_data_t {
884  p<int64_t> size_diff = 0;
885  std::aligned_storage<56, 8> padding;
886  };
887 
888  using tls_t = detail::enumerable_thread_specific<tls_data_t>;
889 
890  enum feature_flags : uint32_t { FEATURE_CONSISTENT_SIZE = 1 };
891 
893  struct features {
894  p<uint32_t> compat;
895  p<uint32_t> incompat;
896  };
897 
898  /* --------------------------------------------------------- */
899 
901  p<uint64_t> my_pool_uuid;
902 
905  features layout_features;
906 
909  std::aligned_storage<sizeof(size_t), sizeof(size_t)>::type
910  my_mask_reserved;
911 
913  /* my_mask always restored on restart. */
914  std::atomic<hashcode_type> my_mask;
915 
916  /* Size of value (key and value pair) stored in a pool */
917  std::size_t value_size;
918 
920  std::aligned_storage<24, 8>::type padding1;
921 
926  blocks_table_t my_table;
927 
928  /* It must be in a separate cache line from my_mask for
929  * performance reasons */
931  std::atomic<size_type> my_size;
932 
934  std::aligned_storage<24, 8>::type padding2;
935 
937  persistent_ptr<tls_t> tls_ptr;
938 
944  p<size_t> on_init_size;
945 
947  std::aligned_storage<40, 8>::type reserved;
948 
950  segment_enable_mutex_t my_segment_enable_mutex;
951 
953  bucket my_embedded_segment[embedded_buckets];
954 
955  /* --------------------------------------------------------- */
956 
958  static constexpr features
959  header_features()
960  {
961  return {FEATURE_CONSISTENT_SIZE, 0};
962  }
963 
964  const std::atomic<hashcode_type> &
965  mask() const noexcept
966  {
967  return my_mask;
968  }
969 
970  std::atomic<hashcode_type> &
971  mask() noexcept
972  {
973  return my_mask;
974  }
975 
976  size_t
977  size() const
978  {
979  return my_size.load(std::memory_order_relaxed);
980  }
981 
982  p<int64_t> &
983  thread_size_diff()
984  {
985  assert(this->tls_ptr != nullptr);
986  return this->tls_ptr->local().size_diff;
987  }
988 
990  void
991  tls_restore()
992  {
993  assert(this->tls_ptr != nullptr);
994 
995  pool_base pop = pool_base{pmemobj_pool_by_ptr(this)};
996 
997  int64_t last_run_size = 0;
998  for (auto &data : *tls_ptr)
999  last_run_size += data.size_diff;
1000 
1001  /* Make sure that on_init_size + last_run_size >= 0 */
1002  assert(last_run_size >= 0 ||
1003  static_cast<int64_t>(static_cast<size_t>(last_run_size) +
1004  on_init_size) >= 0);
1005 
1006  transaction::run(pop, [&] {
1007  on_init_size += static_cast<size_t>(last_run_size);
1008  tls_ptr->clear();
1009  });
1010 
1011  this->my_size = on_init_size;
1012  }
1013 
1015  using const_segment_facade_t =
1016  segment_facade_impl<blocks_table_t, segment_traits_t, true>;
1017 
1019  using segment_facade_t =
1020  segment_facade_impl<blocks_table_t, segment_traits_t, false>;
1021 
1023  hash_map_base()
1024  {
1025  static_assert(
1026  sizeof(size_type) == sizeof(std::atomic<size_type>),
1027  "std::atomic should have the same layout as underlying integral type");
1028 
1029 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
1030  VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
1031 #endif
1032  layout_features = {0, 0};
1033 
1034  PMEMoid oid = pmemobj_oid(this);
1035 
1036  assert(!OID_IS_NULL(oid));
1037 
1038  my_pool_uuid = oid.pool_uuid_lo;
1039 
1040  pool_base pop = get_pool_base();
1041  /* enable embedded segments */
1042  for (size_type i = 0; i < segment_traits_t::embedded_segments;
1043  ++i) {
1044  my_table[i] =
1045  pmemobj_oid(my_embedded_segment +
1046  segment_traits_t::segment_base(i));
1047  segment_facade_t seg(my_table, i);
1048  mark_rehashed<false>(pop, seg);
1049  }
1050 
1051  on_init_size = 0;
1052 
1053  value_size = 0;
1054 
1055  this->tls_ptr = nullptr;
1056  }
1057 
1058  /*
1059  * Should be called before concurrent_hash_map destructor is called.
1060  * Otherwise, the program can terminate if an exception occurs while freeing
1061  * memory inside dtor.
1062  */
1063  void
1064  free_tls()
1065  {
1066  auto pop = get_pool_base();
1067 
1068  if ((layout_features.compat & FEATURE_CONSISTENT_SIZE) &&
1069  tls_ptr) {
1070  transaction::run(pop, [&] {
1071  delete_persistent<tls_t>(tls_ptr);
1072  tls_ptr = nullptr;
1073  });
1074  }
1075  }
1076 
1080  void
1081  calculate_mask()
1082  {
1083 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
1084  VALGRIND_HG_DISABLE_CHECKING(&my_size, sizeof(my_size));
1085  VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
1086 #endif
1087 #if LIBPMEMOBJ_CPP_VG_PMEMCHECK_ENABLED
1088  VALGRIND_PMC_REMOVE_PMEM_MAPPING(&my_size, sizeof(my_size));
1089  VALGRIND_PMC_REMOVE_PMEM_MAPPING(&my_mask, sizeof(my_mask));
1090 #endif
1091 
1092  hashcode_type m = embedded_buckets - 1;
1093 
1094  const_segment_facade_t segment(
1095  my_table, segment_traits_t::embedded_segments);
1096 
1097  while (segment.is_valid()) {
1098  m += segment.size();
1099  ++segment;
1100  }
1101 
1102  mask().store(m, std::memory_order_relaxed);
1103  }
1104 
1108  template <bool Flush = true>
1109  void
1110  mark_rehashed(pool_base &pop, segment_facade_t &segment)
1111  {
1112  for (size_type i = 0; i < segment.size(); ++i) {
1113  bucket *b = &(segment[i]);
1114 
1115  assert_not_locked<mutex_t, scoped_t>(b->mutex);
1116 
1117  b->set_rehashed(std::memory_order_relaxed);
1118  }
1119 
1120  if (Flush) {
1121  /* Flush in separate loop to avoid read-after-flush */
1122  for (size_type i = 0; i < segment.size(); ++i) {
1123  bucket *b = &(segment[i]);
1124  pop.flush(b->rehashed);
1125  }
1126 
1127  pop.drain();
1128  }
1129  }
1130 
1134  void
1135  enable_segment(segment_index_t k, bool is_initial = false)
1136  {
1137  assert(k);
1138 
1139  pool_base pop = get_pool_base();
1140  size_type sz;
1141 
1142  if (k >= first_block) {
1143  segment_facade_t new_segment(my_table, k);
1144 
1145  sz = new_segment.size();
1146  if (!new_segment.is_valid())
1147  new_segment.enable(pop);
1148 
1149  if (is_initial) {
1150  mark_rehashed(pop, new_segment);
1151  }
1152 
1153  /* double it to get the entire capacity of the container */
1154  sz <<= 1;
1155  } else {
1156  /* the first block */
1157  assert(k == segment_traits_t::embedded_segments);
1158 
1159  for (segment_index_t i = k; i < first_block; ++i) {
1160  segment_facade_t new_segment(my_table, i);
1161 
1162  if (!new_segment.is_valid())
1163  new_segment.enable(pop);
1164 
1165  if (is_initial) {
1166  mark_rehashed(pop, new_segment);
1167  }
1168  }
1169 
1170  sz = segment_traits_t::segment_size(first_block);
1171  }
1172 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
1173  ANNOTATE_HAPPENS_BEFORE(&my_mask);
1174 #endif
1175  mask().store(sz - 1, std::memory_order_release);
1176  }
1177 
1182  bucket *
1183  get_bucket(hashcode_type h) const
1184  {
1185  segment_index_t s = segment_traits_t::segment_index_of(h);
1186 
1187  h -= segment_traits_t::segment_base(s);
1188 
1189  const_segment_facade_t segment(my_table, s);
1190 
1191  assert(segment.is_valid());
1192 
1193  return &(segment[h]);
1194  }
1195 
1199  inline bool
1200  check_mask_race(hashcode_type h, hashcode_type &m) const
1201  {
1202  hashcode_type m_now, m_old = m;
1203 
1204  m_now = mask().load(std::memory_order_acquire);
1205 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
1206  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
1207 #endif
1208 
1209  if (m_old != m_now)
1210  return check_rehashing_collision(h, m_old, m = m_now);
1211 
1212  return false;
1213  }
1214 
1218  bool
1219  check_rehashing_collision(hashcode_type h, hashcode_type m_old,
1220  hashcode_type m) const
1221  {
1222  assert(m_old != m);
1223 
1224  if ((h & m_old) != (h & m)) {
1225  /* mask changed for this hashcode, a rare event; the condition
1226  * above proves that 'h' has some other bits set besides
1227  * 'm_old', so find the next applicable mask after m_old */
1228 
1229  for (++m_old; !(h & m_old); m_old <<= 1)
1230  ;
1231 
1232  m_old = (m_old << 1) - 1; /* get full mask from a bit */
1233 
1234  assert((m_old & (m_old + 1)) == 0 && m_old <= m);
1235 
1236  /* check whether it is rehashing/ed */
1237  bucket *b = get_bucket(h & m_old);
1238  return b->is_rehashed(std::memory_order_acquire);
1239  }
1240 
1241  return false;
1242  }
1243 
1248  template <typename Node, typename... Args>
1249  void
1250  insert_new_node_internal(bucket *b,
1251  detail::persistent_pool_ptr<Node> &new_node,
1252  Args &&... args)
1253  {
1254  assert(pmemobj_tx_stage() == TX_STAGE_WORK);
1255 
1256  new_node = pmem::obj::make_persistent<Node>(
1257  b->node_list, std::forward<Args>(args)...);
1258  b->node_list = new_node; /* bucket is locked */
1259  }
1260 
1265  template <typename Node, typename... Args>
1266  size_type
1267  insert_new_node(bucket *b, detail::persistent_pool_ptr<Node> &new_node,
1268  Args &&... args)
1269  {
1270  pool_base pop = get_pool_base();
1271 
1272  /*
1273  * This is only true when called from singlethreaded methods
1274  * like swap() or operator=. In that case it's safe to directly
1275  * modify on_init_size.
1276  */
1277  if (pmemobj_tx_stage() == TX_STAGE_WORK) {
1278  insert_new_node_internal(b, new_node,
1279  std::forward<Args>(args)...);
1280  this->on_init_size++;
1281  } else {
1282  auto &size_diff = thread_size_diff();
1283 
1284  pmem::obj::transaction::run(pop, [&] {
1285  insert_new_node_internal(
1286  b, new_node,
1287  std::forward<Args>(args)...);
1288  ++size_diff;
1289  });
1290  }
1291 
1292  /* Increment volatile size */
1293  return ++(this->my_size);
1294  }
1295 
1300  bool
1301  check_growth(hashcode_type m, size_type sz)
1302  {
1303  if (sz >= m) {
1304  segment_index_t new_seg =
1305  static_cast<segment_index_t>(detail::Log2(
1306  m +
1307  1)); /* optimized segment_index_of */
1308 
1309  assert(segment_facade_t(my_table, new_seg - 1)
1310  .is_valid());
1311 
1312  std::unique_lock<segment_enable_mutex_t> lock(
1313  my_segment_enable_mutex, std::try_to_lock);
1314 
1315  if (lock) {
1316  if (mask().load(std::memory_order_relaxed) ==
1317  m) {
1318  /* Otherwise, another thread already
1319  * enabled this segment */
1320  enable_segment(new_seg);
1321 
1322  return true;
1323  }
1324  }
1325  }
1326 
1327  return false;
1328  }
1329 
1333  void
1334  reserve(size_type buckets)
1335  {
1336  if (buckets == 0)
1337  return;
1338 
1339  --buckets;
1340 
1341  bool is_initial = this->size() == 0;
1342 
1343  for (size_type m = mask(); buckets > m; m = mask())
1344  enable_segment(
1345  segment_traits_t::segment_index_of(m + 1),
1346  is_initial);
1347  }
1348 
1353  void
1354  internal_swap(hash_map_base<Key, T, mutex_t, scoped_t> &table)
1355  {
1356  pool_base p = get_pool_base();
1357  {
1358  transaction::manual tx(p);
1359 
1360  this->my_pool_uuid.swap(table.my_pool_uuid);
1361 
1362  /*
1363  * As internal_swap can only be called
1364  * from one thread, and there can be an outer
1365  * transaction, we must make sure that mask and size
1366  * changes are transactional
1367  */
1368  transaction::snapshot((size_t *)&this->my_mask);
1369  transaction::snapshot((size_t *)&this->my_size);
1370 
1371  this->mask() = table.mask().exchange(
1372  this->mask(), std::memory_order_relaxed);
1373 
1374  this->my_size = table.my_size.exchange(
1375  this->my_size, std::memory_order_relaxed);
1376 
1377  /* Swap consistent size */
1378  std::swap(this->tls_ptr, table.tls_ptr);
1379 
1380  for (size_type i = 0; i < embedded_buckets; ++i)
1381  this->my_embedded_segment[i].node_list.swap(
1382  table.my_embedded_segment[i].node_list);
1383 
1384  for (size_type i = segment_traits_t::embedded_segments;
1385  i < block_table_size; ++i)
1386  this->my_table[i].swap(table.my_table[i]);
1387 
1387 
1388  transaction::commit();
1389  }
1390  }
1391 
1396  pool_base
1397  get_pool_base()
1398  {
1399  PMEMobjpool *pop =
1400  pmemobj_pool_by_oid(PMEMoid{my_pool_uuid, 0});
1401 
1402  return pool_base(pop);
1403  }
1404 }; /* End of class hash_map_base */
1405 
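/* Forward iterator over the whole map: visits buckets in index order and
 * follows each bucket's node list; used to implement begin() and end(). */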
1411 template <typename Container, bool is_const>
1412 class hash_map_iterator {
1413 public:
1414  using iterator_category = std::forward_iterator_tag;
1415  using difference_type = ptrdiff_t;
1416  using map_type = Container;
1417  using value_type = typename map_type::value_type;
1418  using node = typename map_type::node;
1419  using bucket = typename map_type::bucket;
1420  using map_ptr = typename std::conditional<is_const, const map_type *,
1421  map_type *>::type;
1422  using reference =
1423  typename std::conditional<is_const,
1424  typename map_type::const_reference,
1425  typename map_type::reference>::type;
1426  using pointer =
1427  typename std::conditional<is_const,
1428  typename map_type::const_pointer,
1429  typename map_type::pointer>::type;
1430 
1431  template <typename C, bool M, bool U>
1432  friend bool operator==(const hash_map_iterator<C, M> &i,
1433  const hash_map_iterator<C, U> &j);
1434 
1435  template <typename C, bool M, bool U>
1436  friend bool operator!=(const hash_map_iterator<C, M> &i,
1437  const hash_map_iterator<C, U> &j);
1438 
1439  friend class hash_map_iterator<map_type, true>;
1440 
1441 #if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
1442 private:
1443  template <typename Key, typename T, typename Hash, typename KeyEqual,
1444  typename MutexType, typename ScopedLockType>
1445  friend class ::pmem::obj::concurrent_hash_map;
1446 #else
1447 public: /* workaround */
1448 #endif
1449  hash_map_iterator(map_ptr map, size_t index)
1450  : my_map(map), my_index(index), my_bucket(nullptr), my_node(nullptr)
1451  {
1452  if (my_index <= my_map->mask()) {
1453  bucket_accessor acc(my_map, my_index);
1454  my_bucket = acc.get();
1455  my_node = static_cast<node *>(
1456  my_bucket->node_list.get(my_map->my_pool_uuid));
1457 
1458  if (!my_node) {
1459  advance_to_next_bucket();
1460  }
1461  }
1462  }
1463 
1464 public:
1466  hash_map_iterator() = default;
1467 
1469  hash_map_iterator(const hash_map_iterator &other)
1470  : my_map(other.my_map),
1471  my_index(other.my_index),
1472  my_bucket(other.my_bucket),
1473  my_node(other.my_node)
1474  {
1475  }
1476 
1478  template <typename U = void,
1479  typename = typename std::enable_if<is_const, U>::type>
1480  hash_map_iterator(const hash_map_iterator<map_type, false> &other)
1481  : my_map(other.my_map),
1482  my_index(other.my_index),
1483  my_bucket(other.my_bucket),
1484  my_node(other.my_node)
1485  {
1486  }
1487 
1488  hash_map_iterator &operator=(const hash_map_iterator &it) = default;
1489 
1491  reference operator*() const
1492  {
1493  assert(my_node);
1494  return my_node->item;
1495  }
1496 
1498  pointer operator->() const
1499  {
1500  return &operator*();
1501  }
1502 
1504  hash_map_iterator &
1505  operator++()
1506  {
1507  my_node = static_cast<node *>(
1508  my_node->next.get((my_map->my_pool_uuid)));
1509 
1510  if (!my_node)
1511  advance_to_next_bucket();
1512 
1513  return *this;
1514  }
1515 
1517  hash_map_iterator
1518  operator++(int)
1519  {
1520  hash_map_iterator old(*this);
1521  operator++();
1522  return old;
1523  }
1524 
1525 private:
1527  map_ptr my_map = nullptr;
1528 
1530  size_t my_index = 0;
1531 
1533  bucket *my_bucket = nullptr;
1534 
1536  node *my_node = nullptr;
1537 
1538  class bucket_accessor {
1539  public:
1540  bucket_accessor(map_ptr m, size_t index)
1541  {
1542  my_bucket = m->get_bucket(index);
1543  }
1544 
1545  bucket *
1546  get() const
1547  {
1548  return my_bucket;
1549  }
1550 
1551  private:
1552  bucket *my_bucket;
1553  };
1554 
1555  void
1556  advance_to_next_bucket()
1557  {
1558  size_t k = my_index + 1;
1559 
1560  assert(my_bucket);
1561 
1562  while (k <= my_map->mask()) {
1563  bucket_accessor acc(my_map, k);
1564  my_bucket = acc.get();
1565 
1566  if (my_bucket->node_list) {
1567  my_node = static_cast<node *>(
1568  my_bucket->node_list.get(
1569  my_map->my_pool_uuid));
1570 
1571  my_index = k;
1572 
1573  return;
1574  }
1575 
1576  ++k;
1577  }
1578 
1579  my_bucket = 0;
1580  my_node = 0;
1581  my_index = k;
1582  }
1583 };
1584 
1585 template <typename Container, bool M, bool U>
1586 bool
1587 operator==(const hash_map_iterator<Container, M> &i,
1588  const hash_map_iterator<Container, U> &j)
1589 {
1590  return i.my_node == j.my_node && i.my_map == j.my_map;
1591 }
1592 
1593 template <typename Container, bool M, bool U>
1594 bool
1595 operator!=(const hash_map_iterator<Container, M> &i,
1596  const hash_map_iterator<Container, U> &j)
1597 {
1598  return i.my_node != j.my_node || i.my_map != j.my_map;
1599 }
1600 } /* namespace concurrent_hash_map_internal */
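/* The public container: a persistent memory resident hash map that allows
 * concurrent insert, find and erase. Per-bucket and per-node locks protect
 * the data; elements are accessed through accessor (read-write) and
 * const_accessor (read-only) objects. */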
1649 template <typename Key, typename T, typename Hash, typename KeyEqual,
1650  typename MutexType, typename ScopedLockType>
1651 class concurrent_hash_map
1652  : protected concurrent_hash_map_internal::hash_map_base<Key, T, MutexType,
1653  ScopedLockType> {
1654  template <typename Container, bool is_const>
1655  friend class concurrent_hash_map_internal::hash_map_iterator;
1656 
1657 public:
1658  using size_type = typename concurrent_hash_map_internal::hash_map_base<
1659  Key, T, MutexType, ScopedLockType>::size_type;
1660  using hashcode_type =
1661  typename concurrent_hash_map_internal::hash_map_base<
1662  Key, T, MutexType, ScopedLockType>::hashcode_type;
1663  using key_type = Key;
1664  using mapped_type = T;
1665  using value_type = typename concurrent_hash_map_internal::hash_map_base<
1666  Key, T, MutexType, ScopedLockType>::node::value_type;
1667  using difference_type = ptrdiff_t;
1668  using pointer = value_type *;
1669  using const_pointer = const value_type *;
1670  using reference = value_type &;
1671  using const_reference = const value_type &;
1672  using iterator = concurrent_hash_map_internal::hash_map_iterator<
1673  concurrent_hash_map, false>;
1674  using const_iterator = concurrent_hash_map_internal::hash_map_iterator<
1675  concurrent_hash_map, true>;
1676  using hasher = Hash;
1677  using key_equal = typename concurrent_hash_map_internal::key_equal_type<
1678  Hash, KeyEqual>::type;
1679 
1680 protected:
1681  using mutex_t = MutexType;
1682  using scoped_t = ScopedLockType;
1683  /*
1684  * Explicitly use methods and types from template base class
1685  */
1686  using hash_map_base =
1687  concurrent_hash_map_internal::hash_map_base<Key, T, mutex_t,
1688  scoped_t>;
1689  using hash_map_base::calculate_mask;
1690  using hash_map_base::check_growth;
1691  using hash_map_base::check_mask_race;
1692  using hash_map_base::embedded_buckets;
1693  using hash_map_base::FEATURE_CONSISTENT_SIZE;
1694  using hash_map_base::get_bucket;
1695  using hash_map_base::get_pool_base;
1696  using hash_map_base::header_features;
1697  using hash_map_base::insert_new_node;
1698  using hash_map_base::internal_swap;
1699  using hash_map_base::layout_features;
1700  using hash_map_base::mask;
1701  using hash_map_base::reserve;
1702  using tls_t = typename hash_map_base::tls_t;
1703  using node = typename hash_map_base::node;
1704  using node_mutex_t = typename node::mutex_t;
1705  using node_ptr_t = typename hash_map_base::node_ptr_t;
1706  using bucket = typename hash_map_base::bucket;
1707  using bucket_lock_type = typename bucket::scoped_t;
1708  using segment_index_t = typename hash_map_base::segment_index_t;
1709  using segment_traits_t = typename hash_map_base::segment_traits_t;
1710  using segment_facade_t = typename hash_map_base::segment_facade_t;
1711  using scoped_lock_traits_type =
1712  concurrent_hash_map_internal::scoped_lock_traits<scoped_t>;
1713 
1714  friend class const_accessor;
1715  using persistent_node_ptr_t = detail::persistent_pool_ptr<node>;
1716 
1717  void
1718  delete_node(const node_ptr_t &n)
1719  {
1720  delete_persistent<node>(
1721  detail::static_persistent_pool_pointer_cast<node>(n)
1722  .get_persistent_ptr(this->my_pool_uuid));
1723  }
1724 
1725  template <typename K>
1726  persistent_node_ptr_t
1727  search_bucket(const K &key, bucket *b) const
1728  {
1729  assert(b->is_rehashed(std::memory_order_relaxed));
1730 
1731  persistent_node_ptr_t n =
1732  detail::static_persistent_pool_pointer_cast<node>(
1733  b->node_list);
1734 
1735  while (n &&
1736  !key_equal{}(key,
1737  n.get(this->my_pool_uuid)->item.first)) {
1738  n = detail::static_persistent_pool_pointer_cast<node>(
1739  n.get(this->my_pool_uuid)->next);
1740  }
1741 
1742  return n;
1743  }
1744 
1749  class bucket_accessor : public bucket_lock_type {
1750  bucket *my_b;
1751 
1752  public:
1753  bucket_accessor(bucket_accessor &&b) noexcept : my_b(b.my_b)
1754  {
1755  bucket_lock_type::mutex = b.bucket_lock_type::mutex;
1756  bucket_lock_type::is_writer =
1757  b.bucket_lock_type::is_writer;
1758  b.my_b = nullptr;
1759  b.bucket_lock_type::mutex = nullptr;
1760  b.bucket_lock_type::is_writer = false;
1761  }
1762 
1763  bucket_accessor(concurrent_hash_map *base,
1764  const hashcode_type h, bool writer = false)
1765  {
1766  acquire(base, h, writer);
1767  }
1768 
1773  inline void
1774  acquire(concurrent_hash_map *base, const hashcode_type h,
1775  bool writer = false)
1776  {
1777  my_b = base->get_bucket(h);
1778 
1779  if (my_b->is_rehashed(std::memory_order_acquire) ==
1780  false &&
1781  bucket_lock_type::try_acquire(this->my_b->mutex,
1782  /*write=*/true)) {
1783  if (my_b->is_rehashed(
1784  std::memory_order_relaxed) ==
1785  false) {
1786  /* recursive rehashing */
1787  base->rehash_bucket<false>(my_b, h);
1788  }
1789  } else {
1790  bucket_lock_type::acquire(my_b->mutex, writer);
1791  }
1792 
1793  assert(my_b->is_rehashed(std::memory_order_relaxed));
1794  }
1795 
1799  bool
1800  is_writer() const
1801  {
1802  return bucket_lock_type::is_writer;
1803  }
1804 
1809  bucket *
1810  get() const
1811  {
1812  return my_b;
1813  }
1814 
1819  bucket *operator->() const
1820  {
1821  return this->get();
1822  }
1823  };
1824 
1828  class serial_bucket_accessor {
1829  bucket *my_b;
1830 
1831  public:
1832  serial_bucket_accessor(concurrent_hash_map *base,
1833  const hashcode_type h,
1834  bool writer = false)
1835  {
1836  acquire(base, h, writer);
1837  }
1838 
1839  /*
1840  * Find a bucket by masked hashcode, optionally rehash
1841  */
1842  inline void
1843  acquire(concurrent_hash_map *base, const hashcode_type h,
1844  bool writer = false)
1845  {
1846  my_b = base->get_bucket(h);
1847 
1848  if (my_b->is_rehashed(std::memory_order_relaxed) ==
1849  false) {
1850  /* recursive rehashing */
1851  base->rehash_bucket<true>(my_b, h);
1852  }
1853 
1854  assert(my_b->is_rehashed(std::memory_order_relaxed));
1855  }
1856 
1863  bool
1864  is_writer() const
1865  {
1866  return true;
1867  }
1868 
1873  bucket *
1874  get() const
1875  {
1876  return my_b;
1877  }
1878 
1883  bucket *operator->() const
1884  {
1885  return this->get();
1886  }
1887  };
1888 
1889  hashcode_type
1890  get_hash_code(node_ptr_t &n)
1891  {
1892  return hasher{}(
1893  detail::static_persistent_pool_pointer_cast<node>(n)(
1894  this->my_pool_uuid)
1895  ->item.first);
1896  }
1897 
1898  template <bool serial>
1899  void
1900  rehash_bucket(bucket *b_new, const hashcode_type h)
1901  {
1902  using accessor_type = typename std::conditional<
1903  serial, serial_bucket_accessor, bucket_accessor>::type;
1904 
1905  using scoped_lock_traits_type =
1906  concurrent_hash_map_internal::scoped_lock_traits<
1907  accessor_type>;
1908 
1909  /* First two buckets should always be rehashed */
1910  assert(h > 1);
1911 
1912  pool_base pop = get_pool_base();
1913  node_ptr_t *p_new = &(b_new->node_list);
1914 
1915  /* This condition is only true when there was a failure just
1916  * before setting the rehashed flag */
1917  if (*p_new != nullptr) {
1918  assert(!b_new->is_rehashed(std::memory_order_relaxed));
1919 
1920  b_new->set_rehashed(std::memory_order_relaxed);
1921  pop.persist(b_new->rehashed);
1922 
1923  return;
1924  }
1925 
1926  /* get parent mask from the topmost bit */
1927  hashcode_type mask = (1u << detail::Log2(h)) - 1;
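/* e.g. for h = 6 (binary 110): Log2(6) = 2, mask = (1u << 2) - 1 = 3, and
 * the parent bucket is h & mask = 2 */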
1928  assert((h & mask) < h);
1929  accessor_type b_old(
1930  this, h & mask,
1931  scoped_lock_traits_type::initial_rw_state(true));
1932 
1933  pmem::obj::transaction::run(pop, [&] {
1934  /* get full mask for new bucket */
1935  mask = (mask << 1) | 1;
1936  assert((mask & (mask + 1)) == 0 && (h & mask) == h);
1937 
1938  restart:
1939  for (node_ptr_t *p_old = &(b_old->node_list),
1940  n = *p_old;
1941  n; n = *p_old) {
1942  hashcode_type c = get_hash_code(n);
1943 #ifndef NDEBUG
1944  hashcode_type bmask = h & (mask >> 1);
1945 
1946  bmask = bmask == 0
1947  ? 1 /* minimal mask of parent bucket */
1948  : (1u << (detail::Log2(bmask) + 1)) - 1;
1949 
1950  assert((c & bmask) == (h & bmask));
1951 #endif
1952 
1953  if ((c & mask) == h) {
1954  if (!b_old.is_writer() &&
1955  !scoped_lock_traits_type::
1956  upgrade_to_writer(b_old)) {
1957  goto restart;
1958  /* node ptr can be invalid due
1959  * to concurrent erase */
1960  }
1961 
1962  /* Add to new b_new */
1963  *p_new = n;
1964 
1965  /* exclude from b_old */
1966  *p_old = n(this->my_pool_uuid)->next;
1967 
1968  p_new = &(n(this->my_pool_uuid)->next);
1969  } else {
1970  /* iterate to next item */
1971  p_old = &(n(this->my_pool_uuid)->next);
1972  }
1973  }
1974 
1975  *p_new = nullptr;
1976  });
1977 
1978  /* mark rehashed */
1979  b_new->set_rehashed(std::memory_order_release);
1980  pop.persist(b_new->rehashed);
1981  }
1982 
1983  void
1984  check_incompat_features()
1985  {
1986  if (layout_features.incompat != header_features().incompat)
1987  throw pmem::layout_error(
1988  "Incompat flags mismatch, for more details go to: https://pmem.io/pmdk/cpp_obj/ \n");
1989 
1990  if ((layout_features.compat & FEATURE_CONSISTENT_SIZE) &&
1991  this->value_size != sizeof(value_type))
1992  throw pmem::layout_error(
1993  "Size of value_type is different than the one stored in the pool \n");
1994  }
1995 
1996 public:
1997  class accessor;
2001  class const_accessor
2002  : protected node::scoped_t /*which derived from no_copy*/ {
2003  friend class concurrent_hash_map<Key, T, Hash, KeyEqual,
2004  mutex_t, scoped_t>;
2005  friend class accessor;
2007  using node::scoped_t::try_acquire;
2008 
2009  public:
2013  using value_type =
2014  const typename concurrent_hash_map::value_type;
2015 
2020  bool
2021  empty() const
2022  {
2023  return !my_node;
2024  }
2025 
2032  void
2033  release()
2034  {
2035  concurrent_hash_map_internal::check_outside_tx();
2036 
2037  if (my_node) {
2038  node::scoped_t::release();
2039  my_node = 0;
2040  }
2041  }
2042 
2046  const_reference operator*() const
2047  {
2048  assert(my_node);
2049 
2050  return my_node->item;
2051  }
2052 
2056  const_pointer operator->() const
2057  {
2058  return &operator*();
2059  }
2060 
2066  const_accessor() : my_node(OID_NULL), my_hash()
2067  {
2068  concurrent_hash_map_internal::check_outside_tx();
2069  }
2070 
2074  ~const_accessor()
2075  {
2076  my_node = OID_NULL; // scoped lock's release() is called
2077  // in its destructor
2078  }
2079 
2080  protected:
2081  node_ptr_t my_node;
2082 
2083  hashcode_type my_hash;
2084  };
2085 
2090  class accessor : public const_accessor {
2091  public:
2093  using value_type = typename concurrent_hash_map::value_type;
2094 
2096  reference operator*() const
2097  {
2098  assert(this->my_node);
2099 
2100  return this->my_node->item;
2101  }
2102 
2104  pointer operator->() const
2105  {
2106  return &operator*();
2107  }
2108  };
2109 
2113  concurrent_hash_map() : hash_map_base()
2114  {
2115  runtime_initialize();
2116  }
2117 
2122  concurrent_hash_map(size_type n) : hash_map_base()
2123  {
2124  runtime_initialize();
2125 
2126  reserve(n);
2127  }
2128 
2132  concurrent_hash_map(const concurrent_hash_map &table) : hash_map_base()
2133  {
2134  runtime_initialize();
2135 
2136  reserve(table.size());
2137 
2138  internal_copy(table);
2139  }
2140 
2144  concurrent_hash_map(concurrent_hash_map &&table) : hash_map_base()
2145  {
2146  runtime_initialize();
2147 
2148  swap(table);
2149  }
2150 
2154  template <typename I>
2155  concurrent_hash_map(I first, I last)
2156  {
2157  runtime_initialize();
2158 
2159  reserve(static_cast<size_type>(std::distance(first, last)));
2160 
2161  internal_copy(first, last);
2162  }
2163 
2167  concurrent_hash_map(std::initializer_list<value_type> il)
2168  {
2169  runtime_initialize();
2170 
2171  reserve(il.size());
2172 
2173  internal_copy(il.begin(), il.end());
2174  }
2175 
2184  void
2185  runtime_initialize()
2186  {
2187  check_incompat_features();
2188 
2189  calculate_mask();
2190 
2191  /*
2192  * Handle case where hash_map was created without
2193  * FEATURE_CONSISTENT_SIZE.
2194  */
2195  if (!(layout_features.compat & FEATURE_CONSISTENT_SIZE)) {
2196  auto actual_size =
2197  std::distance(this->begin(), this->end());
2198  assert(actual_size >= 0);
2199 
2200  this->my_size = static_cast<size_t>(actual_size);
2201 
2202  auto pop = get_pool_base();
2203  transaction::run(pop, [&] {
2204  this->tls_ptr = make_persistent<tls_t>();
2205  this->on_init_size =
2206  static_cast<size_t>(actual_size);
2207  this->value_size = sizeof(value_type);
2208 
2209  layout_features.compat |=
2210  FEATURE_CONSISTENT_SIZE;
2211  });
2212  } else {
2213  assert(this->tls_ptr != nullptr);
2214  this->tls_restore();
2215  }
2216 
2217  assert(this->size() ==
2218  size_type(std::distance(this->begin(), this->end())));
2219  }
2220 
2221  [[deprecated(
2222  "runtime_initialize(bool) is now deprecated, use runtime_initialize(void)")]] void
2223  runtime_initialize(bool graceful_shutdown)
2224  {
2225  check_incompat_features();
2226 
2227  calculate_mask();
2228 
2229  if (!graceful_shutdown) {
2230  auto actual_size =
2231  std::distance(this->begin(), this->end());
2232  assert(actual_size >= 0);
2233  this->my_size = static_cast<size_type>(actual_size);
2234  } else {
2235  assert(this->size() ==
2236  size_type(std::distance(this->begin(),
2237  this->end())));
2238  }
2239  }
2240 
2252  concurrent_hash_map &
2253  operator=(const concurrent_hash_map &table)
2254  {
2255  if (this != &table) {
2256  clear();
2257  internal_copy(table);
2258  }
2259 
2260  return *this;
2261  }
2262 
2274  concurrent_hash_map &
2275  operator=(std::initializer_list<value_type> il)
2276  {
2277  clear();
2278 
2279  reserve(il.size());
2280 
2281  internal_copy(il.begin(), il.end());
2282 
2283  return *this;
2284  }
2285 
2294  void rehash(size_type n = 0);
2295 
2302  void clear();
2303 
2304  /*
2305  * Should be called before concurrent_hash_map destructor is called.
2306  * Otherwise, the program can terminate if an exception occurs while freeing
2307  * memory inside dtor.
2308  *
2309  * Hash map can NOT be used after free_data() was called (unless this
2310  * was done in a transaction and that transaction aborted).
2311  *
2312  * @throw std::transaction_error in case of PMDK transaction failure
2313  * @throw pmem::transaction_free_error when freeing underlying memory
2314  * failed.
2315  */
2316  void
2317  free_data()
2318  {
2319  auto pop = get_pool_base();
2320 
2321  transaction::run(pop, [&] {
2322  clear();
2323  this->free_tls();
2324  });
2325  }
2326 
2330  ~concurrent_hash_map()
2331  {
2332  free_data();
2333  }
2334 
2335  //------------------------------------------------------------------------
2336  // STL support - not thread-safe methods
2337  //------------------------------------------------------------------------
2338 
2345  iterator
2346  begin()
2347  {
2348  return iterator(this, 0);
2349  }
2350 
2355  iterator
2356  end()
2357  {
2358  return iterator(this, mask() + 1);
2359  }
2360 
2365  const_iterator
2366  begin() const
2367  {
2368  return const_iterator(this, 0);
2369  }
2370 
2375  const_iterator
2376  end() const
2377  {
2378  return const_iterator(this, mask() + 1);
2379  }
2380 
2384  size_type
2385  size() const
2386  {
2387  return hash_map_base::size();
2388  }
2389 
2393  bool
2394  empty() const
2395  {
2396  return this->size() == 0;
2397  }
2398 
2402  size_type
2403  max_size() const
2404  {
2405  return (~size_type(0)) / sizeof(node);
2406  }
2407 
2411  size_type
2412  bucket_count() const
2413  {
2414  return mask() + 1;
2415  }
2416 
2420  void swap(concurrent_hash_map &table);
2421 
2422  //------------------------------------------------------------------------
2423  // concurrent map operations
2424  //------------------------------------------------------------------------
2425 
2431  size_type
2432  count(const Key &key) const
2433  {
2434  concurrent_hash_map_internal::check_outside_tx();
2435 
2436  return const_cast<concurrent_hash_map *>(this)->internal_find(
2437  key, nullptr, false);
2438  }
2439 
2451  template <typename K,
2452  typename = typename std::enable_if<
2453  concurrent_hash_map_internal::
2454  has_transparent_key_equal<hasher>::value,
2455  K>::type>
2456  size_type
2457  count(const K &key) const
2458  {
2459  concurrent_hash_map_internal::check_outside_tx();
2460 
2461  return const_cast<concurrent_hash_map *>(this)->internal_find(
2462  key, nullptr, false);
2463  }
2464 
2471  bool
2472  find(const_accessor &result, const Key &key) const
2473  {
2474  concurrent_hash_map_internal::check_outside_tx();
2475 
2476  result.release();
2477 
2478  return const_cast<concurrent_hash_map *>(this)->internal_find(
2479  key, &result, false);
2480  }
2481 
2495  template <typename K,
2496  typename = typename std::enable_if<
2497  concurrent_hash_map_internal::
2498  has_transparent_key_equal<hasher>::value,
2499  K>::type>
2500  bool
2501  find(const_accessor &result, const K &key) const
2502  {
2503  concurrent_hash_map_internal::check_outside_tx();
2504 
2505  result.release();
2506 
2507  return const_cast<concurrent_hash_map *>(this)->internal_find(
2508  key, &result, false);
2509  }
2510 
2517  bool
2518  find(accessor &result, const Key &key)
2519  {
2520  concurrent_hash_map_internal::check_outside_tx();
2521 
2522  result.release();
2523 
2524  return internal_find(key, &result, true);
2525  }
2526 
2540  template <typename K,
2541  typename = typename std::enable_if<
2542  concurrent_hash_map_internal::
2543  has_transparent_key_equal<hasher>::value,
2544  K>::type>
2545  bool
2546  find(accessor &result, const K &key)
2547  {
2548  concurrent_hash_map_internal::check_outside_tx();
2549 
2550  result.release();
2551 
2552  return internal_find(key, &result, true);
2553  }
2561  bool
2562  insert(const_accessor &result, const Key &key)
2563  {
2564  concurrent_hash_map_internal::check_outside_tx();
2565 
2566  result.release();
2567 
2568  return internal_insert(key, &result, false, key);
2569  }
2570 
2578  bool
2579  insert(accessor &result, const Key &key)
2580  {
2581  concurrent_hash_map_internal::check_outside_tx();
2582 
2583  result.release();
2584 
2585  return internal_insert(key, &result, true, key);
2586  }
2587 
2595  bool
2596  insert(const_accessor &result, const value_type &value)
2597  {
2598  concurrent_hash_map_internal::check_outside_tx();
2599 
2600  result.release();
2601 
2602  return internal_insert(value.first, &result, false, value);
2603  }
2604 
2612  bool
2613  insert(accessor &result, const value_type &value)
2614  {
2615  concurrent_hash_map_internal::check_outside_tx();
2616 
2617  result.release();
2618 
2619  return internal_insert(value.first, &result, true, value);
2620  }
2621 
2628  bool
2629  insert(const value_type &value)
2630  {
2631  concurrent_hash_map_internal::check_outside_tx();
2632 
2633  return internal_insert(value.first, nullptr, false, value);
2634  }
2635 
2643  bool
2644  insert(const_accessor &result, value_type &&value)
2645  {
2646  concurrent_hash_map_internal::check_outside_tx();
2647 
2648  result.release();
2649 
2650  return internal_insert(value.first, &result, false,
2651  std::move(value));
2652  }
2653 
2661  bool
2662  insert(accessor &result, value_type &&value)
2663  {
2664  concurrent_hash_map_internal::check_outside_tx();
2665 
2666  result.release();
2667 
2668  return internal_insert(value.first, &result, true,
2669  std::move(value));
2670  }
2671 
2678  bool
2679  insert(value_type &&value)
2680  {
2681  concurrent_hash_map_internal::check_outside_tx();
2682 
2683  return internal_insert(value.first, nullptr, false,
2684  std::move(value));
2685  }
2686 
2692  template <typename I>
2693  void
2694  insert(I first, I last)
2695  {
2696  concurrent_hash_map_internal::check_outside_tx();
2697 
2698  for (; first != last; ++first)
2699  insert(*first);
2700  }
2701 
2707  void
2708  insert(std::initializer_list<value_type> il)
2709  {
2710  concurrent_hash_map_internal::check_outside_tx();
2711 
2712  insert(il.begin(), il.end());
2713  }
2714 
2723  template <typename M>
2724  bool
2725  insert_or_assign(const key_type &key, M &&obj)
2726  {
2727  concurrent_hash_map_internal::check_outside_tx();
2728 
2729  accessor acc;
2730  auto result = internal_insert(key, &acc, true, key,
2731  std::forward<M>(obj));
2732 
2733  if (!result) {
2734  pool_base pop = get_pool_base();
2735  pmem::obj::transaction::manual tx(pop);
2736  acc->second = std::forward<M>(obj);
2737  pmem::obj::transaction::commit();
2738  }
2739 
2740  return result;
2741  }
2742 
2751  template <typename M>
2752  bool
2753  insert_or_assign(key_type &&key, M &&obj)
2754  {
2755  concurrent_hash_map_internal::check_outside_tx();
2756 
2757  accessor acc;
2758  auto result = internal_insert(key, &acc, true, std::move(key),
2759  std::forward<M>(obj));
2760 
2761  if (!result) {
2762  pool_base pop = get_pool_base();
2763  pmem::obj::transaction::manual tx(pop);
2764  acc->second = std::forward<M>(obj);
2765  pmem::obj::transaction::commit();
2766  }
2767 
2768  return result;
2769  }
2770 
2779  template <
2780  typename K, typename M,
2781  typename = typename std::enable_if<
2782  concurrent_hash_map_internal::has_transparent_key_equal<
2783  hasher>::value &&
2784  std::is_constructible<key_type, K>::value,
2785  K>::type>
2786  bool
2787  insert_or_assign(K &&key, M &&obj)
2788  {
2789  concurrent_hash_map_internal::check_outside_tx();
2790 
2791  accessor acc;
2792  auto result =
2793  internal_insert(key, &acc, true, std::forward<K>(key),
2794  std::forward<M>(obj));
2795 
2796  if (!result) {
2797  pool_base pop = get_pool_base();
2798  pmem::obj::transaction::manual tx(pop);
2799  acc->second = std::forward<M>(obj);
2800  pmem::obj::transaction::commit();
2801  }
2802 
2803  return result;
2804  }
2805 
2814  bool
2815  erase(const Key &key)
2816  {
2817  concurrent_hash_map_internal::check_outside_tx();
2818 
2819  return internal_erase(key);
2820  }
2821 
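/* Typical calls (illustrative): defragment() processes the whole bucket
 * range, defragment(0, 25) processes only the first quarter of it. */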
2840  pobj_defrag_result
2841  defragment(double start_percent = 0, double amount_percent = 100)
2842  {
2843  double end_percent = start_percent + amount_percent;
2844  if (start_percent < 0 || start_percent >= 100 ||
2845  end_percent < 0 || end_percent > 100 ||
2846  start_percent >= end_percent) {
2847  throw std::range_error("incorrect range");
2848  }
2849 
2850  size_t max_index = mask().load(std::memory_order_acquire);
2851  size_t start_index = (start_percent * max_index) / 100;
2852  size_t end_index = (end_percent * max_index) / 100;
2853 
2854 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
2855  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
2856 #endif
2857 
2858  /* Create defrag object for elements in the current pool */
2859  pmem::obj::defrag my_defrag(this->get_pool_base());
2860  mutex_vector mv;
2861 
2862  /*
2863  * Locks are taken in the backward order to avoid deadlocks
2864  * with the rehashing of buckets.
2865  *
2866  * We do '+ 1' and '- 1' to handle the 'i == 0' case.
2867  */
2868  for (size_t i = end_index + 1; i >= start_index + 1; i--) {
2869  /*
2870  * All locks will be unlocked automatically
2871  * in the destructor of 'mv'.
2872  */
2873  bucket *b = mv.push_and_try_lock(this, i - 1);
2874  if (b == nullptr)
2875  continue;
2876 
2877  defrag_save_nodes(b, my_defrag);
2878  }
2879 
2880  return my_defrag.run();
2881  }
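 	/*
 	 * Usage sketch for defragment() (illustrative only; 'map' as above).
 	 * The arguments select a percentage range of buckets; values stored
 	 * in those buckets are registered for defragmentation and processed
 	 * in one call:
 	 *
 	 *   try {
 	 *           // defragment values kept in the first half of the buckets
 	 *           pobj_defrag_result res = map.defragment(0, 50);
 	 *           // res.total and res.relocated describe what was processed
 	 *   } catch (std::range_error &e) {
 	 *           // invalid start_percent/amount_percent combination
 	 *   } catch (pmem::defrag_error &e) {
 	 *           // the defragmentation itself failed
 	 *   }
 	 */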
2882 
2897  template <typename K,
2898  typename = typename std::enable_if<
2899  concurrent_hash_map_internal::
2900  has_transparent_key_equal<hasher>::value,
2901  K>::type>
2902  bool
2903  erase(const K &key)
2904  {
2905  concurrent_hash_map_internal::check_outside_tx();
2906 
2907  return internal_erase(key);
2908  }
2909 
2910 protected:
2911  /*
2912  * Try to acquire the mutex for read or write.
2913  *
2914  * If acquiring succeeds, returns true; otherwise it retries a few times.
2915  * If acquiring fails after all attempts, returns false.
2916  */
2917  bool try_acquire_item(const_accessor *result, node_mutex_t &mutex,
2918  bool write);
2919 
2924  class mutex_vector {
2925  public:
2926  using mutex_t = MutexType;
2927 
2929  bucket *
2930  push_and_try_lock(concurrent_hash_map *base, hashcode_type h)
2931  {
2932  vec.emplace_back(base, h, true /*writer*/);
2933  bucket *b = vec.back().get();
2934 
2935  auto node_ptr = static_cast<node *>(
2936  b->node_list.get(base->my_pool_uuid));
2937 
2938  while (node_ptr) {
2939  const_accessor ca;
2940  if (!base->try_acquire_item(&ca,
2941  node_ptr->mutex,
2942  /*write=*/true)) {
2943  vec.pop_back();
2944  return nullptr;
2945  }
2946 
2947  node_ptr =
2948  static_cast<node *>(node_ptr->next.get(
2949  (base->my_pool_uuid)));
2950  }
2951 
2952  return b;
2953  }
2954 
2955  private:
2956  std::vector<bucket_accessor> vec;
2957  };
2958 
2959  template <typename K>
2960  bool internal_find(const K &key, const_accessor *result, bool write);
2961 
2962  template <typename K, typename... Args>
2963  bool internal_insert(const K &key, const_accessor *result, bool write,
2964  Args &&... args);
2965 
2966  /* Obtain pointer to node and lock bucket */
2967  template <bool Bucket_rw_lock, typename K>
2968  persistent_node_ptr_t
2969  get_node(const K &key, bucket_accessor &b)
2970  {
2971  /* find a node */
2972  auto n = search_bucket(key, b.get());
2973 
2974  if (!n) {
2975  if (Bucket_rw_lock && !b.is_writer() &&
2976  !scoped_lock_traits_type::upgrade_to_writer(b)) {
2977  /* Rerun search_list, in case another
2978  * thread inserted the item during the
2979  * upgrade. */
2980  n = search_bucket(key, b.get());
2981  if (n) {
2982  /* unfortunately, it did */
2983  scoped_lock_traits_type::
2984  downgrade_to_reader(b);
2985  return n;
2986  }
2987  }
2988  }
2989 
2990  return n;
2991  }
2992 
2993  template <typename K>
2994  bool internal_erase(const K &key);
2995 
2996  void clear_segment(segment_index_t s);
2997 
3001  void internal_copy(const concurrent_hash_map &source);
3002 
3003  template <typename I>
3004  void internal_copy(I first, I last);
3005 
3010  void
3011  defrag_save_nodes(bucket *b, pmem::obj::defrag &defrag)
3012  {
3013  auto node_ptr = static_cast<node *>(
3014  b->node_list.get(this->my_pool_uuid));
3015 
3016  while (node_ptr) {
3017  /*
3018  * We do not perform the defragmentation
3019  * on node pointers, because nodes
3020  * always have the same size.
3021  */
3022  defrag.add(node_ptr->item.first);
3023  defrag.add(node_ptr->item.second);
3024 
3025  node_ptr = static_cast<node *>(
3026  node_ptr->next.get((this->my_pool_uuid)));
3027  }
3028  }
3029 }; // class concurrent_hash_map
3030 
3031 template <typename Key, typename T, typename Hash, typename KeyEqual,
3032  typename MutexType, typename ScopedLockType>
3033 bool
3034 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3035  ScopedLockType>::try_acquire_item(const_accessor *result,
3036  node_mutex_t &mutex,
3037  bool write)
3038 {
3039  /* acquire the item */
3040  if (!result->try_acquire(mutex, write)) {
3041  for (detail::atomic_backoff backoff(true);;) {
3042  if (result->try_acquire(mutex, write))
3043  break;
3044 
3045  if (!backoff.bounded_pause())
3046  return false;
3047  }
3048  }
3049 
3050  return true;
3051 }
3052 
3053 template <typename Key, typename T, typename Hash, typename KeyEqual,
3054  typename MutexType, typename ScopedLockType>
3055 template <typename K>
3056 bool
3057 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3058  ScopedLockType>::internal_find(const K &key,
3059  const_accessor *result,
3060  bool write)
3061 {
3062  assert(!result || !result->my_node);
3063 
3064  hashcode_type m = mask().load(std::memory_order_acquire);
3065 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
3066  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
3067 #endif
3068 
3069  assert((m & (m + 1)) == 0);
3070 
3071  hashcode_type const h = hasher{}(key);
3072 
3073  persistent_node_ptr_t node;
3074 
3075  while (true) {
3076  /* get bucket and acquire the lock */
3077  bucket_accessor b(
3078  this, h & m,
3079  scoped_lock_traits_type::initial_rw_state(false));
3080  node = get_node<false>(key, b);
3081 
3082  if (!node) {
3083  /* Element was possibly relocated, try again */
3084  if (check_mask_race(h, m)) {
3085  b.release();
3086  continue;
3087  } else {
3088  return false;
3089  }
3090  }
3091 
3092  /* Either the item does not need to be acquired, or it was acquired */
3093  if (!result ||
3094  try_acquire_item(
3095  result, node.get(this->my_pool_uuid)->mutex, write))
3096  break;
3097 
3098  /* the wait is taking too long, restart the
3099  * operation */
3100  b.release();
3101 
3102  std::this_thread::yield();
3103 
3104  m = mask().load(std::memory_order_acquire);
3105 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
3106  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
3107 #endif
3108  }
3109 
3110  if (result) {
3111  result->my_node = node.get_persistent_ptr(this->my_pool_uuid);
3112  result->my_hash = h;
3113  }
3114 
3115  return true;
3116 }
3117 
3118 template <typename Key, typename T, typename Hash, typename KeyEqual,
3119  typename MutexType, typename ScopedLockType>
3120 template <typename K, typename... Args>
3121 bool
3122 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3123  ScopedLockType>::internal_insert(const K &key,
3124  const_accessor *result,
3125  bool write,
3126  Args &&... args)
3127 {
3128  assert(!result || !result->my_node);
3129 
3130  hashcode_type m = mask().load(std::memory_order_acquire);
3131 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
3132  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
3133 #endif
3134 
3135  assert((m & (m + 1)) == 0);
3136 
3137  hashcode_type const h = hasher{}(key);
3138 
3139  persistent_node_ptr_t node;
3140  size_t new_size = 0;
3141  bool inserted = false;
3142 
3143  while (true) {
3144  /* get bucket and acquire the lock */
3145  bucket_accessor b(
3146  this, h & m,
3147  scoped_lock_traits_type::initial_rw_state(true));
3148  node = get_node<true>(key, b);
3149 
3150  if (!node) {
3151  /* Element was possibly relocated, try again */
3152  if (check_mask_race(h, m)) {
3153  b.release();
3154  continue;
3155  }
3156 
3157  /* insert and set flag to grow the container */
3158  new_size = insert_new_node(b.get(), node,
3159  std::forward<Args>(args)...);
3160  inserted = true;
3161  }
3162 
3163  /* Either the item does not need to be acquired, or it was acquired */
3164  if (!result ||
3165  try_acquire_item(
3166  result, node.get(this->my_pool_uuid)->mutex, write))
3167  break;
3168 
3169  /* the wait is taking too long, restart the
3170  * operation */
3171  b.release();
3172 
3173  std::this_thread::yield();
3174 
3175  m = mask().load(std::memory_order_acquire);
3176 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
3177  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
3178 #endif
3179  }
3180 
3181  if (result) {
3182  result->my_node = node.get_persistent_ptr(this->my_pool_uuid);
3183  result->my_hash = h;
3184  }
3185 
3186  check_growth(m, new_size);
3187 
3188  return inserted;
3189 }
3190 
3191 template <typename Key, typename T, typename Hash, typename KeyEqual,
3192  typename MutexType, typename ScopedLockType>
3193 template <typename K>
3194 bool
3195 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3196  ScopedLockType>::internal_erase(const K &key)
3197 {
3198  node_ptr_t n;
3199  hashcode_type const h = hasher{}(key);
3200  hashcode_type m = mask().load(std::memory_order_acquire);
3201 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
3202  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
3203 #endif
3204 
3205  pool_base pop = get_pool_base();
3206 
3207 restart : {
3208  /* lock scope */
3209  /* get bucket */
3210  bucket_accessor b(this, h & m,
3211  scoped_lock_traits_type::initial_rw_state(true));
3212 
3213 search:
3214  node_ptr_t *p = &b->node_list;
3215  n = *p;
3216 
3217  while (n &&
3218  !key_equal{}(key,
3219  detail::static_persistent_pool_pointer_cast<node>(
3220  n)(this->my_pool_uuid)
3221  ->item.first)) {
3222  p = &n(this->my_pool_uuid)->next;
3223  n = *p;
3224  }
3225 
3226  if (!n) {
3227  /* not found, but the mask could have changed */
3228  if (check_mask_race(h, m))
3229  goto restart;
3230 
3231  return false;
3232  } else if (!b.is_writer() &&
3233  !scoped_lock_traits_type::upgrade_to_writer(b)) {
3234  if (check_mask_race(h, m)) /* contended upgrade, check mask */
3235  goto restart;
3236 
3237  goto search;
3238  }
3239 
3240  persistent_ptr<node> del = n(this->my_pool_uuid);
3241 
3242  {
3243  /* We cannot remove this element immediately because
3244  * other threads might work with this element via
3245  * accessors. The item_locker required to wait while
3246  * other threads use the node. */
3247  const_accessor acc;
3248  if (!try_acquire_item(&acc, del->mutex, true)) {
3249  /* the wait is taking too long, restart the operation */
3250  b.release();
3251 
3252  std::this_thread::yield();
3253 
3254  m = mask().load(std::memory_order_acquire);
3255 
3256  goto restart;
3257  }
3258  }
3259 
3260  assert(pmemobj_tx_stage() == TX_STAGE_NONE);
3261 
3262  auto &size_diff = this->thread_size_diff();
3263 
3264  /* Only one thread can delete it, due to the write lock on the
3265  * bucket */
3266  transaction::run(pop, [&] {
3267  *p = del->next;
3268  delete_node(del);
3269 
3270  --size_diff;
3271  });
3272 
3273  --(this->my_size);
3274 }
3275 
3276  return true;
3277 }
3278 
3279 template <typename Key, typename T, typename Hash, typename KeyEqual,
3280  typename MutexType, typename ScopedLockType>
3281 void
3282 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3283  ScopedLockType>::swap(concurrent_hash_map &table)
3284 {
3285  internal_swap(table);
3286 }
3287 
3288 template <typename Key, typename T, typename Hash, typename KeyEqual,
3289  typename MutexType, typename ScopedLockType>
3290 void
3291 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::rehash(
3292  size_type sz)
3293 {
3294  concurrent_hash_map_internal::check_outside_tx();
3295 
3296  reserve(sz);
3297  hashcode_type m = mask();
3298 
3299  /* Only the last segment needs to be scanned for rehashing; 'b' is the
3300  * size of (and the first index into) that last segment. */
3301  hashcode_type b = (m + 1) >> 1;
3302 
3303  /* zero or power of 2 */
3304  assert((b & (b - 1)) == 0);
3305 
3306  for (; b <= m; ++b) {
3307  bucket *bp = get_bucket(b);
3308 
3309  concurrent_hash_map_internal::assert_not_locked<mutex_t,
3310  scoped_t>(
3311  bp->mutex);
3312  /* XXX Need to investigate if this statement is needed */
3313  if (bp->is_rehashed(std::memory_order_relaxed) == false)
3314  rehash_bucket<true>(bp, b);
3315  }
3316 }
3317 
3318 template <typename Key, typename T, typename Hash, typename KeyEqual,
3319  typename MutexType, typename ScopedLockType>
3320 void
3321 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::clear()
3322 {
3323  hashcode_type m = mask();
3324 
3325  assert((m & (m + 1)) == 0);
3326 
3327 #ifndef NDEBUG
3328  /* check consistency */
3329  for (segment_index_t b = 0; b <= m; ++b) {
3330  bucket *bp = get_bucket(b);
3331  concurrent_hash_map_internal::assert_not_locked<mutex_t,
3332  scoped_t>(
3333  bp->mutex);
3334  }
3335 #endif
3336 
3337  pool_base pop = get_pool_base();
3338  { /* transaction scope */
3339 
3340  transaction::manual tx(pop);
3341 
3342  assert(this->tls_ptr != nullptr);
3343  this->tls_ptr->clear();
3344 
3345  this->on_init_size = 0;
3346 
3347  segment_index_t s = segment_traits_t::segment_index_of(m);
3348 
3349  assert(s + 1 == this->block_table_size ||
3350  !segment_facade_t(this->my_table, s + 1).is_valid());
3351 
3352  do {
3353  clear_segment(s);
3354  } while (s-- > 0);
3355 
3356  /*
3357  * As clear() can only be called from one thread,
3358  * and there can be an outer transaction, we must
3359  * make sure that the mask and size changes are
3360  * transactional.
3361  */
3362  transaction::snapshot((size_t *)&this->my_mask);
3363  transaction::snapshot((size_t *)&this->my_size);
3364 
3365  mask().store(embedded_buckets - 1, std::memory_order_relaxed);
3366  this->my_size = 0;
3367 
3368  transaction::commit();
3369  }
3370 }
3371 
3372 template <typename Key, typename T, typename Hash, typename KeyEqual,
3373  typename MutexType, typename ScopedLockType>
3374 void
3375 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3376  ScopedLockType>::clear_segment(segment_index_t s)
3377 {
3378  segment_facade_t segment(this->my_table, s);
3379 
3380  assert(segment.is_valid());
3381 
3382  size_type sz = segment.size();
3383  for (segment_index_t i = 0; i < sz; ++i) {
3384  for (node_ptr_t n = segment[i].node_list; n;
3385  n = segment[i].node_list) {
3386  segment[i].node_list = n(this->my_pool_uuid)->next;
3387  delete_node(n);
3388  }
3389  }
3390 
3391  if (s >= segment_traits_t::embedded_segments)
3392  segment.disable();
3393 }
3394 
3395 template <typename Key, typename T, typename Hash, typename KeyEqual,
3396  typename MutexType, typename ScopedLockType>
3397 void
3398 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3399  ScopedLockType>::internal_copy(const concurrent_hash_map &source)
3400 {
3401  auto pop = get_pool_base();
3402 
3403  reserve(source.size());
3404  internal_copy(source.begin(), source.end());
3405 }
3406 
3407 template <typename Key, typename T, typename Hash, typename KeyEqual,
3408  typename MutexType, typename ScopedLockType>
3409 template <typename I>
3410 void
3411 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3412  ScopedLockType>::internal_copy(I first, I last)
3413 {
3414  hashcode_type m = mask();
3415 
3416  for (; first != last; ++first) {
3417  hashcode_type h = hasher{}(first->first);
3418  bucket *b = get_bucket(h & m);
3419 
3420  assert(b->is_rehashed(std::memory_order_relaxed));
3421 
3422  detail::persistent_pool_ptr<node> p;
3423  insert_new_node(b, p, *first);
3424  }
3425 }
3426 
3427 template <typename Key, typename T, typename Hash, typename KeyEqual,
3428  typename MutexType, typename ScopedLockType>
3429 inline bool
3430 operator==(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3431  ScopedLockType> &a,
3432  const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3433  ScopedLockType> &b)
3434 {
3435  if (a.size() != b.size())
3436  return false;
3437 
3438  typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3439  ScopedLockType>::const_iterator
3440  i(a.begin()),
3441  i_end(a.end());
3442 
3443  typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3444  ScopedLockType>::const_iterator j,
3445  j_end(b.end());
3446 
3447  for (; i != i_end; ++i) {
3448  j = b.equal_range(i->first).first;
3449 
3450  if (j == j_end || !(i->second == j->second))
3451  return false;
3452  }
3453 
3454  return true;
3455 }
3456 
3457 template <typename Key, typename T, typename Hash, typename KeyEqual,
3458  typename MutexType, typename ScopedLockType>
3459 inline bool
3460 operator!=(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3461  ScopedLockType> &a,
3462  const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3463  ScopedLockType> &b)
3464 {
3465  return !(a == b);
3466 }
3467 
3468 template <typename Key, typename T, typename Hash, typename KeyEqual,
3469  typename MutexType, typename ScopedLockType>
3470 inline void
3471 swap(concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType> &a,
3472  concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType> &b)
3473 {
3474  a.swap(b);
3475 }
3476 
3477 } /* namespace obj */
3478 } /* namespace pmem */
3479 
3480 #endif /* PMEMOBJ_CONCURRENT_HASH_MAP_HPP */
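
The typical lifecycle of a persistent concurrent_hash_map, from pool creation through recovery after restart, looks roughly like the sketch below. The pool path, layout name and root structure are illustrative assumptions, not something defined by this header, and the include path assumes the containers are installed under libpmemobj++/container/.

#include <libpmemobj++/container/concurrent_hash_map.hpp>
#include <libpmemobj++/make_persistent.hpp>
#include <libpmemobj++/p.hpp>
#include <libpmemobj++/persistent_ptr.hpp>
#include <libpmemobj++/pool.hpp>
#include <libpmemobj++/transaction.hpp>

using map_type = pmem::obj::concurrent_hash_map<pmem::obj::p<int>,
						 pmem::obj::p<int>>;

struct root {
	pmem::obj::persistent_ptr<map_type> map;
};

int
main()
{
	auto pop = pmem::obj::pool<root>::open("/path/to/poolfile", "layout");
	auto r = pop.root();

	if (r->map == nullptr) {
		/* First run: allocate the map transactionally. */
		pmem::obj::transaction::run(pop, [&] {
			r->map = pmem::obj::make_persistent<map_type>();
		});
	} else {
		/* Subsequent runs: rebuild volatile state after restart. */
		r->map->runtime_initialize();
	}

	map_type::accessor acc;
	r->map->insert(acc, map_type::value_type(1, 42));
	acc.release();

	r->map->erase(1);

	pop.close();
	return 0;
}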