PMDK C++ bindings  1.8.1
This is the C++ bindings documentation for PMDK's libpmemobj.
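As a quick orientation before the full header listing, here is a minimal usage sketch of pmem::obj::concurrent_hash_map. The pool file path, the layout name "chm_example" and the root structure are illustrative assumptions, not part of this header, and the include path of the container header may differ between releases.

#include <libpmemobj++/container/concurrent_hash_map.hpp>
#include <libpmemobj++/make_persistent.hpp>
#include <libpmemobj++/p.hpp>
#include <libpmemobj++/persistent_ptr.hpp>
#include <libpmemobj++/pool.hpp>
#include <libpmemobj++/transaction.hpp>

#include <iostream>

using map_type =
	pmem::obj::concurrent_hash_map<pmem::obj::p<int>, pmem::obj::p<int>>;

struct root {
	pmem::obj::persistent_ptr<map_type> map; /* hypothetical root object */
};

int
main()
{
	/* assumes the pool file already exists */
	auto pop = pmem::obj::pool<root>::open("/path/to/poolfile", "chm_example");
	auto r = pop.root();

	if (r->map == nullptr) {
		/* allocate the map transactionally on first use */
		pmem::obj::transaction::run(pop, [&] {
			r->map = pmem::obj::make_persistent<map_type>();
		});
	}

	/* rebuild volatile runtime state after (re)opening the pool */
	r->map->runtime_initialize();

	/* thread-safe insert; accessors hold a per-element lock while alive */
	r->map->insert(map_type::value_type(1, 42));

	map_type::const_accessor acc;
	if (r->map->find(acc, 1))
		std::cout << acc->second.get_ro() << std::endl;

	pop.close();
	return 0;
}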
concurrent_hash_map.hpp
1 /*
2  * Copyright 2019-2020, Intel Corporation
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  *
8  * * Redistributions of source code must retain the above copyright
9  * notice, this list of conditions and the following disclaimer.
10  *
11  * * Redistributions in binary form must reproduce the above copyright
12  * notice, this list of conditions and the following disclaimer in
13  * the documentation and/or other materials provided with the
14  * distribution.
15  *
16  * * Neither the name of the copyright holder nor the names of its
17  * contributors may be used to endorse or promote products derived
18  * from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
38 #ifndef PMEMOBJ_CONCURRENT_HASH_MAP_HPP
39 #define PMEMOBJ_CONCURRENT_HASH_MAP_HPP
40 
41 #include <libpmemobj++/detail/atomic_backoff.hpp>
42 #include <libpmemobj++/detail/common.hpp>
43 #include <libpmemobj++/detail/template_helpers.hpp>
44 
45 #include <libpmemobj++/make_persistent.hpp>
46 #include <libpmemobj++/mutex.hpp>
47 #include <libpmemobj++/p.hpp>
48 #include <libpmemobj++/persistent_ptr.hpp>
49 #include <libpmemobj++/shared_mutex.hpp>
50 
51 #include <libpmemobj++/detail/persistent_pool_ptr.hpp>
52 #include <libpmemobj++/transaction.hpp>
53 
54 #include <atomic>
55 #include <cassert>
56 #include <functional>
57 #include <initializer_list>
58 #include <iterator> // for std::distance
59 #include <memory>
60 #include <mutex>
61 #include <thread>
62 #include <type_traits>
63 #include <utility>
64 
65 namespace std
66 {
70 template <typename T>
71 struct hash<pmem::obj::p<T>> {
72  size_t
73  operator()(const pmem::obj::p<T> &x) const
74  {
75  return hash<T>()(x.get_ro());
76  }
77 };
78 } /* namespace std */
79 
80 namespace pmem
81 {
82 namespace obj
83 {
84 
85 namespace concurrent_hash_map_internal
86 {
87 template <typename SharedMutexT>
88 class shared_mutex_scoped_lock {
89  using rw_mutex_type = SharedMutexT;
90 
91 public:
92  shared_mutex_scoped_lock(const shared_mutex_scoped_lock &) = delete;
93  shared_mutex_scoped_lock &
94  operator=(const shared_mutex_scoped_lock &) = delete;
95 
97  shared_mutex_scoped_lock() : mutex(nullptr), is_writer(false)
98  {
99  }
100 
102  shared_mutex_scoped_lock(rw_mutex_type &m, bool write = true)
103  : mutex(nullptr)
104  {
105  acquire(m, write);
106  }
107 
109  ~shared_mutex_scoped_lock()
110  {
111  if (mutex)
112  release();
113  }
114 
116  void
117  acquire(rw_mutex_type &m, bool write = true)
118  {
119  is_writer = write;
120  mutex = &m;
121  if (write)
122  mutex->lock();
123  else
124  mutex->lock_shared();
125  }
126 
130  void
131  release()
132  {
133  assert(mutex);
134  rw_mutex_type *m = mutex;
135  mutex = nullptr;
136  if (is_writer) {
137  m->unlock();
138  } else {
139  m->unlock_shared();
140  }
141  }
142 
147  bool
148  try_acquire(rw_mutex_type &m, bool write = true)
149  {
150  assert(!mutex);
151  bool result;
152  is_writer = write;
153  result = write ? m.try_lock() : m.try_lock_shared();
154  if (result)
155  mutex = &m;
156  return result;
157  }
158 
159 protected:
164  rw_mutex_type *mutex;
169  bool is_writer;
170 }; /* class shared_mutex_scoped_lock */
171 
172 template <typename ScopedLockType>
173 using scoped_lock_upgrade_to_writer =
174  decltype(std::declval<ScopedLockType>().upgrade_to_writer());
175 
176 template <typename ScopedLockType>
177 using scoped_lock_has_upgrade_to_writer =
178  detail::supports<ScopedLockType, scoped_lock_upgrade_to_writer>;
179 
180 template <typename ScopedLockType>
181 using scoped_lock_downgrade_to_reader =
182  decltype(std::declval<ScopedLockType>().downgrade_to_reader());
183 
184 template <typename ScopedLockType>
185 using scoped_lock_has_downgrade_to_reader =
186  detail::supports<ScopedLockType, scoped_lock_downgrade_to_reader>;
187 
188 template <typename ScopedLockType,
189  bool = scoped_lock_has_upgrade_to_writer<ScopedLockType>::value
190  &&scoped_lock_has_downgrade_to_reader<ScopedLockType>::value>
191 class scoped_lock_traits {
192 public:
193  using scope_lock_type = ScopedLockType;
194 
195  static bool
196  initial_rw_state(bool write)
197  {
198  /* For upgradeable locks, initial state is always read */
199  return false;
200  }
201 
202  static bool
203  upgrade_to_writer(scope_lock_type &lock)
204  {
205  return lock.upgrade_to_writer();
206  }
207 
208  static bool
209  downgrade_to_reader(scope_lock_type &lock)
210  {
211  return lock.downgrade_to_reader();
212  }
213 };
214 
215 template <typename ScopedLockType>
216 class scoped_lock_traits<ScopedLockType, false> {
217 public:
218  using scope_lock_type = ScopedLockType;
219 
220  static bool
221  initial_rw_state(bool write)
222  {
223  /* For non-upgradeable locks, we take lock in required mode
224  * immediately */
225  return write;
226  }
227 
228  static bool
229  upgrade_to_writer(scope_lock_type &lock)
230  {
231  /* This overload is for locks which do not support upgrade
232  * operation. For those locks, upgrade_to_writer should not be
233  * called when holding a read lock */
234  return true;
235  }
236 
237  static bool
238  downgrade_to_reader(scope_lock_type &lock)
239  {
240  /* This overload is for locks which do not support downgrade
241  * operation. For those locks, downgrade_to_reader should never
242  * be called */
243  assert(false);
244 
245  return false;
246  }
247 };
248 }
249 
250 template <typename Key, typename T, typename Hash = std::hash<Key>,
251  typename KeyEqual = std::equal_to<Key>,
252  typename MutexType = pmem::obj::shared_mutex,
253  typename ScopedLockType = concurrent_hash_map_internal::
254  shared_mutex_scoped_lock<MutexType>>
255 class concurrent_hash_map;
256 
258 namespace concurrent_hash_map_internal
259 {
260 /* Helper method which throws an exception when called in a tx */
261 static inline void
262 check_outside_tx()
263 {
264  if (pmemobj_tx_stage() != TX_STAGE_NONE)
266  "Function called inside transaction scope.");
267 }
268 
269 template <typename Hash>
270 using transparent_key_equal = typename Hash::transparent_key_equal;
271 
272 template <typename Hash>
273 using has_transparent_key_equal = detail::supports<Hash, transparent_key_equal>;
274 
275 template <typename Hash, typename Pred,
276  bool = has_transparent_key_equal<Hash>::value>
277 struct key_equal_type {
278  using type = typename Hash::transparent_key_equal;
279 };
280 
281 template <typename Hash, typename Pred>
282 struct key_equal_type<Hash, Pred, false> {
283  using type = Pred;
284 };
285 
286 template <typename Mutex, typename ScopedLockType>
287 void
288 assert_not_locked(Mutex &mtx)
289 {
290 #ifndef NDEBUG
291  ScopedLockType scoped_lock;
292  assert(scoped_lock.try_acquire(mtx));
293  scoped_lock.release();
294 #else
295  (void)mtx;
296 #endif
297 }
298 
299 template <typename Key, typename T, typename MutexType, typename ScopedLockType>
300 struct hash_map_node {
302  using mutex_t = MutexType;
303 
305  using scoped_t = ScopedLockType;
306 
307  using value_type = std::pair<const Key, T>;
308 
310  using node_ptr_t = detail::persistent_pool_ptr<
311  hash_map_node<Key, T, mutex_t, scoped_t>>;
312 
314  node_ptr_t next;
315 
317  mutex_t mutex;
318 
320  value_type item;
321 
322  hash_map_node() : next(OID_NULL)
323  {
324  }
325 
326  hash_map_node(const node_ptr_t &_next) : next(_next)
327  {
328  }
329 
330  hash_map_node(node_ptr_t &&_next) : next(std::move(_next))
331  {
332  }
333 
334  hash_map_node(const node_ptr_t &_next, const Key &key)
335  : next(_next), item(key, T())
336  {
337  }
338 
339  hash_map_node(const node_ptr_t &_next, const Key &key, const T &t)
340  : next(_next), item(key, t)
341  {
342  }
343 
344  hash_map_node(const node_ptr_t &_next, value_type &&i)
345  : next(_next), item(std::move(i))
346  {
347  }
348 
349  template <typename... Args>
350  hash_map_node(node_ptr_t &&_next, Args &&... args)
351  : next(std::forward<node_ptr_t>(_next)),
352  item(std::forward<Args>(args)...)
353  {
354  }
355 
356  hash_map_node(const node_ptr_t &_next, const value_type &i)
357  : next(_next), item(i)
358  {
359  }
360 
362  hash_map_node(const hash_map_node &) = delete;
363 
365  hash_map_node &operator=(const hash_map_node &) = delete;
366 }; /* struct node */
367 
372 template <typename Bucket>
373 class segment_traits {
374 public:
376  using segment_index_t = size_t;
377  using size_type = size_t;
378  using bucket_type = Bucket;
379 
380 protected:
382  constexpr static size_type max_allocation_size = PMEMOBJ_MAX_ALLOC_SIZE;
383 
385  constexpr static segment_index_t first_big_block = 27;
386  /* TODO: avoid hardcoded value; need constexpr similar to:
387  * Log2(max_allocation_size / sizeof(bucket_type)) */
388 
390  constexpr static size_type big_block_size = size_type(1)
391  << first_big_block;
392 
393  /* Block size in bytes cannot exceed max_allocation_size */
394  static_assert((big_block_size * sizeof(bucket_type)) <
395  max_allocation_size,
396  "Block size exceeds max_allocation_size");
397 
399  constexpr static segment_index_t
400  first_block_in_segment(segment_index_t seg)
401  {
402  return seg < first_big_block
403  ? seg
404  : (first_big_block +
405  (segment_index_t(1) << (seg - first_big_block)) - 1);
406  }
407 
409  constexpr static size_type
410  blocks_in_segment(segment_index_t seg)
411  {
412  return seg < first_big_block
413  ? segment_index_t(1)
414  : segment_index_t(1) << (seg - first_big_block);
415  }
416 
418  constexpr static size_type
419  block_size(segment_index_t b)
420  {
421  return b < first_big_block ? segment_size(b ? b : 1)
422  : big_block_size;
423  }
424 
425 public:
427  constexpr static segment_index_t embedded_segments = 1;
428 
430  constexpr static size_type embedded_buckets = 1 << embedded_segments;
431 
433  constexpr static segment_index_t number_of_segments = 32;
434 
436  static const size_type first_block = 8;
437 
439  constexpr static segment_index_t
440  number_of_blocks()
441  {
442  return first_block_in_segment(number_of_segments);
443  }
444 
446  static segment_index_t
447  segment_index_of(size_type index)
448  {
449  return segment_index_t(detail::Log2(index | 1));
450  }
451 
453  constexpr static segment_index_t
454  segment_base(segment_index_t k)
455  {
456  return (segment_index_t(1) << k) & ~segment_index_t(1);
457  }
458 
460  constexpr static size_type
461  segment_size(segment_index_t k)
462  {
463  return size_type(1) << k; // fake value for k == 0
464  }
465  static_assert(
466  embedded_segments < first_big_block,
467  "Number of embedded segments cannot exceed max_allocation_size");
468 }; /* End of class segment_traits */
469 
486 template <typename BlockTable, typename SegmentTraits, bool is_const>
487 class segment_facade_impl : public SegmentTraits {
488 private:
489  using traits_type = SegmentTraits;
490  using traits_type::block_size;
491  using traits_type::blocks_in_segment;
492  using traits_type::embedded_buckets;
493  using traits_type::embedded_segments;
494  using traits_type::first_block;
495  using traits_type::first_block_in_segment;
496  using traits_type::segment_base;
497  using traits_type::segment_size;
498 
499 public:
500  using table_reference =
501  typename std::conditional<is_const, const BlockTable &,
502  BlockTable &>::type;
503 
504  using table_pointer =
505  typename std::conditional<is_const, const BlockTable *,
506  BlockTable *>::type;
507 
508  using bucket_type = typename traits_type::bucket_type;
509  using segment_index_t = typename traits_type::segment_index_t;
510  using size_type = typename traits_type::size_type;
511 
513  segment_facade_impl(table_reference table, segment_index_t s)
514  : my_table(&table), my_seg(s)
515  {
516  assert(my_seg < traits_type::number_of_segments);
517  }
518 
520  segment_facade_impl(const segment_facade_impl &src)
521  : my_table(src.my_table), my_seg(src.my_seg)
522  {
523  }
524 
525  segment_facade_impl(segment_facade_impl &&src) = default;
526 
528  segment_facade_impl &
529  operator=(const segment_facade_impl &src)
530  {
531  my_table = src.my_table;
532  my_seg = src.my_seg;
533  return *this;
534  }
535 
537  segment_facade_impl &
538  operator=(segment_facade_impl &&src)
539  {
540  my_table = src.my_table;
541  my_seg = src.my_seg;
542  return *this;
543  }
544 
551  bucket_type &operator[](size_type i) const
552  {
553  assert(i < size());
554 
555  segment_index_t table_block = first_block_in_segment(my_seg);
556  size_type b_size = block_size(table_block);
557 
558  table_block += i / b_size;
559  i = i % b_size;
560 
561  return (*my_table)[table_block][static_cast<std::ptrdiff_t>(i)];
562  }
563 
567  segment_facade_impl &
568  operator++()
569  {
570  ++my_seg;
571  return *this;
572  }
573 
577  segment_facade_impl
578  operator++(int)
579  {
580  segment_facade_impl tmp = *this;
581  ++(*this);
582  return tmp;
583  }
584 
588  segment_facade_impl &
589  operator--()
590  {
591  --my_seg;
592  return *this;
593  }
594 
598  segment_facade_impl
599  operator--(int)
600  {
601  segment_facade_impl tmp = *this;
602  --(*this);
603  return tmp;
604  }
605 
609  segment_facade_impl &
610  operator+=(segment_index_t off)
611  {
612  my_seg += off;
613  return *this;
614  }
615 
619  segment_facade_impl &
620  operator-=(segment_index_t off)
621  {
622  my_seg -= off;
623  return *this;
624  }
625 
629  segment_facade_impl
630  operator+(segment_index_t off) const
631  {
632  return segment_facade_impl(*(this->my_table),
633  this->my_seg + off);
634  }
635 
639  segment_facade_impl
640  operator-(segment_index_t off) const
641  {
642  return segment_facade_impl(*(this->my_table),
643  this->my_seg - off);
644  }
645 
649  void
650  enable(pool_base &pop)
651  {
652  assert(my_seg >= embedded_segments);
653 
654  if (my_seg < first_block) {
655  enable_first_block(pop);
656  } else {
657  enable_big_segment(pop);
658  }
659  }
660 
664  void
665  disable()
666  {
667  assert(my_seg >= embedded_segments);
668 
669  if (my_seg < first_block) {
670  if (my_seg == embedded_segments) {
671  size_type sz = segment_size(first_block) -
672  embedded_buckets;
673  delete_persistent<bucket_type[]>(
674  (*my_table)[my_seg], sz);
675  }
676  (*my_table)[my_seg] = nullptr;
677  } else {
678  block_range blocks = segment_blocks(my_seg);
679 
680  for (segment_index_t b = blocks.first;
681  b < blocks.second; ++b) {
682  if ((*my_table)[b] != nullptr) {
683  delete_persistent<bucket_type[]>(
684  (*my_table)[b], block_size(b));
685  (*my_table)[b] = nullptr;
686  }
687  }
688  }
689  }
690 
694  constexpr size_type
695  size() const
696  {
697  return segment_size(my_seg ? my_seg : 1);
698  }
699 
705  bool
706  is_valid() const
707  {
708  block_range blocks = segment_blocks(my_seg);
709 
710  for (segment_index_t b = blocks.first; b < blocks.second; ++b) {
711  if ((*my_table)[b] == nullptr)
712  return false;
713  }
714 
715  return true;
716  }
717 
718 private:
719  using block_range = std::pair<segment_index_t, segment_index_t>;
720 
724  static block_range
725  segment_blocks(segment_index_t seg)
726  {
727  segment_index_t begin = first_block_in_segment(seg);
728 
729  return block_range(begin, begin + blocks_in_segment(seg));
730  }
731 
732  void
733  enable_first_block(pool_base &pop)
734  {
735  assert(my_seg == embedded_segments);
736  {
737  transaction::manual tx(pop);
738 
739  size_type sz =
740  segment_size(first_block) - embedded_buckets;
741  (*my_table)[my_seg] =
742  make_persistent<bucket_type[]>(sz);
743 
744  persistent_ptr<bucket_type> base =
745  (*my_table)[embedded_segments].raw();
746 
747  for (segment_index_t s = my_seg + 1; s < first_block;
748  ++s) {
749  std::ptrdiff_t off =
750  static_cast<std::ptrdiff_t>(
751  segment_base(s) -
752  segment_base(my_seg));
753 
754  (*my_table)[s] = (base + off).raw();
755  }
756 
757  transaction::commit();
758  }
759  }
760 
761  void
762  enable_big_segment(pool_base &pop)
763  {
764  block_range blocks = segment_blocks(my_seg);
765  {
766  transaction::manual tx(pop);
767 
768  for (segment_index_t b = blocks.first;
769  b < blocks.second; ++b) {
770  assert((*my_table)[b] == nullptr);
771  (*my_table)[b] = make_persistent<bucket_type[]>(
772  block_size(b));
773  }
774 
775  transaction::commit();
776  }
777  }
778 
780  table_pointer my_table;
781 
783  segment_index_t my_seg;
784 }; /* End of class segment_facade_impl */
785 
792 template <typename Key, typename T, typename MutexType, typename ScopedLockType>
793 class hash_map_base {
794 public:
795  using mutex_t = MutexType;
796  using scoped_t = ScopedLockType;
797 
799  using size_type = size_t;
800 
802  using hashcode_type = size_t;
803 
805  using node = hash_map_node<Key, T, mutex_t, scoped_t>;
806 
808  using node_ptr_t = detail::persistent_pool_ptr<node>;
809 
811  struct bucket {
812  using mutex_t = MutexType;
813  using scoped_t = ScopedLockType;
814 
816  mutex_t mutex;
817 
819  p<std::atomic<uint64_t>> rehashed;
820 
822  node_ptr_t node_list;
823 
825  bucket() : node_list(nullptr)
826  {
827 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
828  VALGRIND_HG_DISABLE_CHECKING(&rehashed,
829  sizeof(rehashed));
830 #endif
831  rehashed.get_rw() = false;
832  }
833 
839  bool
840  is_rehashed(std::memory_order order)
841  {
842  return rehashed.get_ro().load(order);
843  }
844 
845  void
846  set_rehashed(std::memory_order order)
847  {
848  rehashed.get_rw().store(true, order);
849  }
850 
852  bucket(const bucket &) = delete;
853 
855  bucket &operator=(const bucket &) = delete;
856  }; /* End of struct bucket */
857 
859  using segment_traits_t = segment_traits<bucket>;
860 
862  using segment_index_t = typename segment_traits_t::segment_index_t;
863 
865  static const size_type embedded_buckets =
866  segment_traits_t::embedded_buckets;
867 
869  static const size_type first_block = segment_traits_t::first_block;
870 
872  constexpr static size_type block_table_size =
873  segment_traits_t::number_of_blocks();
874 
876  using segment_ptr_t = persistent_ptr<bucket[]>;
877 
879  using bucket_ptr_t = persistent_ptr<bucket>;
880 
882  using blocks_table_t = segment_ptr_t[block_table_size];
883 
885  using segment_enable_mutex_t = pmem::obj::mutex;
886 
888  struct features {
889  uint32_t compat;
890  uint32_t incompat;
891  };
892 
893  /* --------------------------------------------------------- */
894 
896  p<uint64_t> my_pool_uuid;
897 
900  features layout_features;
901 
904  std::aligned_storage<sizeof(size_t), sizeof(size_t)>::type
905  my_mask_reserved;
906 
908  /* my_mask always restored on restart. */
909  p<std::atomic<hashcode_type>> my_mask;
910 
912  std::aligned_storage<32, 8>::type padding1;
913 
918  blocks_table_t my_table;
919 
920  /* It must be in separate cache line from my_mask due to performance
921  * effects */
923  p<std::atomic<size_type>> my_size;
924 
926  std::aligned_storage<24, 8>::type padding2;
927 
929  std::aligned_storage<64, 8>::type reserved;
930 
932  segment_enable_mutex_t my_segment_enable_mutex;
933 
935  bucket my_embedded_segment[embedded_buckets];
936 
937  /* --------------------------------------------------------- */
938 
940  static constexpr features
941  header_features()
942  {
943  return {0, 0};
944  }
945 
946  const std::atomic<hashcode_type> &
947  mask() const noexcept
948  {
949  return my_mask.get_ro();
950  }
951 
952  std::atomic<hashcode_type> &
953  mask() noexcept
954  {
955  return my_mask.get_rw();
956  }
957 
959  using const_segment_facade_t =
960  segment_facade_impl<blocks_table_t, segment_traits_t, true>;
961 
963  using segment_facade_t =
964  segment_facade_impl<blocks_table_t, segment_traits_t, false>;
965 
967  hash_map_base()
968  {
969  static_assert(
970  sizeof(size_type) == sizeof(std::atomic<size_type>),
971  "std::atomic should have the same layout as underlying integral type");
972 
973 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
974  VALGRIND_HG_DISABLE_CHECKING(&my_size, sizeof(my_size));
975  VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
976 #endif
977  layout_features = header_features();
978 
979  my_size.get_rw() = 0;
980  PMEMoid oid = pmemobj_oid(this);
981 
982  assert(!OID_IS_NULL(oid));
983 
984  my_pool_uuid = oid.pool_uuid_lo;
985 
986  pool_base pop = get_pool_base();
987  /* enable embedded segments */
988  for (size_type i = 0; i < segment_traits_t::embedded_segments;
989  ++i) {
990  my_table[i] =
991  pmemobj_oid(my_embedded_segment +
992  segment_traits_t::segment_base(i));
993  segment_facade_t seg(my_table, i);
994  mark_rehashed<false>(pop, seg);
995  }
996  }
997 
1001  void
1002  calculate_mask()
1003  {
1004 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
1005  VALGRIND_HG_DISABLE_CHECKING(&my_size, sizeof(my_size));
1006  VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
1007 #endif
1008 #if LIBPMEMOBJ_CPP_VG_PMEMCHECK_ENABLED
1009  VALGRIND_PMC_REMOVE_PMEM_MAPPING(&my_mask, sizeof(my_mask));
1010 #endif
1011 
1012  hashcode_type m = embedded_buckets - 1;
1013 
1014  const_segment_facade_t segment(
1015  my_table, segment_traits_t::embedded_segments);
1016 
1017  while (segment.is_valid()) {
1018  m += segment.size();
1019  ++segment;
1020  }
1021 
1022  mask().store(m, std::memory_order_relaxed);
1023  }
1024 
1025  void
1026  restore_size(size_type actual_size)
1027  {
1028  my_size.get_rw().store(actual_size, std::memory_order_relaxed);
1029  pool_base pop = get_pool_base();
1030  pop.persist(my_size);
1031  }
1032 
1036  template <bool Flush = true>
1037  void
1038  mark_rehashed(pool_base &pop, segment_facade_t &segment)
1039  {
1040  for (size_type i = 0; i < segment.size(); ++i) {
1041  bucket *b = &(segment[i]);
1042 
1043  assert_not_locked<mutex_t, scoped_t>(b->mutex);
1044 
1045  b->set_rehashed(std::memory_order_relaxed);
1046  }
1047 
1048  if (Flush) {
1049  /* Flush in separate loop to avoid read-after-flush */
1050  for (size_type i = 0; i < segment.size(); ++i) {
1051  bucket *b = &(segment[i]);
1052  pop.flush(b->rehashed);
1053  }
1054 
1055  pop.drain();
1056  }
1057  }
1058 
1062  void
1063  enable_segment(segment_index_t k, bool is_initial = false)
1064  {
1065  assert(k);
1066 
1067  pool_base pop = get_pool_base();
1068  size_type sz;
1069 
1070  if (k >= first_block) {
1071  segment_facade_t new_segment(my_table, k);
1072 
1073  sz = new_segment.size();
1074  if (!new_segment.is_valid())
1075  new_segment.enable(pop);
1076 
1077  if (is_initial) {
1078  mark_rehashed(pop, new_segment);
1079  }
1080 
1081  /* double it to get entire capacity of the container */
1082  sz <<= 1;
1083  } else {
1084  /* the first block */
1085  assert(k == segment_traits_t::embedded_segments);
1086 
1087  for (segment_index_t i = k; i < first_block; ++i) {
1088  segment_facade_t new_segment(my_table, i);
1089 
1090  if (!new_segment.is_valid())
1091  new_segment.enable(pop);
1092 
1093  if (is_initial) {
1094  mark_rehashed(pop, new_segment);
1095  }
1096  }
1097 
1098  sz = segment_traits_t::segment_size(first_block);
1099  }
1100 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
1101  ANNOTATE_HAPPENS_BEFORE(&my_mask);
1102 #endif
1103  mask().store(sz - 1, std::memory_order_release);
1104  }
1105 
1110  bucket *
1111  get_bucket(hashcode_type h) const
1112  {
1113  segment_index_t s = segment_traits_t::segment_index_of(h);
1114 
1115  h -= segment_traits_t::segment_base(s);
1116 
1117  const_segment_facade_t segment(my_table, s);
1118 
1119  assert(segment.is_valid());
1120 
1121  return &(segment[h]);
1122  }
1123 
1127  inline bool
1128  check_mask_race(hashcode_type h, hashcode_type &m) const
1129  {
1130  hashcode_type m_now, m_old = m;
1131 
1132  m_now = mask().load(std::memory_order_acquire);
1133 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
1134  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
1135 #endif
1136 
1137  if (m_old != m_now)
1138  return check_rehashing_collision(h, m_old, m = m_now);
1139 
1140  return false;
1141  }
1142 
1146  bool
1147  check_rehashing_collision(hashcode_type h, hashcode_type m_old,
1148  hashcode_type m) const
1149  {
1150  assert(m_old != m);
1151 
1152  if ((h & m_old) != (h & m)) {
1153  /* mask changed for this hashcode, rare event condition
1154  * above proves that 'h' has some other bits set beside
1155  * 'm_old', find next applicable mask after m_old */
1156 
1157  for (++m_old; !(h & m_old); m_old <<= 1)
1158  ;
1159 
1160  m_old = (m_old << 1) - 1; /* get full mask from a bit */
1161 
1162  assert((m_old & (m_old + 1)) == 0 && m_old <= m);
1163 
1164  /* check whether it is rehashing/ed */
1165  bucket *b = get_bucket(h & m_old);
1166  return b->is_rehashed(std::memory_order_acquire);
1167  }
1168 
1169  return false;
1170  }
1171 
1177  template <typename Node, typename... Args>
1178  size_type
1179  insert_new_node(bucket *b, detail::persistent_pool_ptr<Node> &new_node,
1180  Args &&... args)
1181  {
1182  pool_base pop = get_pool_base();
1183 
1184  pmem::obj::transaction::run(pop, [&] {
1185  new_node = pmem::obj::make_persistent<Node>(
1186  b->node_list, std::forward<Args>(args)...);
1187  b->node_list = new_node; /* bucket is locked */
1188  });
1189 
1190  /* prefix form is to enforce allocation after the first item
1191  * inserted */
1192  size_t sz = ++(my_size.get_rw());
1193  pop.persist(&my_size, sizeof(my_size));
1194 
1195  return sz;
1196  }
1197 
1202  bool
1203  check_growth(hashcode_type m, size_type sz)
1204  {
1205  if (sz >= m) {
1206  segment_index_t new_seg =
1207  static_cast<segment_index_t>(detail::Log2(
1208  m +
1209  1)); /* optimized segment_index_of */
1210 
1211  assert(segment_facade_t(my_table, new_seg - 1)
1212  .is_valid());
1213 
1214  std::unique_lock<segment_enable_mutex_t> lock(
1215  my_segment_enable_mutex, std::try_to_lock);
1216 
1217  if (lock) {
1218  if (mask().load(std::memory_order_relaxed) ==
1219  m) {
1220  /* Otherwise, other thread enable this
1221  * segment */
1222  enable_segment(new_seg);
1223 
1224  return true;
1225  }
1226  }
1227  }
1228 
1229  return false;
1230  }
1231 
1235  void
1236  reserve(size_type buckets)
1237  {
1238  if (buckets == 0)
1239  return;
1240 
1241  --buckets;
1242 
1243  bool is_initial = (my_size.get_ro() == 0);
1244 
1245  for (size_type m = mask(); buckets > m; m = mask())
1246  enable_segment(
1247  segment_traits_t::segment_index_of(m + 1),
1248  is_initial);
1249  }
1250 
1255  void
1256  internal_swap(hash_map_base<Key, T, mutex_t, scoped_t> &table)
1257  {
1258  pool_base p = get_pool_base();
1259  {
1260  transaction::manual tx(p);
1261 
1262  this->my_pool_uuid.swap(table.my_pool_uuid);
1263  /* Swap my_mask */
1264  this->mask() = table.mask().exchange(
1265  this->mask(), std::memory_order_relaxed);
1266 
1267  /* Swap my_size */
1268  this->my_size.get_rw() =
1269  table.my_size.get_rw().exchange(
1270  this->my_size.get_ro(),
1271  std::memory_order_relaxed);
1272 
1273  for (size_type i = 0; i < embedded_buckets; ++i)
1274  this->my_embedded_segment[i].node_list.swap(
1275  table.my_embedded_segment[i].node_list);
1276 
1277  for (size_type i = segment_traits_t::embedded_segments;
1278  i < block_table_size; ++i)
1279  this->my_table[i].swap(table.my_table[i]);
1280 
1281  transaction::commit();
1282  }
1283  }
1284 
1289  pool_base
1290  get_pool_base()
1291  {
1292  PMEMobjpool *pop =
1293  pmemobj_pool_by_oid(PMEMoid{my_pool_uuid, 0});
1294 
1295  return pool_base(pop);
1296  }
1297 }; /* End of class hash_map_base */
1298 
1304 template <typename Container, bool is_const>
1305 class hash_map_iterator {
1306 public:
1307  using iterator_category = std::forward_iterator_tag;
1308  using difference_type = ptrdiff_t;
1309  using map_type = Container;
1310  using value_type = typename map_type::value_type;
1311  using node = typename map_type::node;
1312  using bucket = typename map_type::bucket;
1313  using map_ptr = typename std::conditional<is_const, const map_type *,
1314  map_type *>::type;
1315  using reference =
1316  typename std::conditional<is_const,
1317  typename map_type::const_reference,
1318  typename map_type::reference>::type;
1319  using pointer =
1320  typename std::conditional<is_const,
1321  typename map_type::const_pointer,
1322  typename map_type::pointer>::type;
1323 
1324  template <typename C, bool M, bool U>
1325  friend bool operator==(const hash_map_iterator<C, M> &i,
1326  const hash_map_iterator<C, U> &j);
1327 
1328  template <typename C, bool M, bool U>
1329  friend bool operator!=(const hash_map_iterator<C, M> &i,
1330  const hash_map_iterator<C, U> &j);
1331 
1332  friend class hash_map_iterator<map_type, true>;
1333 
1334 #if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
1335 private:
1336  template <typename Key, typename T, typename Hash, typename KeyEqual,
1337  typename MutexType, typename ScopedLockType>
1338  friend class ::pmem::obj::concurrent_hash_map;
1339 #else
1340 public: /* workaround */
1341 #endif
1342  hash_map_iterator(map_ptr map, size_t index)
1343  : my_map(map), my_index(index), my_bucket(nullptr), my_node(nullptr)
1344  {
1345  if (my_index <= my_map->mask()) {
1346  bucket_accessor acc(my_map, my_index);
1347  my_bucket = acc.get();
1348  my_node = static_cast<node *>(
1349  my_bucket->node_list.get(my_map->my_pool_uuid));
1350 
1351  if (!my_node) {
1352  advance_to_next_bucket();
1353  }
1354  }
1355  }
1356 
1357 public:
1359  hash_map_iterator() = default;
1360 
1362  hash_map_iterator(const hash_map_iterator &other)
1363  : my_map(other.my_map),
1364  my_index(other.my_index),
1365  my_bucket(other.my_bucket),
1366  my_node(other.my_node)
1367  {
1368  }
1369 
1371  template <typename U = void,
1372  typename = typename std::enable_if<is_const, U>::type>
1373  hash_map_iterator(const hash_map_iterator<map_type, false> &other)
1374  : my_map(other.my_map),
1375  my_index(other.my_index),
1376  my_bucket(other.my_bucket),
1377  my_node(other.my_node)
1378  {
1379  }
1380 
1381  hash_map_iterator &operator=(const hash_map_iterator &it) = default;
1382 
1384  reference operator*() const
1385  {
1386  assert(my_node);
1387  return my_node->item;
1388  }
1389 
1391  pointer operator->() const
1392  {
1393  return &operator*();
1394  }
1395 
1397  hash_map_iterator &
1398  operator++()
1399  {
1400  my_node = static_cast<node *>(
1401  my_node->next.get((my_map->my_pool_uuid)));
1402 
1403  if (!my_node)
1404  advance_to_next_bucket();
1405 
1406  return *this;
1407  }
1408 
1410  hash_map_iterator
1411  operator++(int)
1412  {
1413  hash_map_iterator old(*this);
1414  operator++();
1415  return old;
1416  }
1417 
1418 private:
1420  map_ptr my_map = nullptr;
1421 
1423  size_t my_index = 0;
1424 
1426  bucket *my_bucket = nullptr;
1427 
1429  node *my_node = nullptr;
1430 
1431  class bucket_accessor {
1432  public:
1433  bucket_accessor(map_ptr m, size_t index)
1434  {
1435  my_bucket = m->get_bucket(index);
1436  }
1437 
1438  bucket *
1439  get() const
1440  {
1441  return my_bucket;
1442  }
1443 
1444  private:
1445  bucket *my_bucket;
1446  };
1447 
1448  void
1449  advance_to_next_bucket()
1450  {
1451  size_t k = my_index + 1;
1452 
1453  assert(my_bucket);
1454 
1455  while (k <= my_map->mask()) {
1456  bucket_accessor acc(my_map, k);
1457  my_bucket = acc.get();
1458 
1459  if (my_bucket->node_list) {
1460  my_node = static_cast<node *>(
1461  my_bucket->node_list.get(
1462  my_map->my_pool_uuid));
1463 
1464  my_index = k;
1465 
1466  return;
1467  }
1468 
1469  ++k;
1470  }
1471 
1472  my_bucket = 0;
1473  my_node = 0;
1474  my_index = k;
1475  }
1476 };
1477 
1478 template <typename Container, bool M, bool U>
1479 bool
1480 operator==(const hash_map_iterator<Container, M> &i,
1481  const hash_map_iterator<Container, U> &j)
1482 {
1483  return i.my_node == j.my_node && i.my_map == j.my_map;
1484 }
1485 
1486 template <typename Container, bool M, bool U>
1487 bool
1488 operator!=(const hash_map_iterator<Container, M> &i,
1489  const hash_map_iterator<Container, U> &j)
1490 {
1491  return i.my_node != j.my_node || i.my_map != j.my_map;
1492 }
1493 } /* namespace concurrent_hash_map_internal */
1542 template <typename Key, typename T, typename Hash, typename KeyEqual,
1543  typename MutexType, typename ScopedLockType>
1544 class concurrent_hash_map
1545  : protected concurrent_hash_map_internal::hash_map_base<Key, T, MutexType,
1546  ScopedLockType> {
1547  template <typename Container, bool is_const>
1548  friend class concurrent_hash_map_internal::hash_map_iterator;
1549 
1550 public:
1551  using size_type = typename concurrent_hash_map_internal::hash_map_base<
1552  Key, T, MutexType, ScopedLockType>::size_type;
1553  using hashcode_type =
1554  typename concurrent_hash_map_internal::hash_map_base<
1555  Key, T, MutexType, ScopedLockType>::hashcode_type;
1556  using key_type = Key;
1557  using mapped_type = T;
1558  using value_type = typename concurrent_hash_map_internal::hash_map_base<
1559  Key, T, MutexType, ScopedLockType>::node::value_type;
1560  using difference_type = ptrdiff_t;
1561  using pointer = value_type *;
1562  using const_pointer = const value_type *;
1563  using reference = value_type &;
1564  using const_reference = const value_type &;
1565  using iterator = concurrent_hash_map_internal::hash_map_iterator<
1566  concurrent_hash_map, false>;
1567  using const_iterator = concurrent_hash_map_internal::hash_map_iterator<
1568  concurrent_hash_map, true>;
1569  using hasher = Hash;
1570  using key_equal = typename concurrent_hash_map_internal::key_equal_type<
1571  Hash, KeyEqual>::type;
1572 
1573 protected:
1574  using mutex_t = MutexType;
1575  using scoped_t = ScopedLockType;
1576  /*
1577  * Explicitly use methods and types from template base class
1578  */
1579  using hash_map_base =
1580  concurrent_hash_map_internal::hash_map_base<Key, T, mutex_t,
1581  scoped_t>;
1582  using hash_map_base::calculate_mask;
1583  using hash_map_base::check_growth;
1584  using hash_map_base::check_mask_race;
1585  using hash_map_base::embedded_buckets;
1586  using hash_map_base::get_bucket;
1587  using hash_map_base::get_pool_base;
1588  using hash_map_base::header_features;
1589  using hash_map_base::insert_new_node;
1590  using hash_map_base::internal_swap;
1591  using hash_map_base::layout_features;
1592  using hash_map_base::mask;
1593  using hash_map_base::reserve;
1594  using node = typename hash_map_base::node;
1595  using node_mutex_t = typename node::mutex_t;
1596  using node_ptr_t = typename hash_map_base::node_ptr_t;
1597  using bucket = typename hash_map_base::bucket;
1598  using bucket_lock_type = typename bucket::scoped_t;
1599  using segment_index_t = typename hash_map_base::segment_index_t;
1600  using segment_traits_t = typename hash_map_base::segment_traits_t;
1601  using segment_facade_t = typename hash_map_base::segment_facade_t;
1602  using scoped_lock_traits_type =
1603  concurrent_hash_map_internal::scoped_lock_traits<scoped_t>;
1604 
1605  friend class const_accessor;
1606  using persistent_node_ptr_t = detail::persistent_pool_ptr<node>;
1607 
1608  void
1609  delete_node(const node_ptr_t &n)
1610  {
1611  delete_persistent<node>(
1612  detail::static_persistent_pool_pointer_cast<node>(n)
1613  .get_persistent_ptr(this->my_pool_uuid));
1614  }
1615 
1616  template <typename K>
1617  persistent_node_ptr_t
1618  search_bucket(const K &key, bucket *b) const
1619  {
1620  assert(b->is_rehashed(std::memory_order_relaxed));
1621 
1622  persistent_node_ptr_t n =
1623  detail::static_persistent_pool_pointer_cast<node>(
1624  b->node_list);
1625 
1626  while (n &&
1627  !key_equal{}(key,
1628  n.get(this->my_pool_uuid)->item.first)) {
1629  n = detail::static_persistent_pool_pointer_cast<node>(
1630  n.get(this->my_pool_uuid)->next);
1631  }
1632 
1633  return n;
1634  }
1635 
1640  class bucket_accessor : public bucket_lock_type {
1641  bucket *my_b;
1642 
1643  public:
1644  bucket_accessor(concurrent_hash_map *base,
1645  const hashcode_type h, bool writer = false)
1646  {
1647  acquire(base, h, writer);
1648  }
1649 
1654  inline void
1655  acquire(concurrent_hash_map *base, const hashcode_type h,
1656  bool writer = false)
1657  {
1658  my_b = base->get_bucket(h);
1659 
1660  if (my_b->is_rehashed(std::memory_order_acquire) ==
1661  false &&
1662  bucket_lock_type::try_acquire(this->my_b->mutex,
1663  /*write=*/true)) {
1664  if (my_b->is_rehashed(
1665  std::memory_order_relaxed) ==
1666  false) {
1667  /* recursive rehashing */
1668  base->rehash_bucket<false>(my_b, h);
1669  }
1670  } else {
1671  bucket_lock_type::acquire(my_b->mutex, writer);
1672  }
1673 
1674  assert(my_b->is_rehashed(std::memory_order_relaxed));
1675  }
1676 
1680  bool
1681  is_writer() const
1682  {
1683  return bucket_lock_type::is_writer;
1684  }
1685 
1690  bucket *
1691  get() const
1692  {
1693  return my_b;
1694  }
1695 
1700  bucket *operator->() const
1701  {
1702  return this->get();
1703  }
1704  };
1705 
1709  class serial_bucket_accessor {
1710  bucket *my_b;
1711 
1712  public:
1713  serial_bucket_accessor(concurrent_hash_map *base,
1714  const hashcode_type h,
1715  bool writer = false)
1716  {
1717  acquire(base, h, writer);
1718  }
1719 
1720  /*
1721  * Find a bucket by masked hashcode, optionally rehash
1722  */
1723  inline void
1724  acquire(concurrent_hash_map *base, const hashcode_type h,
1725  bool writer = false)
1726  {
1727  my_b = base->get_bucket(h);
1728 
1729  if (my_b->is_rehashed(std::memory_order_relaxed) ==
1730  false) {
1731  /* recursive rehashing */
1732  base->rehash_bucket<true>(my_b, h);
1733  }
1734 
1735  assert(my_b->is_rehashed(std::memory_order_relaxed));
1736  }
1737 
1744  bool
1745  is_writer() const
1746  {
1747  return true;
1748  }
1749 
1754  bucket *
1755  get() const
1756  {
1757  return my_b;
1758  }
1759 
1764  bucket *operator->() const
1765  {
1766  return this->get();
1767  }
1768  };
1769 
1770  hashcode_type
1771  get_hash_code(node_ptr_t &n)
1772  {
1773  return hasher{}(
1774  detail::static_persistent_pool_pointer_cast<node>(n)(
1775  this->my_pool_uuid)
1776  ->item.first);
1777  }
1778 
1779  template <bool serial>
1780  void
1781  rehash_bucket(bucket *b_new, const hashcode_type h)
1782  {
1783  using accessor_type = typename std::conditional<
1784  serial, serial_bucket_accessor, bucket_accessor>::type;
1785 
1786  using scoped_lock_traits_type =
1787  concurrent_hash_map_internal::scoped_lock_traits<
1788  accessor_type>;
1789 
1790  /* First two bucket should be always rehashed */
1791  assert(h > 1);
1792 
1793  pool_base pop = get_pool_base();
1794  node_ptr_t *p_new = &(b_new->node_list);
1795 
1796  /* This condition is only true when there was a failure just
1797  * before setting rehashed flag */
1798  if (*p_new != nullptr) {
1799  assert(!b_new->is_rehashed(std::memory_order_relaxed));
1800 
1801  b_new->set_rehashed(std::memory_order_relaxed);
1802  pop.persist(b_new->rehashed);
1803 
1804  return;
1805  }
1806 
1807  /* get parent mask from the topmost bit */
1808  hashcode_type mask = (1u << detail::Log2(h)) - 1;
1809  assert((h & mask) < h);
1810  accessor_type b_old(
1811  this, h & mask,
1812  scoped_lock_traits_type::initial_rw_state(true));
1813 
1814  pmem::obj::transaction::run(pop, [&] {
1815  /* get full mask for new bucket */
1816  mask = (mask << 1) | 1;
1817  assert((mask & (mask + 1)) == 0 && (h & mask) == h);
1818 
1819  restart:
1820  for (node_ptr_t *p_old = &(b_old->node_list),
1821  n = *p_old;
1822  n; n = *p_old) {
1823  hashcode_type c = get_hash_code(n);
1824 #ifndef NDEBUG
1825  hashcode_type bmask = h & (mask >> 1);
1826 
1827  bmask = bmask == 0
1828  ? 1 /* minimal mask of parent bucket */
1829  : (1u << (detail::Log2(bmask) + 1)) - 1;
1830 
1831  assert((c & bmask) == (h & bmask));
1832 #endif
1833 
1834  if ((c & mask) == h) {
1835  if (!b_old.is_writer() &&
1836  !scoped_lock_traits_type::
1837  upgrade_to_writer(b_old)) {
1838  goto restart;
1839  /* node ptr can be invalid due
1840  * to concurrent erase */
1841  }
1842 
1843  /* Add to new b_new */
1844  *p_new = n;
1845 
1846  /* exclude from b_old */
1847  *p_old = n(this->my_pool_uuid)->next;
1848 
1849  p_new = &(n(this->my_pool_uuid)->next);
1850  } else {
1851  /* iterate to next item */
1852  p_old = &(n(this->my_pool_uuid)->next);
1853  }
1854  }
1855 
1856  *p_new = nullptr;
1857  });
1858 
1859  /* mark rehashed */
1860  b_new->set_rehashed(std::memory_order_release);
1861  pop.persist(b_new->rehashed);
1862  }
1863 
1864  void
1865  check_features()
1866  {
1867  if (layout_features.incompat != header_features().incompat)
1868  throw pmem::layout_error(
1869  "Incompat flags mismatch, for more details go to: https://pmem.io/pmdk/cpp_obj/ \n");
1870  }
1871 
1872 public:
1873  class accessor;
1877  class const_accessor
1878  : protected node::scoped_t /*which derived from no_copy*/ {
1879  friend class concurrent_hash_map<Key, T, Hash, KeyEqual,
1880  mutex_t, scoped_t>;
1881  friend class accessor;
1883  using node::scoped_t::try_acquire;
1884 
1885  public:
1889  using value_type =
1890  const typename concurrent_hash_map::value_type;
1891 
1896  bool
1897  empty() const
1898  {
1899  return !my_node;
1900  }
1901 
1908  void
1909  release()
1910  {
1911  concurrent_hash_map_internal::check_outside_tx();
1912 
1913  if (my_node) {
1914  node::scoped_t::release();
1915  my_node = 0;
1916  }
1917  }
1918 
1922  const_reference operator*() const
1923  {
1924  assert(my_node);
1925 
1926  return my_node->item;
1927  }
1928 
1932  const_pointer operator->() const
1933  {
1934  return &operator*();
1935  }
1936 
1942  const_accessor() : my_node(OID_NULL)
1943  {
1944  concurrent_hash_map_internal::check_outside_tx();
1945  }
1946 
1950  ~const_accessor()
1951  {
1952  my_node = OID_NULL; // scoped lock's release() is called
1953  // in its destructor
1954  }
1955 
1956  protected:
1957  node_ptr_t my_node;
1958 
1959  hashcode_type my_hash;
1960  };
1961 
1966  class accessor : public const_accessor {
1967  public:
1969  using value_type = typename concurrent_hash_map::value_type;
1970 
1972  reference operator*() const
1973  {
1974  assert(this->my_node);
1975 
1976  return this->my_node->item;
1977  }
1978 
1980  pointer operator->() const
1981  {
1982  return &operator*();
1983  }
1984  };
1985 
1989  concurrent_hash_map() : hash_map_base()
1990  {
1991  runtime_initialize(true);
1992  }
1993 
1998  concurrent_hash_map(size_type n) : hash_map_base()
1999  {
2000  runtime_initialize(true);
2001 
2002  reserve(n);
2003  }
2004 
2008  concurrent_hash_map(const concurrent_hash_map &table) : hash_map_base()
2009  {
2010  runtime_initialize(true);
2011 
2012  reserve(table.size());
2013 
2014  internal_copy(table);
2015  }
2016 
2020  concurrent_hash_map(concurrent_hash_map &&table) : hash_map_base()
2021  {
2022  runtime_initialize(true);
2023 
2024  swap(table);
2025  }
2026 
2030  template <typename I>
2031  concurrent_hash_map(I first, I last)
2032  {
2033  runtime_initialize(true);
2034 
2035  reserve(static_cast<size_type>(std::distance(first, last)));
2036 
2037  internal_copy(first, last);
2038  }
2039 
2043  concurrent_hash_map(std::initializer_list<value_type> il)
2044  {
2045  runtime_initialize(true);
2046 
2047  reserve(il.size());
2048 
2049  internal_copy(il.begin(), il.end());
2050  }
2051 
2060  void
2061  runtime_initialize(bool graceful_shutdown = false)
2062  {
2063  check_features();
2064 
2065  calculate_mask();
2066 
2067  if (!graceful_shutdown) {
2068  auto actual_size =
2069  std::distance(this->begin(), this->end());
2070  assert(actual_size >= 0);
2071  this->restore_size(size_type(actual_size));
2072  } else {
2073  assert(this->size() ==
2074  size_type(std::distance(this->begin(),
2075  this->end())));
2076  }
2077  }
2078 
2090  concurrent_hash_map &
2091  operator=(const concurrent_hash_map &table)
2092  {
2093  if (this != &table) {
2094  clear();
2095  internal_copy(table);
2096  }
2097 
2098  return *this;
2099  }
2100 
2112  concurrent_hash_map &
2113  operator=(std::initializer_list<value_type> il)
2114  {
2115  clear();
2116 
2117  reserve(il.size());
2118 
2119  internal_copy(il.begin(), il.end());
2120 
2121  return *this;
2122  }
2123 
2132  void rehash(size_type n = 0);
2133 
2140  void clear();
2141 
2145  ~concurrent_hash_map()
2146  {
2147  clear();
2148  }
2149 
2150  //------------------------------------------------------------------------
2151  // STL support - not thread-safe methods
2152  //------------------------------------------------------------------------
2153 
2160  iterator
2161  begin()
2162  {
2163  return iterator(this, 0);
2164  }
2165 
2170  iterator
2171  end()
2172  {
2173  return iterator(this, mask() + 1);
2174  }
2175 
2180  const_iterator
2181  begin() const
2182  {
2183  return const_iterator(this, 0);
2184  }
2185 
2190  const_iterator
2191  end() const
2192  {
2193  return const_iterator(this, mask() + 1);
2194  }
2195 
2199  size_type
2200  size() const
2201  {
2202  return this->my_size.get_ro();
2203  }
2204 
2208  bool
2209  empty() const
2210  {
2211  return this->my_size.get_ro() == 0;
2212  }
2213 
2217  size_type
2218  max_size() const
2219  {
2220  return (~size_type(0)) / sizeof(node);
2221  }
2222 
2226  size_type
2227  bucket_count() const
2228  {
2229  return mask() + 1;
2230  }
2231 
2235  void swap(concurrent_hash_map &table);
2236 
2237  //------------------------------------------------------------------------
2238  // concurrent map operations
2239  //------------------------------------------------------------------------
2240 
2246  size_type
2247  count(const Key &key) const
2248  {
2249  concurrent_hash_map_internal::check_outside_tx();
2250 
2251  return const_cast<concurrent_hash_map *>(this)->internal_find(
2252  key, nullptr, false);
2253  }
2254 
2266  template <typename K,
2267  typename = typename std::enable_if<
2268  concurrent_hash_map_internal::
2269  has_transparent_key_equal<hasher>::value,
2270  K>::type>
2271  size_type
2272  count(const K &key) const
2273  {
2274  concurrent_hash_map_internal::check_outside_tx();
2275 
2276  return const_cast<concurrent_hash_map *>(this)->internal_find(
2277  key, nullptr, false);
2278  }
2279 
2286  bool
2287  find(const_accessor &result, const Key &key) const
2288  {
2289  concurrent_hash_map_internal::check_outside_tx();
2290 
2291  result.release();
2292 
2293  return const_cast<concurrent_hash_map *>(this)->internal_find(
2294  key, &result, false);
2295  }
2296 
2310  template <typename K,
2311  typename = typename std::enable_if<
2312  concurrent_hash_map_internal::
2313  has_transparent_key_equal<hasher>::value,
2314  K>::type>
2315  bool
2316  find(const_accessor &result, const K &key) const
2317  {
2318  concurrent_hash_map_internal::check_outside_tx();
2319 
2320  result.release();
2321 
2322  return const_cast<concurrent_hash_map *>(this)->internal_find(
2323  key, &result, false);
2324  }
2325 
2332  bool
2333  find(accessor &result, const Key &key)
2334  {
2335  concurrent_hash_map_internal::check_outside_tx();
2336 
2337  result.release();
2338 
2339  return internal_find(key, &result, true);
2340  }
2341 
2355  template <typename K,
2356  typename = typename std::enable_if<
2357  concurrent_hash_map_internal::
2358  has_transparent_key_equal<hasher>::value,
2359  K>::type>
2360  bool
2361  find(accessor &result, const K &key)
2362  {
2363  concurrent_hash_map_internal::check_outside_tx();
2364 
2365  result.release();
2366 
2367  return internal_find(key, &result, true);
2368  }
2376  bool
2377  insert(const_accessor &result, const Key &key)
2378  {
2379  concurrent_hash_map_internal::check_outside_tx();
2380 
2381  result.release();
2382 
2383  return internal_insert(key, &result, false, key);
2384  }
2385 
2393  bool
2394  insert(accessor &result, const Key &key)
2395  {
2396  concurrent_hash_map_internal::check_outside_tx();
2397 
2398  result.release();
2399 
2400  return internal_insert(key, &result, true, key);
2401  }
2402 
2410  bool
2411  insert(const_accessor &result, const value_type &value)
2412  {
2413  concurrent_hash_map_internal::check_outside_tx();
2414 
2415  result.release();
2416 
2417  return internal_insert(value.first, &result, false, value);
2418  }
2419 
2427  bool
2428  insert(accessor &result, const value_type &value)
2429  {
2430  concurrent_hash_map_internal::check_outside_tx();
2431 
2432  result.release();
2433 
2434  return internal_insert(value.first, &result, true, value);
2435  }
2436 
2443  bool
2444  insert(const value_type &value)
2445  {
2446  concurrent_hash_map_internal::check_outside_tx();
2447 
2448  return internal_insert(value.first, nullptr, false, value);
2449  }
2450 
2458  bool
2459  insert(const_accessor &result, value_type &&value)
2460  {
2461  concurrent_hash_map_internal::check_outside_tx();
2462 
2463  result.release();
2464 
2465  return internal_insert(value.first, &result, false,
2466  std::move(value));
2467  }
2468 
2476  bool
2477  insert(accessor &result, value_type &&value)
2478  {
2479  concurrent_hash_map_internal::check_outside_tx();
2480 
2481  result.release();
2482 
2483  return internal_insert(value.first, &result, true,
2484  std::move(value));
2485  }
2486 
2493  bool
2494  insert(value_type &&value)
2495  {
2496  concurrent_hash_map_internal::check_outside_tx();
2497 
2498  return internal_insert(value.first, nullptr, false,
2499  std::move(value));
2500  }
2501 
2507  template <typename I>
2508  void
2509  insert(I first, I last)
2510  {
2511  concurrent_hash_map_internal::check_outside_tx();
2512 
2513  for (; first != last; ++first)
2514  insert(*first);
2515  }
2516 
2522  void
2523  insert(std::initializer_list<value_type> il)
2524  {
2525  concurrent_hash_map_internal::check_outside_tx();
2526 
2527  insert(il.begin(), il.end());
2528  }
2529 
2538  bool
2539  erase(const Key &key)
2540  {
2541  concurrent_hash_map_internal::check_outside_tx();
2542 
2543  return internal_erase(key);
2544  }
2545 
2560  template <typename K,
2561  typename = typename std::enable_if<
2562  concurrent_hash_map_internal::
2563  has_transparent_key_equal<hasher>::value,
2564  K>::type>
2565  bool
2566  erase(const K &key)
2567  {
2568  concurrent_hash_map_internal::check_outside_tx();
2569 
2570  return internal_erase(key);
2571  }
2572 
2573 protected:
2574  /*
2575  * Try to acquire the mutex for read or write.
2576  *
2577  * If acquiring succeeds returns true, otherwise retries for few times.
2578  * If acquiring fails after all attempts returns false.
2579  */
2580  bool try_acquire_item(const_accessor *result, node_mutex_t &mutex,
2581  bool write);
2582 
2583  template <typename K>
2584  bool internal_find(const K &key, const_accessor *result, bool write);
2585 
2586  template <typename K, typename... Args>
2587  bool internal_insert(const K &key, const_accessor *result, bool write,
2588  Args &&... args);
2589 
2590  /* Obtain pointer to node and lock bucket */
2591  template <bool Bucket_rw_lock, typename K>
2592  persistent_node_ptr_t
2593  get_node(const K &key, bucket_accessor &b)
2594  {
2595  /* find a node */
2596  auto n = search_bucket(key, b.get());
2597 
2598  if (!n) {
2599  if (Bucket_rw_lock && !b.is_writer() &&
2600  !scoped_lock_traits_type::upgrade_to_writer(b)) {
2601  /* Rerun search_list, in case another
2602  * thread inserted the item during the
2603  * upgrade. */
2604  n = search_bucket(key, b.get());
2605  if (n) {
2606  /* unfortunately, it did */
2607  scoped_lock_traits_type::
2608  downgrade_to_reader(b);
2609  return n;
2610  }
2611  }
2612  }
2613 
2614  return n;
2615  }
2616 
2617  template <typename K>
2618  bool internal_erase(const K &key);
2619 
2620  void clear_segment(segment_index_t s);
2621 
2625  void internal_copy(const concurrent_hash_map &source);
2626 
2627  template <typename I>
2628  void internal_copy(I first, I last);
2629 
2630 }; // class concurrent_hash_map
2631 
2632 template <typename Key, typename T, typename Hash, typename KeyEqual,
2633  typename MutexType, typename ScopedLockType>
2634 bool
2635 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2636  ScopedLockType>::try_acquire_item(const_accessor *result,
2637  node_mutex_t &mutex,
2638  bool write)
2639 {
2640  /* acquire the item */
2641  if (!result->try_acquire(mutex, write)) {
2642  for (detail::atomic_backoff backoff(true);;) {
2643  if (result->try_acquire(mutex, write))
2644  break;
2645 
2646  if (!backoff.bounded_pause())
2647  return false;
2648  }
2649  }
2650 
2651  return true;
2652 }
2653 
2654 template <typename Key, typename T, typename Hash, typename KeyEqual,
2655  typename MutexType, typename ScopedLockType>
2656 template <typename K>
2657 bool
2658 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2659  ScopedLockType>::internal_find(const K &key,
2660  const_accessor *result,
2661  bool write)
2662 {
2663  assert(!result || !result->my_node);
2664 
2665  hashcode_type m = mask().load(std::memory_order_acquire);
2666 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
2667  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
2668 #endif
2669 
2670  assert((m & (m + 1)) == 0);
2671 
2672  hashcode_type const h = hasher{}(key);
2673 
2674  persistent_node_ptr_t node;
2675 
2676  while (true) {
2677  /* get bucket and acquire the lock */
2678  bucket_accessor b(
2679  this, h & m,
2680  scoped_lock_traits_type::initial_rw_state(false));
2681  node = get_node<false>(key, b);
2682 
2683  if (!node) {
2684  /* Element was possibly relocated, try again */
2685  if (check_mask_race(h, m)) {
2686  b.release();
2687  continue;
2688  } else {
2689  return false;
2690  }
2691  }
2692 
2693  /* No need to acquire the item or item acquired */
2694  if (!result ||
2695  try_acquire_item(
2696  result, node.get(this->my_pool_uuid)->mutex, write))
2697  break;
2698 
2699  /* the wait takes really long, restart the
2700  * operation */
2701  b.release();
2702 
2703  std::this_thread::yield();
2704 
2705  m = mask().load(std::memory_order_acquire);
2706 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
2707  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
2708 #endif
2709  }
2710 
2711  if (result) {
2712  result->my_node = node.get_persistent_ptr(this->my_pool_uuid);
2713  result->my_hash = h;
2714  }
2715 
2716  return true;
2717 }
2718 
2719 template <typename Key, typename T, typename Hash, typename KeyEqual,
2720  typename MutexType, typename ScopedLockType>
2721 template <typename K, typename... Args>
2722 bool
2723 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2724  ScopedLockType>::internal_insert(const K &key,
2725  const_accessor *result,
2726  bool write,
2727  Args &&... args)
2728 {
2729  assert(!result || !result->my_node);
2730 
2731  hashcode_type m = mask().load(std::memory_order_acquire);
2732 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
2733  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
2734 #endif
2735 
2736  assert((m & (m + 1)) == 0);
2737 
2738  hashcode_type const h = hasher{}(key);
2739 
2740  persistent_node_ptr_t node;
2741  size_t new_size = 0;
2742  bool inserted = false;
2743 
2744  while (true) {
2745  /* get bucket and acquire the lock */
2746  bucket_accessor b(
2747  this, h & m,
2748  scoped_lock_traits_type::initial_rw_state(true));
2749  node = get_node<true>(key, b);
2750 
2751  if (!node) {
2752  /* Element was possibly relocated, try again */
2753  if (check_mask_race(h, m)) {
2754  b.release();
2755  continue;
2756  }
2757 
2758  /* insert and set flag to grow the container */
2759  new_size = insert_new_node(b.get(), node,
2760  std::forward<Args>(args)...);
2761  inserted = true;
2762  }
2763 
2764  /* No need to acquire the item or item acquired */
2765  if (!result ||
2766  try_acquire_item(
2767  result, node.get(this->my_pool_uuid)->mutex, write))
2768  break;
2769 
2770  /* the wait takes really long, restart the
2771  * operation */
2772  b.release();
2773 
2774  std::this_thread::yield();
2775 
2776  m = mask().load(std::memory_order_acquire);
2777 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
2778  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
2779 #endif
2780  }
2781 
2782  if (result) {
2783  result->my_node = node.get_persistent_ptr(this->my_pool_uuid);
2784  result->my_hash = h;
2785  }
2786 
2787  check_growth(m, new_size);
2788 
2789  return inserted;
2790 }
2791 
2792 template <typename Key, typename T, typename Hash, typename KeyEqual,
2793  typename MutexType, typename ScopedLockType>
2794 template <typename K>
2795 bool
2796 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2797  ScopedLockType>::internal_erase(const K &key)
2798 {
2799  node_ptr_t n;
2800  hashcode_type const h = hasher{}(key);
2801  hashcode_type m = mask().load(std::memory_order_acquire);
2802 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
2803  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
2804 #endif
2805 
2806  pool_base pop = get_pool_base();
2807 
2808 restart : {
2809  /* lock scope */
2810  /* get bucket */
2811  bucket_accessor b(this, h & m,
2812  scoped_lock_traits_type::initial_rw_state(true));
2813 
2814 search:
2815  node_ptr_t *p = &b->node_list;
2816  n = *p;
2817 
2818  while (n &&
2819  !key_equal{}(key,
2820  detail::static_persistent_pool_pointer_cast<node>(
2821  n)(this->my_pool_uuid)
2822  ->item.first)) {
2823  p = &n(this->my_pool_uuid)->next;
2824  n = *p;
2825  }
2826 
2827  if (!n) {
2828  /* not found, but mask could be changed */
2829  if (check_mask_race(h, m))
2830  goto restart;
2831 
2832  return false;
2833  } else if (!b.is_writer() &&
2834  !scoped_lock_traits_type::upgrade_to_writer(b)) {
2835  if (check_mask_race(h, m)) /* contended upgrade, check mask */
2836  goto restart;
2837 
2838  goto search;
2839  }
2840 
2841  persistent_ptr<node> del = n(this->my_pool_uuid);
2842 
2843  {
2844  /* We cannot remove this element immediately because
2845  * other threads might work with this element via
2846  * accessors. The item_locker required to wait while
2847  * other threads use the node. */
2848  const_accessor acc;
2849  if (!try_acquire_item(&acc, del->mutex, true)) {
2850  /* the wait takes really long, restart the operation */
2851  b.release();
2852 
2853  std::this_thread::yield();
2854 
2855  m = mask().load(std::memory_order_acquire);
2856 
2857  goto restart;
2858  }
2859  }
2860 
2861  /* Only one thread can delete it due to write lock on the bucket */
2862  transaction::run(pop, [&] {
2863  *p = del->next;
2864  delete_node(del);
2865  });
2866 
2867  --(this->my_size.get_rw());
2868  pop.persist(this->my_size);
2869 }
2870 
2871  return true;
2872 }
2873 
2874 template <typename Key, typename T, typename Hash, typename KeyEqual,
2875  typename MutexType, typename ScopedLockType>
2876 void
2877 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::swap(
2878  concurrent_hash_map &table)
2879 {
2880  internal_swap(table);
2881 }
2882 
2883 template <typename Key, typename T, typename Hash, typename KeyEqual,
2884  typename MutexType, typename ScopedLockType>
2885 void
2886 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::rehash(
2887  size_type sz)
2888 {
2889  concurrent_hash_map_internal::check_outside_tx();
2890 
2891  reserve(sz);
2892  hashcode_type m = mask();
2893 
2894  /* only the last segment should be scanned for rehashing size or first
2895  * index of the last segment */
2896  hashcode_type b = (m + 1) >> 1;
2897 
2898  /* zero or power of 2 */
2899  assert((b & (b - 1)) == 0);
2900 
2901  for (; b <= m; ++b) {
2902  bucket *bp = get_bucket(b);
2903 
2904  concurrent_hash_map_internal::assert_not_locked<mutex_t,
2905  scoped_t>(
2906  bp->mutex);
2907  /* XXX Need to investigate if this statement is needed */
2908  if (bp->is_rehashed(std::memory_order_relaxed) == false)
2909  rehash_bucket<true>(bp, b);
2910  }
2911 }
2912 
2913 template <typename Key, typename T, typename Hash, typename KeyEqual,
2914  typename MutexType, typename ScopedLockType>
2915 void
2916 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::clear()
2917 {
2918  hashcode_type m = mask();
2919 
2920  assert((m & (m + 1)) == 0);
2921 
2922 #ifndef NDEBUG
2923  /* check consistency */
2924  for (segment_index_t b = 0; b <= m; ++b) {
2925  bucket *bp = get_bucket(b);
2926  concurrent_hash_map_internal::assert_not_locked<mutex_t,
2927  scoped_t>(
2928  bp->mutex);
2929  }
2930 #endif
2931 
2932  pool_base pop = get_pool_base();
2933  { /* transaction scope */
2934 
2935  transaction::manual tx(pop);
2936 
2937  this->my_size.get_rw() = 0;
2938  segment_index_t s = segment_traits_t::segment_index_of(m);
2939 
2940  assert(s + 1 == this->block_table_size ||
2941  !segment_facade_t(this->my_table, s + 1).is_valid());
2942 
2943  do {
2944  clear_segment(s);
2945  } while (s-- > 0);
2946 
2947  mask().store(embedded_buckets - 1, std::memory_order_relaxed);
2948 
2949  transaction::commit();
2950  }
2951 }
2952 
2953 template <typename Key, typename T, typename Hash, typename KeyEqual,
2954  typename MutexType, typename ScopedLockType>
2955 void
2956 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2957  ScopedLockType>::clear_segment(segment_index_t s)
2958 {
2959  segment_facade_t segment(this->my_table, s);
2960 
2961  assert(segment.is_valid());
2962 
2963  size_type sz = segment.size();
2964  for (segment_index_t i = 0; i < sz; ++i) {
2965  for (node_ptr_t n = segment[i].node_list; n;
2966  n = segment[i].node_list) {
2967  segment[i].node_list = n(this->my_pool_uuid)->next;
2968  delete_node(n);
2969  }
2970  }
2971 
2972  if (s >= segment_traits_t::embedded_segments)
2973  segment.disable();
2974 }
2975 
2976 template <typename Key, typename T, typename Hash, typename KeyEqual,
2977  typename MutexType, typename ScopedLockType>
2978 void
2979 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::
2980  internal_copy(const concurrent_hash_map &source)
2981 {
2982  reserve(source.my_size.get_ro());
2983  internal_copy(source.begin(), source.end());
2984 }
2985 
2986 template <typename Key, typename T, typename Hash, typename KeyEqual,
2987  typename MutexType, typename ScopedLockType>
2988 template <typename I>
2989 void
2990 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
2991  ScopedLockType>::internal_copy(I first, I last)
2992 {
2993  hashcode_type m = mask();
2994 
2995  for (; first != last; ++first) {
2996  hashcode_type h = hasher{}(first->first);
2997  bucket *b = get_bucket(h & m);
2998 
2999  assert(b->is_rehashed(std::memory_order_relaxed));
3000 
3001  detail::persistent_pool_ptr<node> p;
3002  insert_new_node(b, p, *first);
3003  }
3004 }
3005 
3006 template <typename Key, typename T, typename Hash, typename KeyEqual,
3007  typename MutexType, typename ScopedLockType>
3008 inline bool
3009 operator==(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3010  ScopedLockType> &a,
3011  const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3012  ScopedLockType> &b)
3013 {
3014  if (a.size() != b.size())
3015  return false;
3016 
3017  typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3018  ScopedLockType>::const_iterator
3019  i(a.begin()),
3020  i_end(a.end());
3021 
3022  typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3023  ScopedLockType>::const_iterator j,
3024  j_end(b.end());
3025 
3026  for (; i != i_end; ++i) {
3027  j = b.equal_range(i->first).first;
3028 
3029  if (j == j_end || !(i->second == j->second))
3030  return false;
3031  }
3032 
3033  return true;
3034 }
3035 
3036 template <typename Key, typename T, typename Hash, typename KeyEqual,
3037  typename MutexType, typename ScopedLockType>
3038 inline bool
3039 operator!=(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3040  ScopedLockType> &a,
3041  const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3042  ScopedLockType> &b)
3043 {
3044  return !(a == b);
3045 }
3046 
3047 template <typename Key, typename T, typename Hash, typename KeyEqual,
3048  typename MutexType, typename ScopedLockType>
3049 inline void
3050 swap(concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType> &a,
3051  concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType> &b)
3052 {
3053  a.swap(b);
3054 }
3055 
3056 } /* namespace obj */
3057 } /* namespace pmem */
3058 
3059 #endif /* PMEMOBJ_CONCURRENT_HASH_MAP_HPP */
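
Example usage. The listing above ends the header; the sketch below is not part of it. It assumes the container is included as <libpmemobj++/container/concurrent_hash_map.hpp> (the non-experimental path for this release), and the pool file path, the "example" layout name and the root structure are illustrative placeholders only. Error handling is omitted.

#include <libpmemobj++/container/concurrent_hash_map.hpp>
#include <libpmemobj++/make_persistent.hpp>
#include <libpmemobj++/p.hpp>
#include <libpmemobj++/persistent_ptr.hpp>
#include <libpmemobj++/pool.hpp>
#include <libpmemobj++/transaction.hpp>

using map_type =
    pmem::obj::concurrent_hash_map<pmem::obj::p<int>, pmem::obj::p<int>>;

struct root {
    pmem::obj::persistent_ptr<map_type> map;
};

int
main()
{
    /* "/path/to/poolfile" and the "example" layout are placeholders */
    auto pop = pmem::obj::pool<root>::open("/path/to/poolfile", "example");
    auto r = pop.root();

    if (r->map == nullptr) {
        /* first use: allocate the map inside a transaction */
        pmem::obj::transaction::run(pop, [&] {
            r->map = pmem::obj::make_persistent<map_type>();
        });
    } else {
        /* after a process restart: rebuild the map's runtime state */
        r->map->runtime_initialize();
    }

    /* insert a key/value pair; returns false if the key already exists */
    r->map->insert(map_type::value_type(1, 42));

    {
        /* write access: the accessor holds a per-item write lock */
        map_type::accessor acc;
        if (r->map->find(acc, 1)) {
            acc->second.get_rw() += 1;
            pop.persist(acc->second); /* make the update durable */
        }
    } /* item lock released when the accessor goes out of scope */

    r->map->erase(1);

    pop.close();
    return 0;
}

The accessor and const_accessor classes combine element access with per-item locking, so an element stays locked for as long as the accessor that found it is alive; runtime_initialize() is intended to be called after each process restart, before any other operation on the map.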