PMDK C++ bindings  1.7
This is the C++ bindings documentation for PMDK's libpmemobj.
concurrent_hash_map.hpp
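Before the listing, a minimal usage sketch of pmem::obj::experimental::concurrent_hash_map (not part of the header). It assumes a pre-created pool file; the path "/path/to/pool", the layout name, and the root structure are illustrative assumptions.

#include <libpmemobj++/experimental/concurrent_hash_map.hpp>
#include <libpmemobj++/make_persistent.hpp>
#include <libpmemobj++/p.hpp>
#include <libpmemobj++/persistent_ptr.hpp>
#include <libpmemobj++/pool.hpp>
#include <libpmemobj++/transaction.hpp>

#include <cassert>

using namespace pmem::obj;

using map_type = experimental::concurrent_hash_map<p<int>, p<int>>;

struct root {
    persistent_ptr<map_type> map; /* hypothetical root structure */
};

int
main()
{
    /* "/path/to/pool" and "layout" are placeholders; pool creation omitted */
    auto pop = pool<root>::open("/path/to/pool", "layout");
    auto r = pop.root();

    if (r->map == nullptr) {
        /* first run: allocate the map transactionally */
        transaction::run(pop, [&] { r->map = make_persistent<map_type>(); });
    } else {
        /* every later run: restore runtime state; with the default
         * argument the element count is recalculated, which also
         * covers an unclean shutdown */
        r->map->initialize();
    }

    {
        map_type::accessor acc; /* holds a write lock on the element */
        r->map->insert(acc, map_type::value_type(1, 42));
    } /* lock released when the accessor goes out of scope */

    map_type::const_accessor cacc; /* holds a shared (read) lock */
    if (r->map->find(cacc, 1))
        assert(cacc->second.get_ro() == 42);

    pop.close();
    return 0;
}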
1 /*
2  * Copyright 2019, Intel Corporation
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  *
8  * * Redistributions of source code must retain the above copyright
9  * notice, this list of conditions and the following disclaimer.
10  *
11  * * Redistributions in binary form must reproduce the above copyright
12  * notice, this list of conditions and the following disclaimer in
13  * the documentation and/or other materials provided with the
14  * distribution.
15  *
16  * * Neither the name of the copyright holder nor the names of its
17  * contributors may be used to endorse or promote products derived
18  * from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
38 #ifndef PMEMOBJ_CONCURRENT_HASH_MAP_HPP
39 #define PMEMOBJ_CONCURRENT_HASH_MAP_HPP
40 
46 #include <libpmemobj++/mutex.hpp>
47 #include <libpmemobj++/p.hpp>
50 
51 #if LIBPMEMOBJ_CPP_USE_TBB_RW_MUTEX
52 #include "tbb/spin_rw_mutex.h"
53 #else
54 #include <libpmemobj++/shared_mutex.hpp>
55 #endif
56 
57 #include <libpmemobj++/detail/persistent_pool_ptr.hpp>
58 
59 #include <atomic>
60 #include <cassert>
61 #include <functional>
62 #include <initializer_list>
63 #include <iterator> // for std::distance
64 #include <memory>
65 #include <mutex>
66 #include <thread>
67 #include <type_traits>
68 
69 #if _MSC_VER
70 #include <intrin.h>
71 #include <windows.h>
72 #endif
73 
74 namespace std
75 {
79 template <typename T>
80 struct hash<pmem::obj::p<T>> {
81  size_t
82  operator()(const pmem::obj::p<T> &x) const
83  {
84  return hash<T>()(x.get_ro());
85  }
86 };
87 } /* namespace std */
88 
89 namespace pmem
90 {
91 namespace obj
92 {
93 namespace experimental
94 {
95 
96 using namespace pmem::obj;
97 
98 template <typename Key, typename T, typename Hash = std::hash<Key>,
99  typename KeyEqual = std::equal_to<Key>>
100 class concurrent_hash_map;
101 
103 namespace internal
104 {
105 template <typename Hash>
106 using transparent_key_equal = typename Hash::transparent_key_equal;
107 
108 template <typename Hash>
109 using has_transparent_key_equal = detail::supports<Hash, transparent_key_equal>;
110 
111 template <typename Hash, typename Pred,
112  bool = has_transparent_key_equal<Hash>::value>
113 struct key_equal_type {
114  using type = typename Hash::transparent_key_equal;
115 };
116 
117 template <typename Hash, typename Pred>
118 struct key_equal_type<Hash, Pred, false> {
119  using type = Pred;
120 };
121 
122 template <typename Mutex>
123 void
124 assert_not_locked(Mutex &mtx)
125 {
126 #ifndef NDEBUG
127  assert(mtx.try_lock());
128  mtx.unlock();
129 #else
130  (void)mtx;
131 #endif
132 }
133 
134 template <typename Mutex>
135 void
136 assert_not_locked(pmem::obj::experimental::v<Mutex> &mtx)
137 {
138  assert_not_locked<Mutex>(mtx.get());
139 }
140 
141 #if _MSC_VER
142 static inline int
143 Log2(uint64_t x)
144 {
145  unsigned long j;
146  _BitScanReverse64(&j, x);
147  return static_cast<int>(j);
148 }
149 #elif __GNUC__ || __clang__
150 static inline int
151 Log2(uint64_t x)
152 {
153  // __builtin_clzll counts the number of leading zeroes
154  return 8 * int(sizeof(x)) - __builtin_clzll(x) - 1;
155 }
156 #else
157 static inline int
158 Log2(uint64_t x)
159 {
160  x |= (x >> 1);
161  x |= (x >> 2);
162  x |= (x >> 4);
163  x |= (x >> 8);
164  x |= (x >> 16);
165  x |= (x >> 32);
166 
167  static const int table[64] = {
168  0, 58, 1, 59, 47, 53, 2, 60, 39, 48, 27, 54, 33, 42, 3, 61,
169  51, 37, 40, 49, 18, 28, 20, 55, 30, 34, 11, 43, 14, 22, 4, 62,
170  57, 46, 52, 38, 26, 32, 41, 50, 36, 17, 19, 29, 10, 13, 21, 56,
171  45, 25, 31, 35, 16, 9, 12, 44, 24, 15, 8, 23, 7, 6, 5, 63};
172 
173  return table[(x * 0x03f6eaf2cd271461) >> 58];
174 }
175 #endif
176 
177 class atomic_backoff {
183  static const int32_t LOOPS_BEFORE_YIELD = 16;
184  int32_t count;
185 
186  static inline void
187  __pause(int32_t delay)
188  {
189  for (; delay > 0; --delay) {
190 #if _MSC_VER
191  YieldProcessor();
192 #elif __GNUC__ && (__i386__ || __x86_64__)
193  // Only i386 and x86-64 have the pause instruction
194  __builtin_ia32_pause();
195 #endif
196  }
197  }
198 
199 public:
203  atomic_backoff(const atomic_backoff &) = delete;
207  atomic_backoff &operator=(const atomic_backoff &) = delete;
208 
210  /* In many cases, an object of this type is initialized eagerly on hot
211  * path, as in for(atomic_backoff b; ; b.pause()) {...} For this reason,
212  * the construction cost must be very small! */
213  atomic_backoff() : count(1)
214  {
215  }
216 
220  atomic_backoff(bool) : count(1)
221  {
222  pause();
223  }
224 
228  void
229  pause()
230  {
231  if (count <= LOOPS_BEFORE_YIELD) {
232  __pause(count);
233  /* Pause twice as long the next time. */
234  count *= 2;
235  } else {
236  /* Pause is so long that we might as well yield the CPU to
237  * the scheduler. */
238  std::this_thread::yield();
239  }
240  }
241 
245  bool
246  bounded_pause()
247  {
248  __pause(count);
249  if (count < LOOPS_BEFORE_YIELD) {
250  /* Pause twice as long the next time. */
251  count *= 2;
252  return true;
253  } else {
254  return false;
255  }
256  }
257 
258  void
259  reset()
260  {
261  count = 1;
262  }
263 }; /* class atomic_backoff */
264 
269 template <typename T, typename InitFunctor>
270 class atomic_wrapper {
271 public:
272  using value_type = T;
273  using atomic_type = std::atomic<value_type>;
274  using init_type = InitFunctor;
275 
276  atomic_wrapper() noexcept
277  {
278  }
279 
280  atomic_wrapper(const value_type &val) noexcept : my_atomic(val)
281  {
282  }
283 
284  template <typename... Args>
285  atomic_wrapper(Args &&... args)
286  : my_atomic(init_type()(std::forward<Args>(args)...))
287  {
288  }
289 
290  operator atomic_type &() noexcept
291  {
292  return my_atomic;
293  }
294 
295 private:
296  atomic_type my_atomic;
297 };
298 
303 template <typename T, typename U, typename... Args>
304 void
305 make_persistent_object(pool_base &pop, persistent_ptr<U> &ptr, Args &&... args)
306 {
307 #if LIBPMEMOBJ_CPP_CONCURRENT_HASH_MAP_USE_ATOMIC_ALLOCATOR
308  make_persistent_atomic<T>(pop, ptr, std::forward<Args>(args)...);
309 #else
310  try {
311  transaction::manual tx(pop);
312  ptr = make_persistent<T>(std::forward<Args>(args)...);
313  transaction::commit();
314  } catch (...) {
315  throw std::bad_alloc();
316  }
317 #endif
318 }
319 
320 #if !LIBPMEMOBJ_CPP_USE_TBB_RW_MUTEX
321 class shared_mutex_scoped_lock {
322  using rw_mutex_type = pmem::obj::shared_mutex;
323 
324 public:
325  shared_mutex_scoped_lock(const shared_mutex_scoped_lock &) = delete;
326  shared_mutex_scoped_lock &
327  operator=(const shared_mutex_scoped_lock &) = delete;
328 
330  shared_mutex_scoped_lock() : mutex(nullptr), is_writer(false)
331  {
332  }
333 
335  shared_mutex_scoped_lock(rw_mutex_type &m, bool write = true)
336  : mutex(nullptr)
337  {
338  acquire(m, write);
339  }
340 
342  ~shared_mutex_scoped_lock()
343  {
344  if (mutex)
345  release();
346  }
347 
349  void
350  acquire(rw_mutex_type &m, bool write = true)
351  {
352  is_writer = write;
353  mutex = &m;
354  if (write)
355  mutex->lock();
356  else
357  mutex->lock_shared();
358  }
359 
368  bool
369  upgrade_to_writer()
370  {
371  assert(!is_writer);
372  mutex->unlock_shared();
373  is_writer = true;
374  mutex->lock();
375  return false;
376  }
377 
381  void
382  release()
383  {
384  assert(mutex);
385  rw_mutex_type *m = mutex;
386  mutex = nullptr;
387  if (is_writer) {
388  m->unlock();
389  } else {
390  m->unlock_shared();
391  }
392  }
393 
400  bool
401  downgrade_to_reader()
402  {
403  assert(is_writer);
404  return false;
405  }
406 
411  bool
412  try_acquire(rw_mutex_type &m, bool write = true)
413  {
414  assert(!mutex);
415  bool result;
416  is_writer = write;
417  result = write ? m.try_lock() : m.try_lock_shared();
418  if (result)
419  mutex = &m;
420  return result;
421  }
422 
423 protected:
428  rw_mutex_type *mutex;
433  bool is_writer;
434 }; /* class shared_mutex_scoped_lock */
435 #endif
436 
437 struct hash_map_node_base {
438 #if LIBPMEMOBJ_CPP_USE_TBB_RW_MUTEX
439 
440  using mutex_t = tbb::spin_rw_mutex;
441 
443  using scoped_t = tbb::spin_rw_mutex::scoped_lock;
444 #else
445 
446  using mutex_t = pmem::obj::shared_mutex;
447 
449  using scoped_t = shared_mutex_scoped_lock;
450 #endif
451 
453  using node_base_ptr_t = detail::persistent_pool_ptr<hash_map_node_base>;
454 
456  node_base_ptr_t next;
457 
459  mutex_t mutex;
460 
461  hash_map_node_base() : next(OID_NULL)
462  {
463  }
464 
465  hash_map_node_base(const node_base_ptr_t &_next) : next(_next)
466  {
467  }
468 
469  hash_map_node_base(node_base_ptr_t &&_next) : next(std::move(_next))
470  {
471  }
472 
474  hash_map_node_base(const hash_map_node_base &) = delete;
475 
477  hash_map_node_base &operator=(const hash_map_node_base &) = delete;
478 }; /* struct hash_map_node_base */
479 
481 static detail::persistent_pool_ptr<hash_map_node_base> const empty_bucket =
482  OID_NULL;
483 
488 template <typename Bucket>
489 class segment_traits {
490 public:
492  using segment_index_t = size_t;
493  using size_type = size_t;
494  using bucket_type = Bucket;
495 
496 protected:
498  constexpr static size_type max_allocation_size = PMEMOBJ_MAX_ALLOC_SIZE;
499 
501  constexpr static segment_index_t first_big_block = 27;
502  /* TODO: avoid hardcoded value; need constexpr similar to:
503  * Log2(max_allocation_size / sizeof(bucket_type)) */
504 
506  constexpr static size_type big_block_size = size_type(1)
507  << first_big_block;
508 
509  /* Block size in bytes cannot exceed max_allocation_size */
510  static_assert((big_block_size * sizeof(bucket_type)) <
511  max_allocation_size,
512  "Block size exceeds max_allocation_size");
513 
515  constexpr static segment_index_t
516  first_block_in_segment(segment_index_t seg)
517  {
518  return seg < first_big_block
519  ? seg
520  : (first_big_block +
521  (segment_index_t(1) << (seg - first_big_block)) - 1);
522  }
523 
525  constexpr static size_type
526  blocks_in_segment(segment_index_t seg)
527  {
528  return seg < first_big_block
529  ? segment_index_t(1)
530  : segment_index_t(1) << (seg - first_big_block);
531  }
532 
534  constexpr static size_type
535  block_size(segment_index_t b)
536  {
537  return b < first_big_block ? segment_size(b ? b : 1)
538  : big_block_size;
539  }
540 
541 public:
543  constexpr static segment_index_t embedded_segments = 1;
544 
546  constexpr static size_type embedded_buckets = 1 << embedded_segments;
547 
549  constexpr static segment_index_t number_of_segments = 32;
550 
552  static const size_type first_block = 8;
553 
555  constexpr static segment_index_t
556  number_of_blocks()
557  {
558  return first_block_in_segment(number_of_segments);
559  }
560 
562  static segment_index_t
563  segment_index_of(size_type index)
564  {
565  return segment_index_t(Log2(index | 1));
566  }
567 
569  constexpr static segment_index_t
570  segment_base(segment_index_t k)
571  {
572  return (segment_index_t(1) << k) & ~segment_index_t(1);
573  }
574 
576  constexpr static size_type
577  segment_size(segment_index_t k)
578  {
579  return size_type(1) << k; // fake value for k == 0
580  }
581  static_assert(
582  embedded_segments < first_big_block,
583  "Number of embedded segments cannot exceed max_allocation_size");
584 }; /* End of class segment_traits */
585 
602 template <typename BlockTable, typename SegmentTraits, bool is_const>
603 class segment_facade_impl : public SegmentTraits {
604 private:
605  using traits_type = SegmentTraits;
606  using traits_type::block_size;
607  using traits_type::blocks_in_segment;
608  using traits_type::embedded_buckets;
609  using traits_type::embedded_segments;
610  using traits_type::first_block;
611  using traits_type::first_block_in_segment;
612  using traits_type::segment_base;
613  using traits_type::segment_size;
614 
615 public:
616  using table_reference =
617  typename std::conditional<is_const, const BlockTable &,
618  BlockTable &>::type;
619 
620  using table_pointer =
621  typename std::conditional<is_const, const BlockTable *,
622  BlockTable *>::type;
623 
624  using bucket_type = typename traits_type::bucket_type;
625  using segment_index_t = typename traits_type::segment_index_t;
626  using size_type = typename traits_type::size_type;
627 
629  segment_facade_impl(table_reference table, segment_index_t s)
630  : my_table(&table), my_seg(s)
631  {
632  assert(my_seg < traits_type::number_of_segments);
633  }
634 
636  segment_facade_impl(const segment_facade_impl &src)
637  : my_table(src.my_table), my_seg(src.my_seg)
638  {
639  }
640 
641  segment_facade_impl(segment_facade_impl &&src) = default;
642 
644  segment_facade_impl &
645  operator=(const segment_facade_impl &src)
646  {
647  my_table = src.my_table;
648  my_seg = src.my_seg;
649  return *this;
650  }
651 
653  segment_facade_impl &
654  operator=(segment_facade_impl &&src)
655  {
656  my_table = src.my_table;
657  my_seg = src.my_seg;
658  return *this;
659  }
660 
667  bucket_type &operator[](size_type i) const
668  {
669  assert(i < size());
670 
671  segment_index_t table_block = first_block_in_segment(my_seg);
672  size_type b_size = block_size(table_block);
673 
674  table_block += i / b_size;
675  i = i % b_size;
676 
677  return (*my_table)[table_block][static_cast<std::ptrdiff_t>(i)];
678  }
679 
683  segment_facade_impl &
684  operator++()
685  {
686  ++my_seg;
687  return *this;
688  }
689 
693  segment_facade_impl
694  operator++(int)
695  {
696  segment_facade_impl tmp = *this;
697  ++(*this);
698  return tmp;
699  }
700 
704  segment_facade_impl &
705  operator--()
706  {
707  --my_seg;
708  return *this;
709  }
710 
714  segment_facade_impl
715  operator--(int)
716  {
717  segment_facade_impl tmp = *this;
718  --(*this);
719  return tmp;
720  }
721 
725  segment_facade_impl &
726  operator+=(segment_index_t off)
727  {
728  my_seg += off;
729  return *this;
730  }
731 
735  segment_facade_impl &
736  operator-=(segment_index_t off)
737  {
738  my_seg -= off;
739  return *this;
740  }
741 
745  segment_facade_impl
746  operator+(segment_index_t off) const
747  {
748  return segment_facade_impl(*(this->my_table),
749  this->my_seg + off);
750  }
751 
755  segment_facade_impl
756  operator-(segment_index_t off) const
757  {
758  return segment_facade_impl(*(this->my_table),
759  this->my_seg - off);
760  }
761 
765  void
766  enable(pool_base &pop)
767  {
768  assert(my_seg >= embedded_segments);
769 
770  if (my_seg < first_block) {
771  enable_first_block(pop);
772  } else {
773  enable_big_segment(pop);
774  }
775  }
776 
780  void
781  disable()
782  {
783  assert(my_seg >= embedded_segments);
784 
785  if (my_seg < first_block) {
786  if (my_seg == embedded_segments) {
787  size_type sz = segment_size(first_block) -
788  embedded_buckets;
789  delete_persistent<bucket_type[]>(
790  (*my_table)[my_seg], sz);
791  }
792  (*my_table)[my_seg] = nullptr;
793  } else {
794  block_range blocks = segment_blocks(my_seg);
795 
796  for (segment_index_t b = blocks.first;
797  b < blocks.second; ++b) {
798  if ((*my_table)[b] != nullptr) {
799  delete_persistent<bucket_type[]>(
800  (*my_table)[b], block_size(b));
801  (*my_table)[b] = nullptr;
802  }
803  }
804  }
805  }
806 
810  constexpr size_type
811  size() const
812  {
813  return segment_size(my_seg ? my_seg : 1);
814  }
815 
821  bool
822  is_valid() const
823  {
824  block_range blocks = segment_blocks(my_seg);
825 
826  for (segment_index_t b = blocks.first; b < blocks.second; ++b) {
827  if ((*my_table)[b] == nullptr)
828  return false;
829  }
830 
831  return true;
832  }
833 
834 private:
835  using block_range = std::pair<segment_index_t, segment_index_t>;
836 
840  static block_range
841  segment_blocks(segment_index_t seg)
842  {
843  segment_index_t begin = first_block_in_segment(seg);
844 
845  return block_range(begin, begin + blocks_in_segment(seg));
846  }
847 
848  void
849  enable_first_block(pool_base &pop)
850  {
851  assert(my_seg == embedded_segments);
852  try {
853  transaction::manual tx(pop);
854 
855  size_type sz =
856  segment_size(first_block) - embedded_buckets;
857  (*my_table)[my_seg] =
858  make_persistent<bucket_type[]>(sz);
859 
860  persistent_ptr<bucket_type> base =
861  (*my_table)[embedded_segments].raw();
862 
863  for (segment_index_t s = my_seg + 1; s < first_block;
864  ++s) {
865  std::ptrdiff_t off =
866  static_cast<std::ptrdiff_t>(
867  segment_base(s) -
868  segment_base(my_seg));
869 
870  (*my_table)[s] = (base + off).raw();
871  }
872 
873  transaction::commit();
874  } catch (...) {
875  throw std::bad_alloc();
876  }
877  }
878 
879  void
880  enable_big_segment(pool_base &pop)
881  {
882  block_range blocks = segment_blocks(my_seg);
883 #if LIBPMEMOBJ_CPP_CONCURRENT_HASH_MAP_USE_ATOMIC_ALLOCATOR
884  for (segment_index_t b = blocks.first; b < blocks.second; ++b) {
885  if ((*my_table)[b] == nullptr)
886  make_persistent_atomic<bucket_type[]>(
887  pop, (*my_table)[b], block_size(b));
888  }
889 #else
890  try {
891  transaction::manual tx(pop);
892 
893  for (segment_index_t b = blocks.first;
894  b < blocks.second; ++b) {
895  assert((*my_table)[b] == nullptr);
896  (*my_table)[b] = make_persistent<bucket_type[]>(
897  block_size(b));
898  }
899 
900  transaction::commit();
901  } catch (...) {
902  throw std::bad_alloc();
903  }
904 #endif
905  }
906 
908  table_pointer my_table;
909 
911  segment_index_t my_seg;
912 }; /* End of class segment_facade_impl */
913 
918 class hash_map_base {
919 public:
921  using size_type = size_t;
922 
924  using hashcode_t = size_t;
925 
927  using node_base = hash_map_node_base;
928 
930  using node_base_ptr_t = detail::persistent_pool_ptr<node_base>;
931 
933  using tmp_node_ptr_t = persistent_ptr<node_base>;
934 
936  struct bucket {
937 #if LIBPMEMOBJ_CPP_USE_TBB_RW_MUTEX
938 
939  using mutex_t = tbb::spin_rw_mutex;
940 
942  using scoped_t = tbb::spin_rw_mutex::scoped_lock;
943 #else
944 
945  using mutex_t = pmem::obj::shared_mutex;
946 
948  using scoped_t = shared_mutex_scoped_lock;
949 #endif
950 
952  mutex_t mutex;
953 
955  p<std::atomic<bool>> rehashed;
956 
958  node_base_ptr_t node_list;
959 
961  tmp_node_ptr_t tmp_node;
962 
964  bucket() : node_list(empty_bucket), tmp_node(nullptr)
965  {
966 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
967  VALGRIND_HG_DISABLE_CHECKING(&rehashed,
968  sizeof(rehashed));
969 #endif
970  rehashed.get_rw() = false;
971  }
972 
978  bool
979  is_rehashed(std::memory_order order)
980  {
981  return rehashed.get_ro().load(order);
982  }
983 
984  void
985  set_rehashed(std::memory_order order)
986  {
987  rehashed.get_rw().store(true, order);
988  }
989 
991  bucket(const bucket &) = delete;
992 
994  bucket &operator=(const bucket &) = delete;
995  }; /* End of struct bucket */
996 
998  using segment_traits_t = segment_traits<bucket>;
999 
1001  using segment_index_t = typename segment_traits_t::segment_index_t;
1002 
1004  static const size_type embedded_buckets =
1005  segment_traits_t::embedded_buckets;
1006 
1008  static const size_type first_block = segment_traits_t::first_block;
1009 
1011  constexpr static size_type block_table_size =
1012  segment_traits_t::number_of_blocks();
1013 
1015  using segment_ptr_t = persistent_ptr<bucket[]>;
1016 
1018  using bucket_ptr_t = persistent_ptr<bucket>;
1019 
1021  using blocks_table_t = segment_ptr_t[block_table_size];
1022 
1023  class calculate_mask {
1024  public:
1025  hashcode_t
1026  operator()(const hash_map_base *map_base) const
1027  {
1028  return map_base->calculate_mask();
1029  }
1030  }; /* class calculate_mask */
1031 
1033  using mask_type = v<atomic_wrapper<hashcode_t, calculate_mask>>;
1034 
1036  p<uint64_t> my_pool_uuid;
1037 
1039  /* my_mask always restored on restart. */
1040  mask_type my_mask;
1041 
1046  blocks_table_t my_table;
1047 
1048  /* It must be in a separate cache line from my_mask for
1049  * performance reasons */
1051  p<std::atomic<size_type>> my_size;
1052 
1054  bucket my_embedded_segment[embedded_buckets];
1055 
1057  using segment_enable_mutex_t = pmem::obj::mutex;
1058 
1060  segment_enable_mutex_t my_segment_enable_mutex;
1061 
1063  static bool
1064  is_valid(void *ptr)
1065  {
1066  return reinterpret_cast<uintptr_t>(ptr) > uintptr_t(63);
1067  }
1068 
1069  template <typename U>
1070  static bool
1071  is_valid(const detail::persistent_pool_ptr<U> &ptr)
1072  {
1073  return ptr.raw() > uint64_t(63);
1074  }
1075 
1076  template <typename U>
1077  static bool
1078  is_valid(const persistent_ptr<U> &ptr)
1079  {
1080  return ptr.raw().off > uint64_t(63);
1081  }
1082 
1083  const std::atomic<hashcode_t> &
1084  mask() const noexcept
1085  {
1086  return const_cast<mask_type &>(my_mask).get(this);
1087  }
1088 
1089  std::atomic<hashcode_t> &
1090  mask() noexcept
1091  {
1092  return my_mask.get(this);
1093  }
1094 
1096  using const_segment_facade_t =
1097  segment_facade_impl<blocks_table_t, segment_traits_t, true>;
1098 
1100  using segment_facade_t =
1101  segment_facade_impl<blocks_table_t, segment_traits_t, false>;
1102 
1104  hash_map_base()
1105  {
1106  static_assert(
1107  sizeof(size_type) == sizeof(std::atomic<size_type>),
1108  "std::atomic should have the same layout as underlying integral type");
1109 
1110 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
1111  VALGRIND_HG_DISABLE_CHECKING(&my_size, sizeof(my_size));
1112  VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
1113 #endif
1114 
1115  my_size.get_rw() = 0;
1116  PMEMoid oid = pmemobj_oid(this);
1117 
1118  assert(!OID_IS_NULL(oid));
1119 
1120  my_pool_uuid = oid.pool_uuid_lo;
1121 
1122  pool_base pop = get_pool_base();
1123  /* enable embedded segments */
1124  for (size_type i = 0; i < segment_traits_t::embedded_segments;
1125  ++i) {
1126  my_table[i] =
1127  pmemobj_oid(my_embedded_segment +
1128  segment_traits_t::segment_base(i));
1129  segment_facade_t seg(my_table, i);
1130  mark_rehashed<false>(pop, seg);
1131  }
1132 
1133  assert(mask() == embedded_buckets - 1);
1134  }
1135 
1139  hashcode_t
1140  calculate_mask() const
1141  {
1142 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
1143  VALGRIND_HG_DISABLE_CHECKING(&my_size, sizeof(my_size));
1144  VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
1145 #endif
1146  hashcode_t m = embedded_buckets - 1;
1147 
1148  const_segment_facade_t segment(
1149  my_table, segment_traits_t::embedded_segments);
1150 
1151  while (segment.is_valid()) {
1152  m += segment.size();
1153  ++segment;
1154  }
1155  return m;
1156  }
1157 
1158  void
1159  restore_size(size_type actual_size)
1160  {
1161  my_size.get_rw().store(actual_size, std::memory_order_relaxed);
1162  pool_base pop = get_pool_base();
1163  pop.persist(my_size);
1164  }
1165 
1169  template <bool Flush = true>
1170  void
1171  mark_rehashed(pool_base &pop, segment_facade_t &segment)
1172  {
1173  for (size_type i = 0; i < segment.size(); ++i) {
1174  bucket *b = &(segment[i]);
1175 
1176  assert_not_locked(b->mutex);
1177 
1178  b->set_rehashed(std::memory_order_relaxed);
1179  }
1180 
1181  if (Flush) {
1182  /* Flush in separate loop to avoid read-after-flush */
1183  for (size_type i = 0; i < segment.size(); ++i) {
1184  bucket *b = &(segment[i]);
1185  pop.flush(b->rehashed);
1186  }
1187 
1188  pop.drain();
1189  }
1190  }
1191 
1195  void
1196  add_to_bucket(bucket *b, pool_base &pop)
1197  {
1198  assert(b->is_rehashed(std::memory_order_relaxed) == true);
1199  assert(is_valid(b->tmp_node));
1200  assert(b->tmp_node->next == b->node_list);
1201 
1202  b->node_list = b->tmp_node; /* bucket is locked */
1203  pop.persist(&(b->node_list), sizeof(b->node_list));
1204  }
1205 
1209  void
1210  enable_segment(segment_index_t k, bool is_initial = false)
1211  {
1212  assert(k);
1213 
1214  pool_base pop = get_pool_base();
1215  size_type sz;
1216 
1217  if (k >= first_block) {
1218  segment_facade_t new_segment(my_table, k);
1219 
1220  sz = new_segment.size();
1221  if (!new_segment.is_valid())
1222  new_segment.enable(pop);
1223 
1224  if (is_initial) {
1225  mark_rehashed(pop, new_segment);
1226  }
1227 
1228  /* double it to get entire capacity of the container */
1229  sz <<= 1;
1230  } else {
1231  /* the first block */
1232  assert(k == segment_traits_t::embedded_segments);
1233 
1234  for (segment_index_t i = k; i < first_block; ++i) {
1235  segment_facade_t new_segment(my_table, i);
1236 
1237  if (!new_segment.is_valid())
1238  new_segment.enable(pop);
1239 
1240  if (is_initial) {
1241  mark_rehashed(pop, new_segment);
1242  }
1243  }
1244 
1245  sz = segment_traits_t::segment_size(first_block);
1246  }
1247 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
1248  ANNOTATE_HAPPENS_BEFORE(&my_mask);
1249 #endif
1250  mask().store(sz - 1, std::memory_order_release);
1251  }
1252 
1257  bucket *
1258  get_bucket(hashcode_t h) const
1259  {
1260  segment_index_t s = segment_traits_t::segment_index_of(h);
1261 
1262  h -= segment_traits_t::segment_base(s);
1263 
1264  const_segment_facade_t segment(my_table, s);
1265 
1266  assert(segment.is_valid());
1267 
1268  return &(segment[h]);
1269  }
1270 
1274  inline bool
1275  check_mask_race(hashcode_t h, hashcode_t &m) const
1276  {
1277  hashcode_t m_now, m_old = m;
1278 
1279  m_now = mask().load(std::memory_order_acquire);
1280 
1281  if (m_old != m_now)
1282  return check_rehashing_collision(h, m_old, m = m_now);
1283 
1284  return false;
1285  }
1286 
1290  bool
1291  check_rehashing_collision(hashcode_t h, hashcode_t m_old,
1292  hashcode_t m) const
1293  {
1294  assert(m_old != m);
1295 
1296  if ((h & m_old) != (h & m)) {
1297  /* mask changed for this hashcode - a rare event; the condition
1298  * above proves that 'h' has some other bits set besides
1299  * 'm_old'; find the next applicable mask after m_old */
1300 
1301  for (++m_old; !(h & m_old); m_old <<= 1)
1302  ;
1303 
1304  m_old = (m_old << 1) - 1; /* get full mask from a bit */
1305 
1306  assert((m_old & (m_old + 1)) == 0 && m_old <= m);
1307 
1308  /* check whether it is rehashing/ed */
1309  bucket *b = get_bucket(h & m_old);
1310  return b->is_rehashed(std::memory_order_acquire);
1311  }
1312 
1313  return false;
1314  }
1315 
1319  void
1320  correct_bucket(bucket *b)
1321  {
1322  pool_base pop = get_pool_base();
1323 
1324  if (is_valid(b->tmp_node)) {
1325  if (b->tmp_node->next == b->node_list) {
1326  insert_new_node(pop, b);
1327  } else {
1328  b->tmp_node.raw_ptr()->off = 0;
1329  pop.persist(&(b->tmp_node.raw().off),
1330  sizeof(b->tmp_node.raw().off));
1331  }
1332  }
1333  }
1334 
1339  size_type
1340  insert_new_node(pool_base &pop, bucket *b)
1341  {
1342  add_to_bucket(b, pop);
1343 
1344  /* prefix form is to enforce allocation after the first item
1345  * inserted */
1346  size_t sz = ++(my_size.get_rw());
1347  pop.persist(&my_size, sizeof(my_size));
1348 
1349  b->tmp_node.raw_ptr()->off = 0;
1350  pop.persist(&(b->tmp_node.raw().off),
1351  sizeof(b->tmp_node.raw().off));
1352 
1353  return sz;
1354  }
1355 
1360  bool
1361  check_growth(hashcode_t m, size_type sz)
1362  {
1363  if (sz >= m) {
1364  segment_index_t new_seg = static_cast<segment_index_t>(
1365  Log2(m + 1)); /* optimized segment_index_of */
1366 
1367  assert(segment_facade_t(my_table, new_seg - 1)
1368  .is_valid());
1369 
1370  std::unique_lock<segment_enable_mutex_t> lock(
1371  my_segment_enable_mutex, std::try_to_lock);
1372 
1373  if (lock) {
1374  if (mask().load(std::memory_order_relaxed) ==
1375  m) {
1376  /* Otherwise, another thread has already enabled
1377  * this segment */
1378  enable_segment(new_seg);
1379 
1380  return true;
1381  }
1382  }
1383  }
1384 
1385  return false;
1386  }
1387 
1391  void
1392  reserve(size_type buckets)
1393  {
1394  if (buckets == 0)
1395  return;
1396 
1397  --buckets;
1398 
1399  bool is_initial = (my_size.get_ro() == 0);
1400 
1401  for (size_type m = mask(); buckets > m; m = mask())
1402  enable_segment(
1403  segment_traits_t::segment_index_of(m + 1),
1404  is_initial);
1405  }
1406 
1411  void
1412  internal_swap(hash_map_base &table)
1413  {
1414  pool_base p = get_pool_base();
1415  try {
1416  transaction::manual tx(p);
1417 
1418  this->my_pool_uuid.swap(table.my_pool_uuid);
1419  /* Swap my_mask */
1420  this->mask() = table.mask().exchange(
1421  this->mask(), std::memory_order_relaxed);
1422 
1423  /* Swap my_size */
1424  this->my_size.get_rw() =
1425  table.my_size.get_rw().exchange(
1426  this->my_size.get_ro(),
1427  std::memory_order_relaxed);
1428 
1429  for (size_type i = 0; i < embedded_buckets; ++i)
1430  this->my_embedded_segment[i].node_list.swap(
1431  table.my_embedded_segment[i].node_list);
1432 
1433  for (size_type i = segment_traits_t::embedded_segments;
1434  i < block_table_size; ++i)
1435  this->my_table[i].swap(table.my_table[i]);
1436 
1437  transaction::commit();
1438  } catch (const pmem::transaction_error &e) {
1439  throw std::runtime_error(e);
1440  }
1441  }
1442 
1447  pool_base
1448  get_pool_base()
1449  {
1450  PMEMobjpool *pop =
1451  pmemobj_pool_by_oid(PMEMoid{my_pool_uuid, 0});
1452 
1453  return pool_base(pop);
1454  }
1455 }; /* End of class hash_map_base */
1456 
1462 template <typename Container, bool is_const>
1463 class hash_map_iterator {
1464 public:
1465  using iterator_category = std::forward_iterator_tag;
1466  using difference_type = ptrdiff_t;
1467  using map_type = Container;
1468  using value_type = typename map_type::value_type;
1469  using node = typename map_type::node;
1470  using bucket = typename map_type::bucket;
1471  using map_ptr = typename std::conditional<is_const, const map_type *,
1472  map_type *>::type;
1473  using reference =
1474  typename std::conditional<is_const,
1475  typename map_type::const_reference,
1476  typename map_type::reference>::type;
1477  using pointer =
1478  typename std::conditional<is_const,
1479  typename map_type::const_pointer,
1480  typename map_type::pointer>::type;
1481 
1482  template <typename C, bool M, bool U>
1483  friend bool operator==(const hash_map_iterator<C, M> &i,
1484  const hash_map_iterator<C, U> &j);
1485 
1486  template <typename C, bool M, bool U>
1487  friend bool operator!=(const hash_map_iterator<C, M> &i,
1488  const hash_map_iterator<C, U> &j);
1489 
1490  friend class hash_map_iterator<map_type, true>;
1491 
1492 #if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
1493 private:
1494  template <typename Key, typename T, typename Hash, typename KeyEqual>
1495  friend class experimental::concurrent_hash_map;
1496 #else
1497 public: /* workaround */
1498 #endif
1499  hash_map_iterator(map_ptr map, size_t index)
1500  : my_map(map), my_index(index), my_bucket(nullptr), my_node(nullptr)
1501  {
1502  if (my_index <= my_map->mask()) {
1503  bucket_accessor acc(my_map, my_index);
1504  my_bucket = acc.get();
1505  my_node = static_cast<node *>(
1506  my_bucket->node_list.get(my_map->my_pool_uuid));
1507 
1508  if (!hash_map_base::is_valid(my_node)) {
1509  advance_to_next_bucket();
1510  }
1511  }
1512  }
1513 
1514 public:
1516  hash_map_iterator() : my_map(), my_index(), my_bucket(), my_node()
1517  {
1518  }
1519 
1521  hash_map_iterator(const hash_map_iterator &other)
1522  : my_map(other.my_map),
1523  my_index(other.my_index),
1524  my_bucket(other.my_bucket),
1525  my_node(other.my_node)
1526  {
1527  }
1528 
1530  template <typename U = void,
1531  typename = typename std::enable_if<is_const, U>::type>
1532  hash_map_iterator(const hash_map_iterator<map_type, false> &other)
1533  : my_map(other.my_map),
1534  my_index(other.my_index),
1535  my_bucket(other.my_bucket),
1536  my_node(other.my_node)
1537  {
1538  }
1539 
1541  reference operator*() const
1542  {
1543  assert(hash_map_base::is_valid(my_node));
1544  return my_node->item;
1545  }
1546 
1548  pointer operator->() const
1549  {
1550  return &operator*();
1551  }
1552 
1554  hash_map_iterator &
1555  operator++()
1556  {
1557  my_node = static_cast<node *>(
1558  my_node->next.get((my_map->my_pool_uuid)));
1559 
1560  if (!my_node)
1561  advance_to_next_bucket();
1562 
1563  return *this;
1564  }
1565 
1567  hash_map_iterator
1568  operator++(int)
1569  {
1570  hash_map_iterator old(*this);
1571  operator++();
1572  return old;
1573  }
1574 
1575 private:
1577  map_ptr my_map;
1578 
1580  size_t my_index;
1581 
1583  bucket *my_bucket;
1584 
1586  node *my_node;
1587 
1588  class bucket_accessor {
1589  public:
1590  bucket_accessor(map_ptr m, size_t index)
1591  {
1592  my_bucket = m->get_bucket(index);
1593  const_cast<map_type *>(m)->correct_bucket(my_bucket);
1594  }
1595 
1596  bucket *
1597  get() const
1598  {
1599  return my_bucket;
1600  }
1601 
1602  private:
1603  bucket *my_bucket;
1604  };
1605 
1606  void
1607  advance_to_next_bucket()
1608  {
1609  size_t k = my_index + 1;
1610 
1611  assert(my_bucket);
1612 
1613  while (k <= my_map->mask()) {
1614  bucket_accessor acc(my_map, k);
1615  my_bucket = acc.get();
1616 
1617  if (hash_map_base::is_valid(my_bucket->node_list)) {
1618  my_node = static_cast<node *>(
1619  my_bucket->node_list.get(
1620  my_map->my_pool_uuid));
1621 
1622  my_index = k;
1623 
1624  return;
1625  }
1626 
1627  ++k;
1628  }
1629 
1630  my_bucket = 0;
1631  my_node = 0;
1632  my_index = k;
1633  }
1634 };
1635 
1636 template <typename Container, bool M, bool U>
1637 bool
1638 operator==(const hash_map_iterator<Container, M> &i,
1639  const hash_map_iterator<Container, U> &j)
1640 {
1641  return i.my_node == j.my_node && i.my_map == j.my_map;
1642 }
1643 
1644 template <typename Container, bool M, bool U>
1645 bool
1646 operator!=(const hash_map_iterator<Container, M> &i,
1647  const hash_map_iterator<Container, U> &j)
1648 {
1649  return i.my_node != j.my_node || i.my_map != j.my_map;
1650 }
1651 
1652 } /* namespace internal */
1658 template <typename Key, typename T, typename Hash, typename KeyEqual>
1659 class concurrent_hash_map : protected internal::hash_map_base {
1660  template <typename Container, bool is_const>
1661  friend class internal::hash_map_iterator;
1662 
1663 public:
1664  using key_type = Key;
1665  using mapped_type = T;
1666  using value_type = std::pair<const Key, T>;
1667  using size_type = hash_map_base::size_type;
1668  using difference_type = ptrdiff_t;
1669  using pointer = value_type *;
1670  using const_pointer = const value_type *;
1671  using reference = value_type &;
1672  using const_reference = const value_type &;
1673  using iterator =
1674  internal::hash_map_iterator<concurrent_hash_map, false>;
1675  using const_iterator =
1676  internal::hash_map_iterator<concurrent_hash_map, true>;
1677  using hasher = Hash;
1678  using key_equal =
1679  typename internal::key_equal_type<Hash, KeyEqual>::type;
1680 
1681 protected:
1682  friend class const_accessor;
1683  struct node;
1684 
1688  struct node : public node_base {
1689  value_type item;
1690  node(const Key &key, const node_base_ptr_t &_next = OID_NULL)
1691  : node_base(_next), item(key, T())
1692  {
1693  }
1694 
1695  node(const Key &key, const T &t,
1696  const node_base_ptr_t &_next = OID_NULL)
1697  : node_base(_next), item(key, t)
1698  {
1699  }
1700 
1701  node(const Key &key, T &&t, node_base_ptr_t &&_next = OID_NULL)
1702  : node_base(std::move(_next)), item(key, std::move(t))
1703  {
1704  }
1705 
1706  node(value_type &&i, node_base_ptr_t &&_next = OID_NULL)
1707  : node_base(std::move(_next)), item(std::move(i))
1708  {
1709  }
1710 
1711  node(value_type &&i, const node_base_ptr_t &_next = OID_NULL)
1712  : node_base(_next), item(std::move(i))
1713  {
1714  }
1715 
1716  template <typename... Args>
1717  node(Args &&... args, node_base_ptr_t &&_next = OID_NULL)
1718  : node_base(std::forward<node_base_ptr_t>(_next)),
1719  item(std::forward<Args>(args)...)
1720  {
1721  }
1722 
1723  node(const value_type &i,
1724  const node_base_ptr_t &_next = OID_NULL)
1725  : node_base(_next), item(i)
1726  {
1727  }
1728  };
1729 
1730  using persistent_node_ptr_t = detail::persistent_pool_ptr<node>;
1731 
1732  void
1733  delete_node(const node_base_ptr_t &n)
1734  {
1735  delete_persistent<node>(
1736  detail::static_persistent_pool_pointer_cast<node>(n)
1737  .get_persistent_ptr(my_pool_uuid));
1738  }
1739 
1740  static void
1741  allocate_node_copy_construct(pool_base &pop,
1742  persistent_ptr<node> &node_ptr,
1743  const void *param,
1744  const node_base_ptr_t &next = OID_NULL)
1745  {
1746  const value_type *v = static_cast<const value_type *>(param);
1747  internal::make_persistent_object<node>(pop, node_ptr, *v, next);
1748  }
1749 
1750  static void
1751  allocate_node_move_construct(pool_base &pop,
1752  persistent_ptr<node> &node_ptr,
1753  const void *param,
1754  const node_base_ptr_t &next = OID_NULL)
1755  {
1756  const value_type *v = static_cast<const value_type *>(param);
1757  internal::make_persistent_object<node>(
1758  pop, node_ptr, std::move(*const_cast<value_type *>(v)),
1759  next);
1760  }
1761 
1762  static void
1763  allocate_node_default_construct(pool_base &pop,
1764  persistent_ptr<node> &node_ptr,
1765  const void *param,
1766  const node_base_ptr_t &next = OID_NULL)
1767  {
1768  const Key &key = *static_cast<const Key *>(param);
1769  internal::make_persistent_object<node>(pop, node_ptr, key,
1770  next);
1771  }
1772 
1773  static void
1774  do_not_allocate_node(pool_base &, persistent_ptr<node> &node_ptr,
1775  const void *,
1776  const node_base_ptr_t &next = OID_NULL)
1777  {
1778  assert(false);
1779  }
1780 
1781  template <typename K>
1782  persistent_node_ptr_t
1783  search_bucket(const K &key, bucket *b) const
1784  {
1785  assert(b->is_rehashed(std::memory_order_relaxed));
1786  assert(!is_valid(b->tmp_node));
1787 
1788  persistent_node_ptr_t n =
1789  detail::static_persistent_pool_pointer_cast<node>(
1790  b->node_list);
1791 
1792  while (is_valid(n) &&
1793  !key_equal{}(key, n.get(my_pool_uuid)->item.first)) {
1794  n = detail::static_persistent_pool_pointer_cast<node>(
1795  n.get(my_pool_uuid)->next);
1796  }
1797 
1798  return n;
1799  }
1800 
1805  class bucket_accessor : public bucket::scoped_t {
1806  bucket *my_b;
1807 
1808  public:
1809  bucket_accessor(concurrent_hash_map *base, const hashcode_t h,
1810  bool writer = false)
1811  {
1812  acquire(base, h, writer);
1813  }
1814 
1819  inline void
1820  acquire(concurrent_hash_map *base, const hashcode_t h,
1821  bool writer = false)
1822  {
1823  my_b = base->get_bucket(h);
1824 
1825  if (my_b->is_rehashed(std::memory_order_acquire) ==
1826  false &&
1827  try_acquire(my_b->mutex, /*write=*/true)) {
1828  if (my_b->is_rehashed(
1829  std::memory_order_relaxed) ==
1830  false) {
1831  /* recursive rehashing */
1832  base->rehash_bucket<false>(my_b, h);
1833  }
1834  } else {
1835  bucket::scoped_t::acquire(my_b->mutex, writer);
1836  }
1837 
1838  if (is_valid(my_b->tmp_node)) {
1839  /* The condition is true only when an insert
1840  * operation was interrupted on a previous run */
1841  if (!this->is_writer())
1842  this->upgrade_to_writer();
1843  assert(this->is_writer());
1844  base->correct_bucket(my_b);
1845  }
1846 
1847  assert(my_b->is_rehashed(std::memory_order_relaxed));
1848  }
1849 
1853  bool
1854  is_writer() const
1855  {
1856  return bucket::scoped_t::is_writer;
1857  }
1858 
1863  bucket *
1864  get() const
1865  {
1866  return my_b;
1867  }
1868 
1873  bucket *operator->() const
1874  {
1875  return this->get();
1876  }
1877  };
1878 
1882  class serial_bucket_accessor {
1883  bucket *my_b;
1884 
1885  public:
1886  serial_bucket_accessor(concurrent_hash_map *base,
1887  const hashcode_t h, bool writer = false)
1888  {
1889  acquire(base, h, writer);
1890  }
1891 
1892  /*
1893  * Find a bucket by masked hashcode, optionally rehash
1894  */
1895  inline void
1896  acquire(concurrent_hash_map *base, const hashcode_t h,
1897  bool writer = false)
1898  {
1899  my_b = base->get_bucket(h);
1900 
1901  if (my_b->is_rehashed(std::memory_order_relaxed) ==
1902  false) {
1903  /* recursive rehashing */
1904  base->rehash_bucket<true>(my_b, h);
1905  }
1906 
1907  base->correct_bucket(my_b);
1908 
1909  assert(my_b->is_rehashed(std::memory_order_relaxed));
1910  }
1911 
1918  bool
1919  is_writer() const
1920  {
1921  return true;
1922  }
1923 
1930  bool
1931  upgrade_to_writer() const
1932  {
1933  return true;
1934  }
1935 
1940  bucket *
1941  get() const
1942  {
1943  return my_b;
1944  }
1945 
1950  bucket *operator->() const
1951  {
1952  return this->get();
1953  }
1954  };
1955 
1956  hashcode_t
1957  get_hash_code(node_base_ptr_t &n)
1958  {
1959  return hasher{}(
1960  detail::static_persistent_pool_pointer_cast<node>(n)(
1961  my_pool_uuid)
1962  ->item.first);
1963  }
1964 
1965  template <bool serial>
1966  void
1967  rehash_bucket(bucket *b_new, const hashcode_t h)
1968  {
1969  using accessor_type = typename std::conditional<
1970  serial, serial_bucket_accessor, bucket_accessor>::type;
1971 
1972  /* The first two buckets should always be rehashed */
1973  assert(h > 1);
1974 
1975  pool_base pop = get_pool_base();
1976  node_base_ptr_t *p_new = &(b_new->node_list);
1977  bool restore_after_crash = *p_new != nullptr;
1978 
1979  /* get parent mask from the topmost bit */
1980  hashcode_t mask = (1u << internal::Log2(h)) - 1;
1981  assert((h & mask) < h);
1982  bool writer = false;
1983  accessor_type b_old(this, h & mask, writer);
1984 
1985  /* get full mask for new bucket */
1986  mask = (mask << 1) | 1;
1987  assert((mask & (mask + 1)) == 0 && (h & mask) == h);
1988  restart:
1989  for (node_base_ptr_t *p_old = &(b_old->node_list), n = *p_old;
1990  is_valid(n); n = *p_old) {
1991  hashcode_t c = get_hash_code(n);
1992 #ifndef NDEBUG
1993  hashcode_t bmask = h & (mask >> 1);
1994 
1995  bmask = bmask == 0
1996  ? 1 /* minimal mask of parent bucket */
1997  : (1u << (internal::Log2(bmask) + 1)) - 1;
1998 
1999  assert((c & bmask) == (h & bmask));
2000 #endif
2001 
2002  if ((c & mask) == h) {
2003  if (!b_old.is_writer() &&
2004  !b_old.upgrade_to_writer()) {
2005  goto restart;
2006  /* node ptr can be invalid due to
2007  * concurrent erase */
2008  }
2009 
2010  if (restore_after_crash) {
2011  while (*p_new != nullptr &&
2012  (mask & get_hash_code(*p_new)) ==
2013  h &&
2014  *p_new != n) {
2015  p_new = &((*p_new)(my_pool_uuid)
2016  ->next);
2017  }
2018 
2019  restore_after_crash = false;
2020  }
2021 
2022  /* Add to new b_new */
2023  *p_new = n;
2024  pop.persist(p_new, sizeof(*p_new));
2025 
2026  /* exclude from b_old */
2027  *p_old = n(my_pool_uuid)->next;
2028  pop.persist(p_old, sizeof(*p_old));
2029 
2030  p_new = &(n(my_pool_uuid)->next);
2031  } else {
2032  /* iterate to next item */
2033  p_old = &(n(my_pool_uuid)->next);
2034  }
2035  }
2036 
2037  if (restore_after_crash) {
2038  while (*p_new != nullptr &&
2039  (mask & get_hash_code(*p_new)) == h)
2040  p_new = &((*p_new)(my_pool_uuid)->next);
2041  }
2042 
2043  *p_new = nullptr;
2044  pop.persist(p_new, sizeof(*p_new));
2045 
2046  /* mark rehashed */
2047  b_new->set_rehashed(std::memory_order_release);
2048  pop.persist(b_new->rehashed);
2049  }
2050 
2051  struct call_clear_on_leave {
2052  concurrent_hash_map *my_ch_map;
2053  call_clear_on_leave(concurrent_hash_map *a_ch_map)
2054  : my_ch_map(a_ch_map)
2055  {
2056  }
2057 
2058  void
2059  dismiss()
2060  {
2061  my_ch_map = 0;
2062  }
2063 
2064  ~call_clear_on_leave()
2065  {
2066  if (my_ch_map)
2067  my_ch_map->clear();
2068  }
2069  };
2070 
2071 public:
2072  class accessor;
2076  class const_accessor
2077  : private node::scoped_t /*which derived from no_copy*/ {
2078  friend class concurrent_hash_map<Key, T, Hash, KeyEqual>;
2079  friend class accessor;
2080  using node_ptr_t = persistent_ptr<node>;
2081 
2082  public:
2086  using value_type =
2087  const typename concurrent_hash_map::value_type;
2088 
2093  bool
2094  empty() const
2095  {
2096  return !my_node;
2097  }
2098 
2102  void
2103  release()
2104  {
2105  if (my_node) {
2106  node::scoped_t::release();
2107  my_node = 0;
2108  }
2109  }
2110 
2114  const_reference operator*() const
2115  {
2116  assert(my_node);
2117 
2118  return my_node->item;
2119  }
2120 
2124  const_pointer operator->() const
2125  {
2126  return &operator*();
2127  }
2128 
2132  const_accessor() : my_node(OID_NULL)
2133  {
2134  }
2135 
2139  ~const_accessor()
2140  {
2141  my_node = OID_NULL; // scoped lock's release() is called
2142  // in its destructor
2143  }
2144 
2145  protected:
2146  bool
2147  is_writer()
2148  {
2149  return node::scoped_t::is_writer;
2150  }
2151 
2152  node_ptr_t my_node;
2153 
2154  hashcode_t my_hash;
2155  };
2156 
2161  class accessor : public const_accessor {
2162  public:
2164  using value_type = typename concurrent_hash_map::value_type;
2165 
2167  reference operator*() const
2168  {
2169  assert(this->my_node);
2170 
2171  return this->my_node->item;
2172  }
2173 
2175  pointer operator->() const
2176  {
2177  return &operator*();
2178  }
2179  };
2180 
2184  concurrent_hash_map() : internal::hash_map_base()
2185  {
2186  }
2187 
2192  concurrent_hash_map(size_type n) : internal::hash_map_base()
2193  {
2194  reserve(n);
2195  }
2196 
2200  concurrent_hash_map(const concurrent_hash_map &table)
2201  : internal::hash_map_base()
2202  {
2203  reserve(table.size());
2204 
2205  internal_copy(table);
2206  }
2207 
2211  concurrent_hash_map(concurrent_hash_map &&table)
2212  : internal::hash_map_base()
2213  {
2214  swap(table);
2215  }
2216 
2220  template <typename I>
2221  concurrent_hash_map(I first, I last)
2222  {
2223  reserve(static_cast<size_type>(std::distance(first, last)));
2224 
2225  internal_copy(first, last);
2226  }
2227 
2231  concurrent_hash_map(std::initializer_list<value_type> il)
2232  {
2233  reserve(il.size());
2234 
2235  internal_copy(il.begin(), il.end());
2236  }
2237 
2243  void
2244  initialize(bool graceful_shutdown = false)
2245  {
2246  if (!graceful_shutdown) {
2247  auto actual_size =
2248  std::distance(this->begin(), this->end());
2249  assert(actual_size >= 0);
2250  this->restore_size(size_type(actual_size));
2251  } else {
2252  assert(this->size() ==
2253  size_type(std::distance(this->begin(),
2254  this->end())));
2255  }
2256  }
2257 
2263  concurrent_hash_map &
2264  operator=(const concurrent_hash_map &table)
2265  {
2266  if (this != &table) {
2267  clear();
2268  internal_copy(table);
2269  }
2270 
2271  return *this;
2272  }
2273 
2279  concurrent_hash_map &
2280  operator=(std::initializer_list<value_type> il)
2281  {
2282  clear();
2283 
2284  reserve(il.size());
2285 
2286  internal_copy(il.begin(), il.end());
2287 
2288  return *this;
2289  }
2290 
2297  void rehash(size_type n = 0);
2298 
2304  void clear();
2305 
2309  ~concurrent_hash_map()
2310  {
2311  clear();
2312  }
2313 
2314  //------------------------------------------------------------------------
2315  // STL support - not thread-safe methods
2316  //------------------------------------------------------------------------
2317 
2322  iterator
2323  begin()
2324  {
2325  return iterator(this, 0);
2326  }
2327 
2332  iterator
2333  end()
2334  {
2335  return iterator(this, mask() + 1);
2336  }
2337 
2342  const_iterator
2343  begin() const
2344  {
2345  return const_iterator(this, 0);
2346  }
2347 
2352  const_iterator
2353  end() const
2354  {
2355  return const_iterator(this, mask() + 1);
2356  }
2357 
2361  size_type
2362  size() const
2363  {
2364  return my_size.get_ro();
2365  }
2366 
2370  bool
2371  empty() const
2372  {
2373  return my_size.get_ro() == 0;
2374  }
2375 
2379  size_type
2380  max_size() const
2381  {
2382  return (~size_type(0)) / sizeof(node);
2383  }
2384 
2388  size_type
2389  bucket_count() const
2390  {
2391  return mask() + 1;
2392  }
2393 
2397  void swap(concurrent_hash_map &table);
2398 
2399  //------------------------------------------------------------------------
2400  // concurrent map operations
2401  //------------------------------------------------------------------------
2402 
2406  size_type
2407  count(const Key &key) const
2408  {
2409  return const_cast<concurrent_hash_map *>(this)
2410  ->lookup</*insert*/ false>(key, nullptr, nullptr,
2411  /*write=*/false,
2412  &do_not_allocate_node);
2413  }
2414 
2424  template <typename K,
2425  typename = typename std::enable_if<
2426  internal::has_transparent_key_equal<hasher>::value,
2427  K>::type>
2428  size_type
2429  count(const K &key) const
2430  {
2431  return const_cast<concurrent_hash_map *>(this)
2432  ->lookup</*insert*/ false>(key, nullptr, nullptr,
2433  /*write=*/false,
2434  &do_not_allocate_node);
2435  }
2436 
2441  bool
2442  find(const_accessor &result, const Key &key) const
2443  {
2444  result.release();
2445 
2446  return const_cast<concurrent_hash_map *>(this)
2447  ->lookup</*insert*/ false>(key, nullptr, &result,
2448  /*write=*/false,
2449  &do_not_allocate_node);
2450  }
2451 
2463  template <typename K,
2464  typename = typename std::enable_if<
2465  internal::has_transparent_key_equal<hasher>::value,
2466  K>::type>
2467  bool
2468  find(const_accessor &result, const K &key) const
2469  {
2470  result.release();
2471 
2472  return const_cast<concurrent_hash_map *>(this)
2473  ->lookup</*insert*/ false>(key, nullptr, &result,
2474  /*write=*/false,
2475  &do_not_allocate_node);
2476  }
2477 
2482  bool
2483  find(accessor &result, const Key &key)
2484  {
2485  result.release();
2486 
2487  return lookup</*insert*/ false>(key, nullptr, &result,
2488  /*write*/ true,
2489  &do_not_allocate_node);
2490  }
2491 
2503  template <typename K,
2504  typename = typename std::enable_if<
2505  internal::has_transparent_key_equal<hasher>::value,
2506  K>::type>
2507  bool
2508  find(accessor &result, const K &key)
2509  {
2510  result.release();
2511 
2512  return lookup</*insert*/ false>(key, nullptr, &result,
2513  /*write*/ true,
2514  &do_not_allocate_node);
2515  }
2522  bool
2523  insert(const_accessor &result, const Key &key)
2524  {
2525  result.release();
2526 
2527  return lookup</*insert*/ true>(
2528  key, &key, &result,
2529  /*write=*/false, &allocate_node_default_construct);
2530  }
2531 
2538  bool
2539  insert(accessor &result, const Key &key)
2540  {
2541  result.release();
2542 
2543  return lookup</*insert*/ true>(
2544  key, &key, &result,
2545  /*write=*/true, &allocate_node_default_construct);
2546  }
2547 
2554  bool
2555  insert(const_accessor &result, const value_type &value)
2556  {
2557  result.release();
2558 
2559  return lookup</*insert*/ true>(value.first, &value, &result,
2560  /*write=*/false,
2561  &allocate_node_copy_construct);
2562  }
2563 
2569  bool
2570  insert(accessor &result, const value_type &value)
2571  {
2572  result.release();
2573 
2574  return lookup</*insert*/ true>(value.first, &value, &result,
2575  /*write=*/true,
2576  &allocate_node_copy_construct);
2577  }
2578 
2583  bool
2584  insert(const value_type &value)
2585  {
2586  return lookup</*insert*/ true>(value.first, &value, nullptr,
2587  /*write=*/false,
2588  &allocate_node_copy_construct);
2589  }
2590 
2597  bool
2598  insert(const_accessor &result, value_type &&value)
2599  {
2600  return generic_move_insert(result, std::move(value));
2601  }
2602 
2609  bool
2610  insert(accessor &result, value_type &&value)
2611  {
2612  return generic_move_insert(result, std::move(value));
2613  }
2614 
2620  bool
2621  insert(value_type &&value)
2622  {
2623  return generic_move_insert(accessor_not_used(),
2624  std::move(value));
2625  }
2626 
2631  template <typename I>
2632  void
2633  insert(I first, I last)
2634  {
2635  for (; first != last; ++first)
2636  insert(*first);
2637  }
2638 
2643  void
2644  insert(std::initializer_list<value_type> il)
2645  {
2646  insert(il.begin(), il.end());
2647  }
2648 
2654  bool
2655  erase(const Key &key)
2656  {
2657  return internal_erase(key);
2658  }
2659 
2672  template <typename K,
2673  typename = typename std::enable_if<
2674  internal::has_transparent_key_equal<hasher>::value,
2675  K>::type>
2676  bool
2677  erase(const K &key)
2678  {
2679  return internal_erase(key);
2680  }
2681 
2682 protected:
2686  template <bool OpInsert, typename K>
2687  bool lookup(const K &key, const void *param, const_accessor *result,
2688  bool write,
2689  void (*allocate_node)(pool_base &, persistent_ptr<node> &,
2690  const void *,
2691  const node_base_ptr_t &));
2692 
2693  template <typename K>
2694  bool internal_erase(const K &key);
2695 
2696  struct accessor_not_used {
2697  void
2698  release()
2699  {
2700  }
2701  };
2702 
2703  friend const_accessor *
2704  accessor_location(accessor_not_used const &)
2705  {
2706  return nullptr;
2707  }
2708 
2709  friend const_accessor *
2710  accessor_location(const_accessor &a)
2711  {
2712  return &a;
2713  }
2714 
2715  friend bool
2716  is_write_access_needed(accessor const &)
2717  {
2718  return true;
2719  }
2720 
2721  friend bool
2722  is_write_access_needed(const_accessor const &)
2723  {
2724  return false;
2725  }
2726 
2727  friend bool
2728  is_write_access_needed(accessor_not_used const &)
2729  {
2730  return false;
2731  }
2732 
2733  template <typename Accessor>
2734  bool
2735  generic_move_insert(Accessor &&result, value_type &&value)
2736  {
2737  result.release();
2738  return lookup</*insert*/ true>(value.first, &value,
2739  accessor_location(result),
2740  is_write_access_needed(result),
2741  &allocate_node_move_construct);
2742  }
2743 
2744  void clear_segment(segment_index_t s);
2745 
2749  void internal_copy(const concurrent_hash_map &source);
2750 
2751  template <typename I>
2752  void internal_copy(I first, I last);
2753 
2754 }; // class concurrent_hash_map
2755 
2756 template <typename Key, typename T, typename Hash, typename KeyEqual>
2757 template <bool OpInsert, typename K>
2758 bool
2759 concurrent_hash_map<Key, T, Hash, KeyEqual>::lookup(
2760  const K &key, const void *param, const_accessor *result, bool write,
2761  void (*allocate_node)(pool_base &, persistent_ptr<node> &, const void *,
2762  const node_base_ptr_t &))
2763 {
2764  assert(!result || !result->my_node);
2765 
2766  bool return_value = false;
2767  hashcode_t const h = hasher{}(key);
2768  hashcode_t m = mask().load(std::memory_order_acquire);
2769 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
2770  ANNOTATE_HAPPENS_AFTER(&my_mask);
2771 #endif
2772  persistent_node_ptr_t n;
2773  size_type sz = 0;
2774 
2775 restart : { /* lock scope */
2776  assert((m & (m + 1)) == 0);
2777 
2778  return_value = false;
2779 
2780  /* get bucket */
2781  bucket_accessor b(this, h & m);
2782 
2783  /* find a node */
2784  n = search_bucket(key, b.get());
2785 
2786  if (OpInsert) {
2787  /* [opt] insert a key */
2788  if (!n) {
2789  if (!b.is_writer() && !b.upgrade_to_writer()) {
2790  /* Rerun search_bucket, in case another thread
2791  * inserted the item during the upgrade. */
2792  n = search_bucket(key, b.get());
2793  if (is_valid(n)) {
2794  /* unfortunately, it did */
2795  b.downgrade_to_reader();
2796  goto exists;
2797  }
2798  }
2799 
2800  if (check_mask_race(h, m))
2801  goto restart; /* b.release() is done in ~b(). */
2802 
2803  assert(!is_valid(b->tmp_node));
2804 
2805  /* insert and set flag to grow the container */
2806  pool_base pop = get_pool_base();
2807 
2808  allocate_node(pop,
2809  reinterpret_cast<persistent_ptr<node> &>(
2810  b->tmp_node),
2811  param, b->node_list);
2812 
2813  n = b->tmp_node;
2814  sz = insert_new_node(pop, b.get());
2815  return_value = true;
2816  }
2817  } else { /* find or count */
2818  if (!n) {
2819  if (check_mask_race(h, m))
2820  goto restart; /* b.release() is done in ~b(). */
2821  return false;
2822  }
2823 
2824  return_value = true;
2825  }
2826 exists:
2827  if (!result)
2828  goto check_growth;
2829 
2830  /* acquire the item */
2831  if (!result->try_acquire(n.get(my_pool_uuid)->mutex, write)) {
2832  for (internal::atomic_backoff backoff(true);;) {
2833  if (result->try_acquire(n.get(my_pool_uuid)->mutex,
2834  write))
2835  break;
2836 
2837  if (!backoff.bounded_pause()) {
2838  /* the wait takes really long, restart the
2839  * operation */
2840  b.release();
2841 
2842  assert(!OpInsert || !return_value);
2843 
2844  std::this_thread::yield();
2845 
2846  m = mask().load(std::memory_order_acquire);
2847 
2848  goto restart;
2849  }
2850  }
2851  }
2852 } /* lock scope */
2853 
2854  result->my_node = n.get_persistent_ptr(my_pool_uuid);
2855  result->my_hash = h;
2856 check_growth:
2857  /* [opt] grow the container */
2858  check_growth(m, sz);
2859 
2860  return return_value;
2861 }
2862 
2863 template <typename Key, typename T, typename Hash, typename KeyEqual>
2864 template <typename K>
2865 bool
2866 concurrent_hash_map<Key, T, Hash, KeyEqual>::internal_erase(const K &key)
2867 {
2868  node_base_ptr_t n;
2869  hashcode_t const h = hasher{}(key);
2870  hashcode_t m = mask().load(std::memory_order_acquire);
2871  pool_base pop = get_pool_base();
2872 
2873 restart : {
2874  /* lock scope */
2875  /* get bucket */
2876  bucket_accessor b(this, h & m);
2877 
2878 search:
2879  node_base_ptr_t *p = &b->node_list;
2880  n = *p;
2881 
2882  while (is_valid(n) &&
2883  !key_equal{}(key,
2884  detail::static_persistent_pool_pointer_cast<node>(
2885  n)(my_pool_uuid)
2886  ->item.first)) {
2887  p = &n(my_pool_uuid)->next;
2888  n = *p;
2889  }
2890 
2891  if (!n) {
2892  /* not found, but mask could be changed */
2893  if (check_mask_race(h, m))
2894  goto restart;
2895 
2896  return false;
2897  } else if (!b.is_writer() && !b.upgrade_to_writer()) {
2898  if (check_mask_race(h, m)) /* contended upgrade, check mask */
2899  goto restart;
2900 
2901  goto search;
2902  }
2903 
2904  try {
2905  transaction::manual tx(pop);
2906 
2907  tmp_node_ptr_t del = n(my_pool_uuid);
2908 
2909  *p = del->next;
2910 
2911  {
2912  /* We cannot remove this element immediately because
2913  * other threads might work with this element via
2914  * accessors. The item_locker is required to wait while
2915  * other threads use the node. */
2916  typename node::scoped_t item_locker(del->mutex,
2917  /*write=*/true);
2918  }
2919 
2920  /* Only one thread can delete it, due to the write lock on the bucket
2921  */
2922  delete_node(del);
2923 
2924  transaction::commit();
2925  } catch (const pmem::transaction_free_error &e) {
2926  throw std::runtime_error(e);
2927  }
2928 
2929  --(my_size.get_rw());
2930  pop.persist(my_size);
2931 }
2932 
2933  return true;
2934 }
2935 
2936 template <typename Key, typename T, typename Hash, typename KeyEqual>
2937 void
2938 concurrent_hash_map<Key, T, Hash, KeyEqual>::swap(
2939  concurrent_hash_map<Key, T, Hash, KeyEqual> &table)
2940 {
2941  internal_swap(table);
2942 }
2943 
2944 template <typename Key, typename T, typename Hash, typename KeyEqual>
2945 void
2946 concurrent_hash_map<Key, T, Hash, KeyEqual>::rehash(size_type sz)
2947 {
2948  reserve(sz);
2949  hashcode_t m = mask();
2950 
2951  /* only the last segment should be scanned for rehashing;
2952  * (m + 1) >> 1 is the first index of the last segment */
2953  hashcode_t b = (m + 1) >> 1;
2954 
2955  /* zero or power of 2 */
2956  assert((b & (b - 1)) == 0);
2957 
2958  for (; b <= m; ++b) {
2959  bucket *bp = get_bucket(b);
2960  node_base_ptr_t n = bp->node_list;
2961 
2962  assert(is_valid(n) || n == internal::empty_bucket ||
2963  bp->is_rehashed(std::memory_order_relaxed) == false);
2964 
2965  internal::assert_not_locked(bp->mutex);
2966 
2967  if (bp->is_rehashed(std::memory_order_relaxed) == false)
2968  rehash_bucket<true>(bp, b);
2969  }
2970 }
2971 
2972 template <typename Key, typename T, typename Hash, typename KeyEqual>
2973 void
2974 concurrent_hash_map<Key, T, Hash, KeyEqual>::clear()
2975 {
2976  hashcode_t m = mask();
2977 
2978  assert((m & (m + 1)) == 0);
2979 
2980 #ifndef NDEBUG
2981  /* check consistency */
2982  for (segment_index_t b = 0; b <= m; ++b) {
2983  bucket *bp = get_bucket(b);
2984  node_base_ptr_t n = bp->node_list;
2985 
2986  assert(is_valid(n) || n == internal::empty_bucket ||
2987  bp->is_rehashed(std::memory_order_relaxed) == false);
2988 
2989  internal::assert_not_locked(bp->mutex);
2990  }
2991 #endif
2992 
2993  pool_base pop = get_pool_base();
2994  try { /* transaction scope */
2995 
2996  transaction::manual tx(pop);
2997 
2998  my_size.get_rw() = 0;
2999  segment_index_t s = segment_traits_t::segment_index_of(m);
3000 
3001  assert(s + 1 == block_table_size ||
3002  !segment_facade_t(my_table, s + 1).is_valid());
3003 
3004  do {
3005  clear_segment(s);
3006  } while (s-- > 0);
3007 
3007 
3008  transaction::commit();
3009  } catch (const pmem::transaction_error &e) {
3010  throw std::runtime_error(e);
3011  }
3012  mask().store(embedded_buckets - 1, std::memory_order_relaxed);
3013 }
3014 
3015 template <typename Key, typename T, typename Hash, typename KeyEqual>
3016 void
3017 concurrent_hash_map<Key, T, Hash, KeyEqual>::clear_segment(segment_index_t s)
3018 {
3019  segment_facade_t segment(my_table, s);
3020 
3021  assert(segment.is_valid());
3022 
3023  size_type sz = segment.size();
3024  for (segment_index_t i = 0; i < sz; ++i) {
3025  for (node_base_ptr_t n = segment[i].node_list; is_valid(n);
3026  n = segment[i].node_list) {
3027  segment[i].node_list = n(my_pool_uuid)->next;
3028  delete_node(n);
3029  }
3030  }
3031 
3032  if (s >= segment_traits_t::embedded_segments)
3033  segment.disable();
3034 }
3035 
3036 template <typename Key, typename T, typename Hash, typename KeyEqual>
3037 void
3038 concurrent_hash_map<Key, T, Hash, KeyEqual>::internal_copy(
3039  const concurrent_hash_map &source)
3040 {
3041  reserve(source.my_size.get_ro());
3042  internal_copy(source.begin(), source.end());
3043 }
3044 
3045 template <typename Key, typename T, typename Hash, typename KeyEqual>
3046 template <typename I>
3047 void
3048 concurrent_hash_map<Key, T, Hash, KeyEqual>::internal_copy(I first, I last)
3049 {
3050  hashcode_t m = mask();
3051  pool_base pop = get_pool_base();
3052 
3053  for (; first != last; ++first) {
3054  hashcode_t h = hasher{}(first->first);
3055  bucket *b = get_bucket(h & m);
3056 
3057  assert(b->is_rehashed(std::memory_order_relaxed));
3058 
3059  allocate_node_copy_construct(
3060  pop,
3061  reinterpret_cast<persistent_ptr<node> &>(b->tmp_node),
3062  &(*first), b->node_list);
3063 
3064  insert_new_node(pop, b);
3065  }
3066 }
3067 
3068 template <typename Key, typename T, typename Hash, typename KeyEqual>
3069 inline bool
3070 operator==(const concurrent_hash_map<Key, T, Hash, KeyEqual> &a,
3071  const concurrent_hash_map<Key, T, Hash, KeyEqual> &b)
3072 {
3073  if (a.size() != b.size())
3074  return false;
3075 
3076  typename concurrent_hash_map<Key, T, Hash, KeyEqual>::const_iterator i(
3077  a.begin()),
3078  i_end(a.end());
3079 
3080  typename concurrent_hash_map<Key, T, Hash, KeyEqual>::const_iterator j,
3081  j_end(b.end());
3082 
3083  for (; i != i_end; ++i) {
3084  j = b.equal_range(i->first).first;
3085 
3086  if (j == j_end || !(i->second == j->second))
3087  return false;
3088  }
3089 
3090  return true;
3091 }
3092 
3093 template <typename Key, typename T, typename Hash, typename KeyEqual>
3094 inline bool
3095 operator!=(const concurrent_hash_map<Key, T, Hash, KeyEqual> &a,
3096  const concurrent_hash_map<Key, T, Hash, KeyEqual> &b)
3097 {
3098  return !(a == b);
3099 }
3100 
3101 template <typename Key, typename T, typename Hash, typename KeyEqual>
3102 inline void
3103 swap(concurrent_hash_map<Key, T, Hash, KeyEqual> &a,
3104  concurrent_hash_map<Key, T, Hash, KeyEqual> &b)
3105 {
3106  a.swap(b);
3107 }
3108 
3109 } /* namespace experimental */
3110 } /* namespace obj */
3111 } /* namespace pmem */
3112 
3113 #endif /* PMEMOBJ_CONCURRENT_HASH_MAP_HPP */
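
The listing above ends the header. As a quick orientation for readers of this page, the sketch below shows one way the container defined in this header can be used from application code. It is a minimal, illustrative sketch only: the pool file path, layout string, pool size and the root structure are assumptions made for the example, while the calls it makes (the accessor types, insert, find, erase, pool/transaction/make_persistent helpers) are the ones provided by this header and the other libpmemobj++ headers it includes.

#include <libpmemobj++/experimental/concurrent_hash_map.hpp>
#include <libpmemobj++/make_persistent.hpp>
#include <libpmemobj++/persistent_ptr.hpp>
#include <libpmemobj++/pool.hpp>
#include <libpmemobj++/transaction.hpp>

#include <iostream>

using map_type = pmem::obj::experimental::concurrent_hash_map<int, int>;

/* Assumed root structure for the example pool. */
struct root {
	pmem::obj::persistent_ptr<map_type> map;
};

int
main()
{
	/* Illustrative pool file, layout name and size. */
	auto pop = pmem::obj::pool<root>::create(
		"/mnt/pmem/map.pool", "chm_example", 64 * 1024 * 1024);
	auto r = pop.root();

	/* The container itself must reside in persistent memory. */
	pmem::obj::transaction::run(pop, [&] {
		r->map = pmem::obj::make_persistent<map_type>();
	});

	{
		/* insert() with an accessor keeps a write lock on the item. */
		map_type::accessor acc;
		r->map->insert(acc, map_type::value_type(1, 100));
		acc->second = 101; /* modify the mapped value under the lock */
	}

	{
		/* find() with a const_accessor acquires a read lock. */
		map_type::const_accessor cacc;
		if (r->map->find(cacc, 1))
			std::cout << cacc->second << std::endl;
	}

	/* erase() waits until no accessor holds the node, then removes it. */
	r->map->erase(1);

	pop.close();
	return 0;
}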
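Because the container's data lives in persistent memory, it survives process restarts. A second sketch, under the same illustrative assumptions as above (pool path, layout name and root structure), shows re-opening the existing pool and calling the map's initialize() member before using it again; see the documentation of that member earlier in this header for the exact semantics of its graceful_shutdown flag.

#include <libpmemobj++/experimental/concurrent_hash_map.hpp>
#include <libpmemobj++/persistent_ptr.hpp>
#include <libpmemobj++/pool.hpp>

#include <iostream>

using map_type = pmem::obj::experimental::concurrent_hash_map<int, int>;

struct root {
	pmem::obj::persistent_ptr<map_type> map;
};

int
main()
{
	/* Re-open the pool created in the previous sketch. */
	auto pop = pmem::obj::pool<root>::open("/mnt/pmem/map.pool",
					       "chm_example");
	auto map = pop.root()->map;

	/* Re-initialize the map after re-opening the pool in a new process. */
	map->initialize();

	{
		map_type::const_accessor acc;
		if (map->find(acc, 1))
			std::cout << acc->second << std::endl;
	}

	pop.close();
	return 0;
}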