#ifndef PMEMOBJ_CONCURRENT_HASH_MAP_HPP
#define PMEMOBJ_CONCURRENT_HASH_MAP_HPP

#include <libpmemobj++/detail/atomic_backoff.hpp>
#include <libpmemobj++/detail/persistent_pool_ptr.hpp>

#include <initializer_list>
#include <type_traits>

namespace std
{
/* Specialization of std::hash for pmem::obj::p<T>. */
template <typename T>
struct hash<pmem::obj::p<T>> {
	size_t
	operator()(const pmem::obj::p<T> &x) const
	{
		return hash<T>()(x.get_ro());
	}
};
} /* namespace std */
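/*
 * Minimal usage sketch (not part of the original header), assuming
 * <functional>, <cassert> and <libpmemobj++/p.hpp> are available: with the
 * specialization above, a p<T> hashes exactly like the wrapped T, so it can
 * be used as a key wherever std::hash is expected.
 */
#if 0
inline void
hash_p_sketch()
{
	pmem::obj::p<int> key = 42;
	assert(std::hash<pmem::obj::p<int>>{}(key) == std::hash<int>{}(42));
}
#endif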
namespace concurrent_hash_map_internal
{

template <typename SharedMutexT>
class shared_mutex_scoped_lock {
	using rw_mutex_type = SharedMutexT;

public:
	shared_mutex_scoped_lock(const shared_mutex_scoped_lock &) = delete;
	shared_mutex_scoped_lock &
	operator=(const shared_mutex_scoped_lock &) = delete;

	/* Construct a lock that holds no mutex. */
	shared_mutex_scoped_lock() : mutex(nullptr), is_writer(false)
	{
	}

	/* Acquire a lock on the given mutex (exclusive by default). */
	shared_mutex_scoped_lock(rw_mutex_type &m, bool write = true)
	{
		acquire(m, write);
	}

	/* Release the held lock, if any. */
	~shared_mutex_scoped_lock()
	{
		if (mutex)
			release();
	}

	/* Acquire a lock on the given mutex; shared when write is false. */
	void
	acquire(rw_mutex_type &m, bool write = true)
	{
		is_writer = write;
		mutex = &m;
		if (write)
			mutex->lock();
		else
			mutex->lock_shared();
	}

	/* Release the held lock. */
	void
	release()
	{
		rw_mutex_type *m = mutex;
		/* ... unlock *m in the mode it was acquired ... */
	}

	/* Try to acquire a lock without blocking; returns true on success. */
	bool
	try_acquire(rw_mutex_type &m, bool write = true)
	{
		bool result;
		is_writer = write;
		result = write ? m.try_lock() : m.try_lock_shared();
		/* ... remember the mutex on success ... */
		return result;
	}

protected:
	/* Currently held mutex, or nullptr if no mutex is held. */
	rw_mutex_type *mutex;
	/* True when the held lock is exclusive. */
	bool is_writer;
}; /* class shared_mutex_scoped_lock */
template <typename ScopedLockType>
using scoped_lock_upgrade_to_writer =
	decltype(std::declval<ScopedLockType>().upgrade_to_writer());

template <typename ScopedLockType>
using scoped_lock_has_upgrade_to_writer =
	detail::supports<ScopedLockType, scoped_lock_upgrade_to_writer>;

template <typename ScopedLockType>
using scoped_lock_downgrade_to_reader =
	decltype(std::declval<ScopedLockType>().downgrade_to_reader());

template <typename ScopedLockType>
using scoped_lock_has_downgrade_to_reader =
	detail::supports<ScopedLockType, scoped_lock_downgrade_to_reader>;
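/*
 * Behaviour sketch (not from the original header): detail::supports is a
 * detection-idiom helper, so the aliases above yield a true type only for
 * lock types that actually provide upgrade_to_writer()/downgrade_to_reader().
 * The two lock types below are hypothetical and exist only for illustration.
 */
#if 0
struct upgradable_lock_sketch {
	bool upgrade_to_writer();
	bool downgrade_to_reader();
};
struct plain_lock_sketch {
};
static_assert(
	scoped_lock_has_upgrade_to_writer<upgradable_lock_sketch>::value, "");
static_assert(!scoped_lock_has_upgrade_to_writer<plain_lock_sketch>::value,
	      "");
#endif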
template <typename ScopedLockType,
	  bool = scoped_lock_has_upgrade_to_writer<ScopedLockType>::value
		  &&scoped_lock_has_downgrade_to_reader<ScopedLockType>::value>
class scoped_lock_traits {
public:
	using scope_lock_type = ScopedLockType;

	/* Upgradeable locks can start in read mode. */
	static bool initial_rw_state(bool write);

	static bool
	upgrade_to_writer(scope_lock_type &lock)
	{
		return lock.upgrade_to_writer();
	}

	static bool
	downgrade_to_reader(scope_lock_type &lock)
	{
		return lock.downgrade_to_reader();
	}
};

template <typename ScopedLockType>
class scoped_lock_traits<ScopedLockType, false> {
public:
	using scope_lock_type = ScopedLockType;

	/* Non-upgradeable locks must be taken in the required mode up front. */
	static bool initial_rw_state(bool write);

	/* No-op fallbacks for locks without upgrade/downgrade support. */
	static bool upgrade_to_writer(scope_lock_type &lock);
	static bool downgrade_to_reader(scope_lock_type &lock);
};
} /* namespace concurrent_hash_map_internal */

template <typename Key, typename T, typename Hash = std::hash<Key>,
	  typename KeyEqual = std::equal_to<Key>,
	  typename MutexType = pmem::obj::shared_mutex,
	  typename ScopedLockType = concurrent_hash_map_internal::
		  shared_mutex_scoped_lock<MutexType>>
class concurrent_hash_map;
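/*
 * Rough usage sketch (not part of the original header): one typical way to
 * keep a concurrent_hash_map in a persistent memory pool. The root structure
 * and names below are illustrative assumptions; whether an explicit
 * re-initialization call is required after opening the pool depends on the
 * libpmemobj-cpp version in use.
 */
#if 0
#include <libpmemobj++/make_persistent.hpp>
#include <libpmemobj++/persistent_ptr.hpp>
#include <libpmemobj++/pool.hpp>
#include <libpmemobj++/transaction.hpp>

using example_map =
	pmem::obj::concurrent_hash_map<pmem::obj::p<int>, pmem::obj::p<int>>;

struct example_root {
	pmem::obj::persistent_ptr<example_map> map;
};

inline void
concurrent_hash_map_sketch(pmem::obj::pool<example_root> &pop)
{
	auto &map = pop.root()->map;

	/* Allocate the map once, transactionally. */
	if (map == nullptr)
		pmem::obj::transaction::run(pop, [&] {
			map = pmem::obj::make_persistent<example_map>();
		});

	/* Concurrent readers and writers use accessors (see the class API). */
	example_map::accessor acc;
	map->insert(acc, 42);
	acc->second = 13;
}
#endif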
namespace concurrent_hash_map_internal
{
/* Throws if called inside an active transaction. */
static inline void
check_outside_tx()
{
	if (pmemobj_tx_stage() != TX_STAGE_NONE)
		throw pmem::transaction_scope_error(
			"Function called inside transaction scope.");
}
template <typename Hash>
using transparent_key_equal = typename Hash::transparent_key_equal;

template <typename Hash>
using has_transparent_key_equal = detail::supports<Hash, transparent_key_equal>;

template <typename Hash, typename Pred,
	  bool = has_transparent_key_equal<Hash>::value>
struct key_equal_type {
	using type = typename Hash::transparent_key_equal;
};

template <typename Hash, typename Pred>
struct key_equal_type<Hash, Pred, false> {
	using type = Pred;
};
template <typename Mutex, typename ScopedLockType>
void
assert_not_locked(Mutex &mtx)
{
	ScopedLockType scoped_lock;
	assert(scoped_lock.try_acquire(mtx));
	scoped_lock.release();
}
template <typename Key, typename T, typename MutexType,
	  typename ScopedLockType>
struct hash_map_node {
	/* Mutex type. */
	using mutex_t = MutexType;

	/* Scoped lock type for the mutex. */
	using scoped_t = ScopedLockType;

	using value_type = std::pair<const Key, T>;

	/* Persistent pointer type for next. */
	using node_ptr_t = detail::persistent_pool_ptr<
		hash_map_node<Key, T, mutex_t, scoped_t>>;

	/* Next node in the chain, node mutex and the stored item. */
	node_ptr_t next;
	mutex_t mutex;
	value_type item;

	hash_map_node() : next(OID_NULL)
	{
	}

	hash_map_node(const node_ptr_t &_next) : next(_next)
	{
	}

	hash_map_node(node_ptr_t &&_next) : next(std::move(_next))
	{
	}

	hash_map_node(const node_ptr_t &_next, const Key &key)
	    : next(_next), item(key, T())
	{
	}

	hash_map_node(const node_ptr_t &_next, const Key &key, const T &t)
	    : next(_next), item(key, t)
	{
	}

	hash_map_node(const node_ptr_t &_next, value_type &&i)
	    : next(_next), item(std::move(i))
	{
	}

	template <typename... Args>
	hash_map_node(node_ptr_t &&_next, Args &&... args)
	    : next(std::forward<node_ptr_t>(_next)),
	      item(std::forward<Args>(args)...)
	{
	}

	hash_map_node(const node_ptr_t &_next, const value_type &i)
	    : next(_next), item(i)
	{
	}

	hash_map_node(const hash_map_node &) = delete;

	hash_map_node &operator=(const hash_map_node &) = delete;
};
template <typename Bucket>
class segment_traits {
public:
	/* Internal index types. */
	using segment_index_t = size_t;
	using size_type = size_t;
	using bucket_type = Bucket;

	/* PMDK's limit for a single allocation. */
	constexpr static size_type max_allocation_size = PMEMOBJ_MAX_ALLOC_SIZE;

	/* First big block; all blocks before it double in size. */
	constexpr static segment_index_t first_big_block = 27;

	constexpr static size_type big_block_size = size_type(1)
		<< first_big_block;

	static_assert((big_block_size * sizeof(bucket_type)) <
			      max_allocation_size,
		      "Block size exceeds max_allocation_size");

	/* Index of the first block in the given segment. */
	constexpr static segment_index_t
	first_block_in_segment(segment_index_t seg)
	{
		return seg < first_big_block
			? seg
			: (first_big_block +
			   (segment_index_t(1) << (seg - first_big_block)) - 1);
	}

	/* Number of blocks in the given segment. */
	constexpr static size_type
	blocks_in_segment(segment_index_t seg)
	{
		return seg < first_big_block
			? segment_index_t(1)
			: segment_index_t(1) << (seg - first_big_block);
	}

	/* Number of buckets in the given block. */
	constexpr static size_type
	block_size(segment_index_t b)
	{
		return b < first_big_block ? segment_size(b ? b : 1)
					   : big_block_size;
	}

	/* Number of embedded segments and buckets. */
	constexpr static segment_index_t embedded_segments = 1;
	constexpr static size_type embedded_buckets = 1 << embedded_segments;

	/* Maximum number of segments. */
	constexpr static segment_index_t number_of_segments = 32;

	/* Number of segments in the first block. */
	static const size_type first_block = 8;

	/* Maximum number of blocks. */
	constexpr static segment_index_t
	number_of_blocks()
	{
		return first_block_in_segment(number_of_segments);
	}

	/* Index of the segment containing the given bucket index. */
	static segment_index_t
	segment_index_of(size_type index)
	{
		return segment_index_t(detail::Log2(index | 1));
	}

	/* First bucket index of the given segment. */
	constexpr static segment_index_t
	segment_base(segment_index_t k)
	{
		return (segment_index_t(1) << k) & ~segment_index_t(1);
	}

	/* Segment size (except for k == 0). */
	constexpr static size_type
	segment_size(segment_index_t k)
	{
		return size_type(1) << k;
	}

	static_assert(
		embedded_segments < first_big_block,
		"Number of embedded segments cannot exceed max_allocation_size");
}; /* class segment_traits */
template <typename BlockTable, typename SegmentTraits, bool is_const>
class segment_facade_impl : public SegmentTraits {
private:
	using traits_type = SegmentTraits;
	using traits_type::block_size;
	using traits_type::blocks_in_segment;
	using traits_type::embedded_buckets;
	using traits_type::embedded_segments;
	using traits_type::first_block;
	using traits_type::first_block_in_segment;
	using traits_type::segment_base;
	using traits_type::segment_size;

public:
	using table_reference =
		typename std::conditional<is_const, const BlockTable &,
					  BlockTable &>::type;

	using table_pointer =
		typename std::conditional<is_const, const BlockTable *,
					  BlockTable *>::type;

	using bucket_type = typename traits_type::bucket_type;
	using segment_index_t = typename traits_type::segment_index_t;
	using size_type = typename traits_type::size_type;
	/* Construct a facade over the given table and segment index. */
	segment_facade_impl(table_reference table, segment_index_t s)
	    : my_table(&table), my_seg(s)
	{
		assert(my_seg < traits_type::number_of_segments);
	}

	segment_facade_impl(const segment_facade_impl &src)
	    : my_table(src.my_table), my_seg(src.my_seg)
	{
	}

	segment_facade_impl(segment_facade_impl &&src) = default;

	segment_facade_impl &
	operator=(const segment_facade_impl &src)
	{
		my_table = src.my_table;
		my_seg = src.my_seg;
		return *this;
	}

	segment_facade_impl &
	operator=(segment_facade_impl &&src)
	{
		my_table = src.my_table;
		my_seg = src.my_seg;
		return *this;
	}

	/* Access the i-th bucket of this segment. */
	bucket_type &operator[](size_type i) const
	{
		segment_index_t table_block = first_block_in_segment(my_seg);
		size_type b_size = block_size(table_block);

		table_block += i / b_size;
		i = i % b_size;

		return (*my_table)[table_block][static_cast<std::ptrdiff_t>(i)];
	}

	/* Pre/post increment and decrement move the facade between segments;
	 * += and -= shift it by a given offset (bodies elided here; the
	 * postfix forms copy the facade first: segment_facade_impl tmp = *this;)
	 */
	segment_facade_impl &operator++();
	segment_facade_impl operator++(int);
	segment_facade_impl &operator--();
	segment_facade_impl operator--(int);
	segment_facade_impl &operator+=(segment_index_t off);
	segment_facade_impl &operator-=(segment_index_t off);

	segment_facade_impl
	operator+(segment_index_t off) const
	{
		return segment_facade_impl(*(this->my_table),
					   this->my_seg + off);
	}

	segment_facade_impl
	operator-(segment_index_t off) const
	{
		return segment_facade_impl(*(this->my_table),
					   this->my_seg - off);
	}
	/* Allocate and fill the segment. */
	void
	enable(pool_base &pop)
	{
		assert(my_seg >= embedded_segments);

		if (my_seg < first_block) {
			enable_first_block(pop);
		} else {
			enable_big_segment(pop);
		}
	}

	/* Deallocate the segment. */
	void
	disable()
	{
		assert(my_seg >= embedded_segments);

		if (my_seg < first_block) {
			if (my_seg == embedded_segments) {
				size_type sz = segment_size(first_block) -
					embedded_buckets;
				delete_persistent<bucket_type[]>(
					(*my_table)[my_seg], sz);
			}

			(*my_table)[my_seg] = nullptr;
		} else {
			block_range blocks = segment_blocks(my_seg);

			for (segment_index_t b = blocks.first;
			     b < blocks.second; ++b) {
				if ((*my_table)[b] != nullptr) {
					delete_persistent<bucket_type[]>(
						(*my_table)[b], block_size(b));
					(*my_table)[b] = nullptr;
				}
			}
		}
	}

	/* Size of the segment in buckets. */
	constexpr size_type
	size() const
	{
		return segment_size(my_seg ? my_seg : 1);
	}

	/* True when every block of the segment is allocated. */
	bool
	is_valid() const
	{
		block_range blocks = segment_blocks(my_seg);

		for (segment_index_t b = blocks.first; b < blocks.second; ++b) {
			if ((*my_table)[b] == nullptr)
				return false;
		}

		return true;
	}

private:
	/* Pair of block indexes [begin, end) covering a segment. */
	using block_range = std::pair<segment_index_t, segment_index_t>;

	static block_range
	segment_blocks(segment_index_t seg)
	{
		segment_index_t begin = first_block_in_segment(seg);

		return block_range(begin, begin + blocks_in_segment(seg));
	}

	void
	enable_first_block(pool_base &pop)
	{
		assert(my_seg == embedded_segments);
		{
			transaction::manual tx(pop);

			size_type sz =
				segment_size(first_block) - embedded_buckets;
			(*my_table)[my_seg] =
				make_persistent<bucket_type[]>(sz);

			persistent_ptr<bucket_type> base =
				(*my_table)[embedded_segments].raw();

			for (segment_index_t s = my_seg + 1; s < first_block;
			     ++s) {
				std::ptrdiff_t off =
					static_cast<std::ptrdiff_t>(
						segment_base(s) -
						segment_base(my_seg));

				(*my_table)[s] = (base + off).raw();
			}

			transaction::commit();
		}
	}

	void
	enable_big_segment(pool_base &pop)
	{
		block_range blocks = segment_blocks(my_seg);
		{
			transaction::manual tx(pop);

			for (segment_index_t b = blocks.first;
			     b < blocks.second; ++b) {
				assert((*my_table)[b] == nullptr);
				(*my_table)[b] = make_persistent<bucket_type[]>(
					block_size(b));
			}

			transaction::commit();
		}
	}

	/* Pointer to the block table. */
	table_pointer my_table;

	/* Segment index. */
	segment_index_t my_seg;
}; /* class segment_facade_impl */
template <typename Key, typename T, typename MutexType,
	  typename ScopedLockType>
class hash_map_base {
public:
	using mutex_t = MutexType;
	using scoped_t = ScopedLockType;

	/* Size and hash types. */
	using size_type = size_t;
	using hashcode_type = size_t;

	/* Node of the intrusive per-bucket list. */
	using node = hash_map_node<Key, T, mutex_t, scoped_t>;
	using node_ptr_t = detail::persistent_pool_ptr<node>;

	/* Bucket type. */
	struct bucket {
		using mutex_t = MutexType;
		using scoped_t = ScopedLockType;

		/* Bucket mutex. */
		mutex_t mutex;

		/* Set once the bucket content has been rehashed. */
		p<std::atomic<uint64_t>> rehashed;

		/* List of the nodes stored in the bucket. */
		node_ptr_t node_list;

		bucket() : node_list(nullptr)
		{
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
			VALGRIND_HG_DISABLE_CHECKING(&rehashed,
						     sizeof(rehashed));
#endif
			rehashed.get_rw() = false;
		}

		bool
		is_rehashed(std::memory_order order)
		{
			return rehashed.get_ro().load(order);
		}

		void
		set_rehashed(std::memory_order order)
		{
			rehashed.get_rw().store(true, order);
		}

		bucket(const bucket &) = delete;
		bucket &operator=(const bucket &) = delete;
	}; /* struct bucket */
	/* Segment traits and index type. */
	using segment_traits_t = segment_traits<bucket>;
	using segment_index_t = typename segment_traits_t::segment_index_t;

	/* Count of embedded buckets and of segments in the first block. */
	static const size_type embedded_buckets =
		segment_traits_t::embedded_buckets;
	static const size_type first_block = segment_traits_t::first_block;

	/* Size of the block table. */
	constexpr static size_type block_table_size =
		segment_traits_t::number_of_blocks();

	/* Persistent pointer types for buckets and the block table. */
	using segment_ptr_t = persistent_ptr<bucket[]>;
	using bucket_ptr_t = persistent_ptr<bucket>;
	using blocks_table_t = segment_ptr_t[block_table_size];

	/* Mutex serializing segment allocation. */
	using segment_enable_mutex_t = pmem::obj::mutex;

	/* ID of the pool this map resides in. */
	p<uint64_t> my_pool_uuid;

	/* Layout feature flags stored in the header. */
	features layout_features;

	/* Reserved space preceding the mask. */
	std::aligned_storage<sizeof(size_t), sizeof(size_t)>::type
		my_mask_reserved;

	/* Hash mask = sum of allocated segment sizes - 1. */
	p<std::atomic<hashcode_type>> my_mask;

	std::aligned_storage<32, 8>::type padding1;

	/* Table of persistent pointers to the segment blocks. */
	blocks_table_t my_table;

	/* Number of elements in the map. */
	p<std::atomic<size_type>> my_size;

	std::aligned_storage<24, 8>::type padding2;

	/* Reserved for future use. */
	std::aligned_storage<64, 8>::type reserved;

	segment_enable_mutex_t my_segment_enable_mutex;

	/* Embedded buckets. */
	bucket my_embedded_segment[embedded_buckets];
	/* Features supported by this version of the layout. */
	static constexpr features header_features();

	const std::atomic<hashcode_type> &
	mask() const noexcept
	{
		return my_mask.get_ro();
	}

	std::atomic<hashcode_type> &
	mask() noexcept
	{
		return my_mask.get_rw();
	}

	/* Segment facades over the block table. */
	using const_segment_facade_t =
		segment_facade_impl<blocks_table_t, segment_traits_t, true>;
	using segment_facade_t =
		segment_facade_impl<blocks_table_t, segment_traits_t, false>;

	/* Default constructor. */
	hash_map_base()
	{
		static_assert(
			sizeof(size_type) == sizeof(std::atomic<size_type>),
			"std::atomic should have the same layout as underlying integral type");

#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		VALGRIND_HG_DISABLE_CHECKING(&my_size, sizeof(my_size));
		VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
#endif
		layout_features = header_features();

		my_size.get_rw() = 0;
		PMEMoid oid = pmemobj_oid(this);

		assert(!OID_IS_NULL(oid));

		my_pool_uuid = oid.pool_uuid_lo;

		pool_base pop = get_pool_base();
		for (size_type i = 0; i < segment_traits_t::embedded_segments;
		     ++i) {
			my_table[i] = pmemobj_oid(
				my_embedded_segment +
				segment_traits_t::segment_base(i));

			segment_facade_t seg(my_table, i);
			mark_rehashed<false>(pop, seg);
		}
	}
	/* Recalculate the mask value after each process restart. */
	void
	calculate_mask()
	{
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		VALGRIND_HG_DISABLE_CHECKING(&my_size, sizeof(my_size));
		VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
#endif
#if LIBPMEMOBJ_CPP_VG_PMEMCHECK_ENABLED
		VALGRIND_PMC_REMOVE_PMEM_MAPPING(&my_mask, sizeof(my_mask));
#endif
		hashcode_type m = embedded_buckets - 1;

		const_segment_facade_t segment(
			my_table, segment_traits_t::embedded_segments);

		while (segment.is_valid()) {
			m += segment.size();
			++segment;
		}

		mask().store(m, std::memory_order_relaxed);
	}

	/* Restore the element count after each process restart. */
	void
	restore_size(size_type actual_size)
	{
		my_size.get_rw().store(actual_size, std::memory_order_relaxed);
		pool_base pop = get_pool_base();
		pop.persist(my_size);
	}
	/* Set the rehashed flag for every bucket of the segment. */
	template <bool Flush = true>
	void
	mark_rehashed(pool_base &pop, segment_facade_t &segment)
	{
		for (size_type i = 0; i < segment.size(); ++i) {
			bucket *b = &(segment[i]);

			assert_not_locked<mutex_t, scoped_t>(b->mutex);

			b->set_rehashed(std::memory_order_relaxed);
		}

		if (Flush) {
			/* Flush in a separate loop to avoid read-after-flush. */
			for (size_type i = 0; i < segment.size(); ++i) {
				bucket *b = &(segment[i]);
				pop.flush(b->rehashed);
			}

			pop.drain();
		}
	}
	/* Enable a new segment in the map. */
	void
	enable_segment(segment_index_t k, bool is_initial = false)
	{
		pool_base pop = get_pool_base();
		size_type sz;

		if (k >= first_block) {
			segment_facade_t new_segment(my_table, k);

			sz = new_segment.size();
			if (!new_segment.is_valid())
				new_segment.enable(pop);

			if (is_initial)
				mark_rehashed(pop, new_segment);

			/* double it to get the entire capacity */
			sz <<= 1;
		} else {
			/* the first block */
			assert(k == segment_traits_t::embedded_segments);

			for (segment_index_t i = k; i < first_block; ++i) {
				segment_facade_t new_segment(my_table, i);

				if (!new_segment.is_valid())
					new_segment.enable(pop);

				if (is_initial)
					mark_rehashed(pop, new_segment);
			}

			sz = segment_traits_t::segment_size(first_block);
		}
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		ANNOTATE_HAPPENS_BEFORE(&my_mask);
#endif
		mask().store(sz - 1, std::memory_order_release);
	}

	/* Get bucket by (masked) hashcode. */
	bucket *
	get_bucket(hashcode_type h) const
	{
		segment_index_t s = segment_traits_t::segment_index_of(h);

		h -= segment_traits_t::segment_base(s);

		const_segment_facade_t segment(my_table, s);

		assert(segment.is_valid());

		return &(segment[h]);
	}
	/* Check whether the mask changed under us. */
	bool
	check_mask_race(hashcode_type h, hashcode_type &m) const
	{
		hashcode_type m_now, m_old = m;

		m_now = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif
		if (m_old != m_now)
			return check_rehashing_collision(h, m_old, m = m_now);

		return false;
	}

	/* Process a mask race: check whether the bucket for h under the old
	 * mask has been rehashed. */
	bool
	check_rehashing_collision(hashcode_type h, hashcode_type m_old,
				  hashcode_type m) const
	{
		if ((h & m_old) != (h & m)) {
			/* Mask changed for this hashcode: find the next
			 * applicable mask after m_old. */
			for (++m_old; !(h & m_old); m_old <<= 1)
				;

			m_old = (m_old << 1) - 1; /* get full mask from a bit */

			assert((m_old & (m_old + 1)) == 0 && m_old <= m);

			bucket *b = get_bucket(h & m_old);
			return b->is_rehashed(std::memory_order_acquire);
		}

		return false;
	}
	/* Insert a node into the (locked) bucket; returns the new size. */
	template <typename Node, typename... Args>
	size_type
	insert_new_node(bucket *b, detail::persistent_pool_ptr<Node> &new_node,
			Args &&... args)
	{
		pool_base pop = get_pool_base();

		/* Allocation and linking run inside a transaction (elided). */
		new_node = pmem::obj::make_persistent<Node>(
			b->node_list, std::forward<Args>(args)...);
		b->node_list = new_node;

		/* Increment the volatile size and persist it. */
		size_t sz = ++(my_size.get_rw());
		pop.persist(&my_size, sizeof(my_size));

		return sz;
	}

	/* Check the load factor and enable a new segment if needed. */
	bool
	check_growth(hashcode_type m, size_type sz)
	{
		if (sz >= m) {
			segment_index_t new_seg =
				static_cast<segment_index_t>(
					detail::Log2(m + 1));

			assert(segment_facade_t(my_table, new_seg - 1)
				       .is_valid());

			std::unique_lock<segment_enable_mutex_t> lock(
				my_segment_enable_mutex, std::try_to_lock);

			if (lock &&
			    mask().load(std::memory_order_relaxed) == m) {
				enable_segment(new_seg);
				return true;
			}
		}

		return false;
	}
	/* Prepare enough segments for the given number of buckets. */
	void
	reserve(size_type buckets)
	{
		if (buckets == 0)
			return;

		--buckets;

		bool is_initial = (my_size.get_ro() == 0);

		for (size_type m = mask(); buckets > m; m = mask())
			enable_segment(
				segment_traits_t::segment_index_of(m + 1),
				is_initial);
	}

	/* Swap two hash_map_base instances, transactionally. */
	void
	internal_swap(hash_map_base<Key, T, mutex_t, scoped_t> &table)
	{
		pool_base p = get_pool_base();
		{
			transaction::manual tx(p);

			this->my_pool_uuid.swap(table.my_pool_uuid);

			this->mask() = table.mask().exchange(
				this->mask(), std::memory_order_relaxed);

			this->my_size.get_rw() =
				table.my_size.get_rw().exchange(
					this->my_size.get_ro(),
					std::memory_order_relaxed);

			for (size_type i = 0; i < embedded_buckets; ++i)
				this->my_embedded_segment[i].node_list.swap(
					table.my_embedded_segment[i].node_list);

			for (size_type i = segment_traits_t::embedded_segments;
			     i < block_table_size; ++i)
				this->my_table[i].swap(table.my_table[i]);

			transaction::commit();
		}
	}

	/* Get the persistent memory pool this map resides in. */
	pool_base
	get_pool_base()
	{
		PMEMobjpool *pop =
			pmemobj_pool_by_oid(PMEMoid{my_pool_uuid, 0});

		return pool_base(pop);
	}
}; /* class hash_map_base */
template <typename Container, bool is_const>
class hash_map_iterator {
public:
	using iterator_category = std::forward_iterator_tag;
	using difference_type = ptrdiff_t;
	using map_type = Container;
	using value_type = typename map_type::value_type;
	using node = typename map_type::node;
	using bucket = typename map_type::bucket;
	using map_ptr = typename std::conditional<is_const, const map_type *,
						  map_type *>::type;
	using reference =
		typename std::conditional<is_const,
					  typename map_type::const_reference,
					  typename map_type::reference>::type;
	using pointer =
		typename std::conditional<is_const,
					  typename map_type::const_pointer,
					  typename map_type::pointer>::type;

	template <typename C, bool M, bool U>
	friend bool operator==(const hash_map_iterator<C, M> &i,
			       const hash_map_iterator<C, U> &j);

	template <typename C, bool M, bool U>
	friend bool operator!=(const hash_map_iterator<C, M> &i,
			       const hash_map_iterator<C, U> &j);
	friend class hash_map_iterator<map_type, true>;

#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
private:
	template <typename Key, typename T, typename Hash, typename KeyEqual,
		  typename MutexType, typename ScopedLockType>
	friend class ::pmem::obj::concurrent_hash_map;
#endif
	hash_map_iterator(map_ptr map, size_t index)
	    : my_map(map), my_index(index), my_bucket(nullptr), my_node(nullptr)
	{
		if (my_index <= my_map->mask()) {
			bucket_accessor acc(my_map, my_index);
			my_bucket = acc.get();
			my_node = static_cast<node *>(
				my_bucket->node_list.get(my_map->my_pool_uuid));

			if (!my_node)
				advance_to_next_bucket();
		}
	}

public:
	/* Construct an undefined iterator. */
	hash_map_iterator() = default;
	/* Copy constructor. */
	hash_map_iterator(const hash_map_iterator &other)
	    : my_map(other.my_map),
	      my_index(other.my_index),
	      my_bucket(other.my_bucket),
	      my_node(other.my_node)
	{
	}

	/* Conversion from a non-const to a const iterator. */
	template <typename U = void,
		  typename = typename std::enable_if<is_const, U>::type>
	hash_map_iterator(const hash_map_iterator<map_type, false> &other)
	    : my_map(other.my_map),
	      my_index(other.my_index),
	      my_bucket(other.my_bucket),
	      my_node(other.my_node)
	{
	}

	hash_map_iterator &operator=(const hash_map_iterator &it) = default;

	reference operator*() const
	{
		return my_node->item;
	}

	pointer operator->() const
	{
		return &operator*();
	}

	/* Prefix increment. */
	hash_map_iterator &
	operator++()
	{
		my_node = static_cast<node *>(
			my_node->next.get((my_map->my_pool_uuid)));

		if (!my_node)
			advance_to_next_bucket();

		return *this;
	}

	/* Postfix increment. */
	hash_map_iterator
	operator++(int)
	{
		hash_map_iterator old(*this);
		operator++();
		return old;
	}

private:
	/* Map over which we iterate. */
	map_ptr my_map = nullptr;

	/* Bucket index of the current item. */
	size_t my_index = 0;

	/* Pointer to the current bucket. */
	bucket *my_bucket = nullptr;

	/* Pointer to the current item. */
	node *my_node = nullptr;
	class bucket_accessor {
	public:
		bucket_accessor(map_ptr m, size_t index)
		{
			my_bucket = m->get_bucket(index);
		}

		bucket *
		get() const
		{
			return my_bucket;
		}

	private:
		bucket *my_bucket;
	};

	void
	advance_to_next_bucket()
	{
		size_t k = my_index + 1;

		while (k <= my_map->mask()) {
			bucket_accessor acc(my_map, k);
			my_bucket = acc.get();

			if (my_bucket->node_list) {
				my_node = static_cast<node *>(
					my_bucket->node_list.get(
						my_map->my_pool_uuid));
				my_index = k;
				return;
			}

			++k;
		}

		/* ... past the end: reset bucket/node and record the index */
	}
}; /* class hash_map_iterator */
template <typename Container, bool M, bool U>
bool
operator==(const hash_map_iterator<Container, M> &i,
	   const hash_map_iterator<Container, U> &j)
{
	return i.my_node == j.my_node && i.my_map == j.my_map;
}

template <typename Container, bool M, bool U>
bool
operator!=(const hash_map_iterator<Container, M> &i,
	   const hash_map_iterator<Container, U> &j)
{
	return i.my_node != j.my_node || i.my_map != j.my_map;
}
} /* namespace concurrent_hash_map_internal */
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
class concurrent_hash_map
    : protected concurrent_hash_map_internal::hash_map_base<Key, T, MutexType,
							     ScopedLockType> {
	template <typename Container, bool is_const>
	friend class concurrent_hash_map_internal::hash_map_iterator;
public:
	using size_type = typename concurrent_hash_map_internal::hash_map_base<
		Key, T, MutexType, ScopedLockType>::size_type;
	using hashcode_type =
		typename concurrent_hash_map_internal::hash_map_base<
			Key, T, MutexType, ScopedLockType>::hashcode_type;
	using key_type = Key;
	using mapped_type = T;
	using value_type =
		typename concurrent_hash_map_internal::hash_map_base<
			Key, T, MutexType, ScopedLockType>::node::value_type;
	using difference_type = ptrdiff_t;
	using pointer = value_type *;
	using const_pointer = const value_type *;
	using reference = value_type &;
	using const_reference = const value_type &;
	using iterator = concurrent_hash_map_internal::hash_map_iterator<
		concurrent_hash_map, false>;
	using const_iterator = concurrent_hash_map_internal::hash_map_iterator<
		concurrent_hash_map, true>;
	using hasher = Hash;
	using key_equal =
		typename concurrent_hash_map_internal::key_equal_type<
			Hash, KeyEqual>::type;
	using mutex_t = MutexType;
	using scoped_t = ScopedLockType;

protected:
	using hash_map_base =
		concurrent_hash_map_internal::hash_map_base<Key, T, mutex_t,
							    scoped_t>;
	using hash_map_base::calculate_mask;
	using hash_map_base::check_growth;
	using hash_map_base::check_mask_race;
	using hash_map_base::embedded_buckets;
	using hash_map_base::get_bucket;
	using hash_map_base::get_pool_base;
	using hash_map_base::header_features;
	using hash_map_base::insert_new_node;
	using hash_map_base::internal_swap;
	using hash_map_base::layout_features;
	using hash_map_base::mask;
	using hash_map_base::reserve;
	using node = typename hash_map_base::node;
	using node_mutex_t = typename node::mutex_t;
	using node_ptr_t = typename hash_map_base::node_ptr_t;
	using bucket = typename hash_map_base::bucket;
	using bucket_lock_type = typename bucket::scoped_t;
	using segment_index_t = typename hash_map_base::segment_index_t;
	using segment_traits_t = typename hash_map_base::segment_traits_t;
	using segment_facade_t = typename hash_map_base::segment_facade_t;
	using scoped_lock_traits_type =
		concurrent_hash_map_internal::scoped_lock_traits<scoped_t>;

	using persistent_node_ptr_t = detail::persistent_pool_ptr<node>;
	void
	delete_node(const node_ptr_t &n)
	{
		delete_persistent<node>(
			detail::static_persistent_pool_pointer_cast<node>(n)
				.get_persistent_ptr(this->my_pool_uuid));
	}

	template <typename K>
	persistent_node_ptr_t
	search_bucket(const K &key, bucket *b) const
	{
		assert(b->is_rehashed(std::memory_order_relaxed));

		persistent_node_ptr_t n =
			detail::static_persistent_pool_pointer_cast<node>(
				b->node_list);

		while (n &&
		       !key_equal{}(key,
				    n.get(this->my_pool_uuid)->item.first)) {
			n = detail::static_persistent_pool_pointer_cast<node>(
				n.get(this->my_pool_uuid)->next);
		}

		return n;
	}
	/* Finds, optionally rehashes, locks and provides access to a bucket;
	 * the constructor simply forwards to acquire(). */
	class bucket_accessor : public bucket_lock_type {
		bucket *my_b;

	public:
		bucket_accessor(concurrent_hash_map *base,
				const hashcode_type h, bool writer = false)
		{
			acquire(base, h, writer);
		}

		/* Find the bucket by masked hashcode, rehash it if needed and
		 * acquire the lock. */
		void
		acquire(concurrent_hash_map *base, const hashcode_type h,
			bool writer = false)
		{
			my_b = base->get_bucket(h);

			if (my_b->is_rehashed(std::memory_order_acquire) ==
				    false &&
			    bucket_lock_type::try_acquire(this->my_b->mutex,
							  /*write=*/true)) {
				if (my_b->is_rehashed(
					    std::memory_order_relaxed) ==
				    false) {
					/* recursive rehashing */
					base->rehash_bucket<false>(my_b, h);
				}
			} else {
				bucket_lock_type::acquire(my_b->mutex, writer);
			}

			assert(my_b->is_rehashed(std::memory_order_relaxed));
		}

		/* True when the bucket is locked for writing. */
		bool
		is_writer() const
		{
			return bucket_lock_type::is_writer;
		}

		/* Pointer to the underlying bucket. */
		bucket *
		get() const
		{
			return my_b;
		}

		bucket *operator->() const
		{
			return this->get();
		}
	};

	/* Serial counterpart used by single-threaded operations. */
	class serial_bucket_accessor {
		bucket *my_b;

	public:
		serial_bucket_accessor(concurrent_hash_map *base,
				       const hashcode_type h,
				       bool writer = false)
		{
			acquire(base, h, writer);
		}

		/* Find the bucket by masked hashcode and rehash it serially
		 * if needed. */
		void
		acquire(concurrent_hash_map *base, const hashcode_type h,
			bool writer = false)
		{
			my_b = base->get_bucket(h);

			if (my_b->is_rehashed(std::memory_order_relaxed) ==
			    false) {
				/* recursive rehashing */
				base->rehash_bucket<true>(my_b, h);
			}

			assert(my_b->is_rehashed(std::memory_order_relaxed));
		}

		/* is_writer(), get() and operator->() mirror bucket_accessor. */
	};
	/* Hash code of the key stored in a node. */
	hashcode_type
	get_hash_code(node_ptr_t &n)
	{
		return hasher{}(
			detail::static_persistent_pool_pointer_cast<node>(n)(
				this->my_pool_uuid)
				->item.first);
	}

	/* Split the parent bucket's chain between the parent and the new
	 * bucket; serial == true is used by single-threaded operations. */
	template <bool serial>
	void
	rehash_bucket(bucket *b_new, const hashcode_type h)
	{
		using accessor_type = typename std::conditional<
			serial, serial_bucket_accessor, bucket_accessor>::type;

		using scoped_lock_traits_type =
			concurrent_hash_map_internal::scoped_lock_traits<
				accessor_type>;

		pool_base pop = get_pool_base();
		node_ptr_t *p_new = &(b_new->node_list);

		/* Only true when a failure happened just before setting the
		 * rehashed flag. */
		if (*p_new != nullptr) {
			assert(!b_new->is_rehashed(std::memory_order_relaxed));

			b_new->set_rehashed(std::memory_order_relaxed);
			pop.persist(b_new->rehashed);

			return;
		}

		/* get the parent mask from the topmost bit */
		hashcode_type mask = (1u << detail::Log2(h)) - 1;
		assert((h & mask) < h);
		accessor_type b_old(
			this, h & mask,
			scoped_lock_traits_type::initial_rw_state(true));

		/* get the full mask for the new bucket */
		mask = (mask << 1) | 1;
		assert((mask & (mask + 1)) == 0 && (h & mask) == h);

	restart:
		for (node_ptr_t *p_old = &(b_old->node_list), n = *p_old; n;
		     n = *p_old) {
			hashcode_type c = get_hash_code(n);

			hashcode_type bmask = h & (mask >> 1);
			bmask = bmask == 0
				? 1 /* minimal mask of the parent bucket */
				: (1u << (detail::Log2(bmask) + 1)) - 1;
			assert((c & bmask) == (h & bmask));

			if ((c & mask) == h) {
				if (!b_old.is_writer() &&
				    !scoped_lock_traits_type::
					    upgrade_to_writer(b_old)) {
					/* The node pointer may have been
					 * invalidated by a concurrent erase:
					 * restart the scan. */
					goto restart;
				}

				/* Move the node to b_new. */
				*p_new = n;
				*p_old = n(this->my_pool_uuid)->next;
				p_new = &(n(this->my_pool_uuid)->next);
			} else {
				/* iterate to the next item */
				p_old = &(n(this->my_pool_uuid)->next);
			}
		}

		/* terminate the new chain and mark the bucket rehashed */
		*p_new = nullptr;
		b_new->set_rehashed(std::memory_order_release);
		pop.persist(b_new->rehashed);
	}

	/* Executed when the map is reopened (enclosing function elided): the
	 * persisted layout must be compatible with this header, i.e.
	 *
	 *	if (layout_features.incompat != header_features().incompat)
	 *		throw pmem::layout_error(
	 *			"Incompat flags mismatch, for more details go to: https://pmem.io/pmdk/cpp_obj/ \n");
	 */
public:
	/* Combines element access with per-node locking; const_accessor holds
	 * a read lock, accessor a write lock. Only the parts present in this
	 * excerpt are shown. */
	class const_accessor : protected node::scoped_t {
		friend class concurrent_hash_map<Key, T, Hash, KeyEqual,
						 mutex_t, scoped_t>;
		using node::scoped_t::try_acquire;

	public:
		using value_type =
			const typename concurrent_hash_map::value_type;

		/* Release the accessor; must be called outside a tx. */
		void
		release()
		{
			concurrent_hash_map_internal::check_outside_tx();

			if (my_node) {
				node::scoped_t::release();
				my_node = nullptr;
			}
		}

		const_reference operator*() const
		{
			return my_node->item;
		}

		const_accessor()
		{
			concurrent_hash_map_internal::check_outside_tx();
		}

	protected:
		persistent_ptr<node> my_node;

		hashcode_type my_hash;
	};

	class accessor : public const_accessor {
	public:
		using value_type = typename concurrent_hash_map::value_type;

		reference operator*() const
		{
			assert(this->my_node);

			return this->my_node->item;
		}

		pointer operator->() const
		{
			return &operator*();
		}
	};
	/* Copy constructor: reserves space for table.size() elements and then
	 * copies them (body partially elided). */
	concurrent_hash_map(const concurrent_hash_map &table) : hash_map_base()
	{
		reserve(table.size());
		/* ... copy the elements of table ... */
	}

	/* Range constructor. */
	template <typename I>
	concurrent_hash_map(I first, I last)
	{
		reserve(static_cast<size_type>(std::distance(first, last)));
		/* ... copy the [first, last) range ... */
	}

	/* Re-initialization after process restart: when the previous shutdown
	 * was not graceful, the element count is recomputed and persisted. */
	void
	runtime_initialize(bool graceful_shutdown = false)
	{
		/* ... */
		if (!graceful_shutdown) {
			auto actual_size =
				std::distance(this->begin(), this->end());
			assert(actual_size >= 0);
			this->restore_size(size_type(actual_size));
		} else {
			assert(this->size() ==
			       size_type(std::distance(this->begin(),
						       this->end())));
		}
	}

	/* Copy assignment. */
	concurrent_hash_map &
	operator=(const concurrent_hash_map &table)
	{
		if (this != &table) {
			/* ... clear this map and copy from table ... */
		}

		return *this;
	}
	/* Rehash, swap and clear (defined out of line below). */
	void rehash(size_type n = 0);
	void swap(concurrent_hash_map &table);
	void clear();

	/* Iteration (not concurrency-safe) and size queries. */
	iterator begin() { return iterator(this, 0); }
	iterator end() { return iterator(this, mask() + 1); }
	const_iterator begin() const { return const_iterator(this, 0); }
	const_iterator end() const { return const_iterator(this, mask() + 1); }

	size_type size() const { return this->my_size.get_ro(); }
	bool empty() const { return this->my_size.get_ro() == 0; }
	size_type max_size() const { return (~size_type(0)) / sizeof(node); }
	/* Return 1 if the key is present, 0 otherwise. */
	size_type
	count(const Key &key) const
	{
		concurrent_hash_map_internal::check_outside_tx();

		return const_cast<concurrent_hash_map *>(this)->internal_find(
			key, nullptr, false);
	}

	/* Heterogeneous overload, enabled only for transparent hashers. */
	template <typename K,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::
				  has_transparent_key_equal<hasher>::value,
			  K>::type>
	size_type
	count(const K &key) const
	{
		concurrent_hash_map_internal::check_outside_tx();

		return const_cast<concurrent_hash_map *>(this)->internal_find(
			key, nullptr, false);
	}

	/* Find the key and acquire a read lock on the element. */
	bool
	find(const_accessor &result, const Key &key) const
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return const_cast<concurrent_hash_map *>(this)->internal_find(
			key, &result, false);
	}

	template <typename K,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::
				  has_transparent_key_equal<hasher>::value,
			  K>::type>
	bool
	find(const_accessor &result, const K &key) const
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return const_cast<concurrent_hash_map *>(this)->internal_find(
			key, &result, false);
	}

	/* Find the key and acquire a write lock on the element. */
	bool
	find(accessor &result, const Key &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_find(key, &result, true);
	}

	template <typename K,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::
				  has_transparent_key_equal<hasher>::value,
			  K>::type>
	bool
	find(accessor &result, const K &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_find(key, &result, true);
	}

	/* Insert a default value for the key, acquiring a read or write lock
	 * on the element. */
	bool
	insert(const_accessor &result, const Key &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(key, &result, false, key);
	}

	bool
	insert(accessor &result, const Key &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(key, &result, true, key);
	}

	/* Insert a key/value pair, optionally keeping an accessor on it. */
	bool
	insert(const_accessor &result, const value_type &value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(value.first, &result, false, value);
	}

	bool
	insert(accessor &result, const value_type &value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(value.first, &result, true, value);
	}

	bool
	insert(const value_type &value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		return internal_insert(value.first, nullptr, false, value);
	}

	bool
	insert(const_accessor &result, value_type &&value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(value.first, &result, false,
				       std::move(value));
	}

	bool
	insert(accessor &result, value_type &&value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		result.release();

		return internal_insert(value.first, &result, true,
				       std::move(value));
	}

	bool
	insert(value_type &&value)
	{
		concurrent_hash_map_internal::check_outside_tx();

		return internal_insert(value.first, nullptr, false,
				       std::move(value));
	}

	/* Insert a range or an initializer list. */
	template <typename I>
	void
	insert(I first, I last)
	{
		concurrent_hash_map_internal::check_outside_tx();

		for (; first != last; ++first)
			insert(*first);
	}

	void
	insert(std::initializer_list<value_type> il)
	{
		concurrent_hash_map_internal::check_outside_tx();

		insert(il.begin(), il.end());
	}

	/* Remove the element with the given key; returns true if removed. */
	bool
	erase(const Key &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		return internal_erase(key);
	}

	template <typename K,
		  typename = typename std::enable_if<
			  concurrent_hash_map_internal::
				  has_transparent_key_equal<hasher>::value,
			  K>::type>
	bool
	erase(const K &key)
	{
		concurrent_hash_map_internal::check_outside_tx();

		return internal_erase(key);
	}
private:
	bool try_acquire_item(const_accessor *result, node_mutex_t &mutex,
			      bool write);

	template <typename K>
	bool internal_find(const K &key, const_accessor *result, bool write);

	template <typename K, typename... Args>
	bool internal_insert(const K &key, const_accessor *result, bool write,
			     Args &&... args);

	/* Find the node for the key inside an already acquired bucket,
	 * upgrading the bucket lock to a writer when requested. */
	template <bool Bucket_rw_lock, typename K>
	persistent_node_ptr_t
	get_node(const K &key, bucket_accessor &b)
	{
		auto n = search_bucket(key, b.get());

		if (!n) {
			if (Bucket_rw_lock && !b.is_writer() &&
			    !scoped_lock_traits_type::upgrade_to_writer(b)) {
				/* Rerun the search in case another thread
				 * inserted the item during the upgrade. */
				n = search_bucket(key, b.get());
				if (n) {
					/* It did; drop back to a reader. */
					scoped_lock_traits_type::
						downgrade_to_reader(b);
					return n;
				}
			}
		}

		return n;
	}

	template <typename K>
	bool internal_erase(const K &key);

	void clear_segment(segment_index_t s);

	void internal_copy(const concurrent_hash_map &source);

	template <typename I>
	void internal_copy(I first, I last);
}; /* class concurrent_hash_map */
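/*
 * Usage sketch (not part of the original header) of the accessor-based API
 * declared above; `map` is assumed to be a reference to an already
 * constructed and initialized map living in a persistent memory pool.
 */
#if 0
inline void
accessor_usage_sketch(
	concurrent_hash_map<pmem::obj::p<int>, pmem::obj::p<int>> &map)
{
	using map_type =
		concurrent_hash_map<pmem::obj::p<int>, pmem::obj::p<int>>;

	{
		map_type::accessor acc; /* write lock on the element */
		if (map.insert(acc, 42))
			acc->second = 13; /* fill the freshly inserted value */
	}

	{
		map_type::const_accessor cacc; /* read lock on the element */
		if (map.find(cacc, 42))
			assert(cacc->second.get_ro() == 13);
	}

	map.erase(42); /* safe once no accessor refers to the element */
}
#endif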
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::try_acquire_item(const_accessor *result,
						      node_mutex_t &mutex,
						      bool write)
{
	/* acquire the item */
	if (!result->try_acquire(mutex, write)) {
		for (detail::atomic_backoff backoff(true);;) {
			if (result->try_acquire(mutex, write))
				break;

			if (!backoff.bounded_pause())
				return false;
		}
	}

	return true;
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
template <typename K>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_find(const K &key,
						   const_accessor *result,
						   bool write)
{
	assert(!result || !result->my_node);

	hashcode_type m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
	ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif

	assert((m & (m + 1)) == 0);

	hashcode_type const h = hasher{}(key);

	persistent_node_ptr_t node;

	while (true) {
		/* get the bucket and acquire the lock */
		bucket_accessor b(
			this, h & m,
			scoped_lock_traits_type::initial_rw_state(false));
		node = get_node<false>(key, b);

		if (!node) {
			/* Element was possibly relocated, try again. */
			if (check_mask_race(h, m)) {
				b.release();
				continue;
			}

			return false;
		}

		/* No accessor requested, or the item was acquired. */
		if (!result ||
		    try_acquire_item(
			    result, node.get(this->my_pool_uuid)->mutex, write))
			break;

		/* The wait takes too long: restart the operation. */
		b.release();

		std::this_thread::yield();

		m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif
	}

	if (result) {
		result->my_node = node.get_persistent_ptr(this->my_pool_uuid);
		result->my_hash = h;
	}

	return true;
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
template <typename K, typename... Args>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_insert(const K &key,
						     const_accessor *result,
						     bool write,
						     Args &&... args)
{
	assert(!result || !result->my_node);

	hashcode_type m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
	ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif

	assert((m & (m + 1)) == 0);

	hashcode_type const h = hasher{}(key);

	persistent_node_ptr_t node;
	size_t new_size = 0;
	bool inserted = false;

	while (true) {
		/* get the bucket and acquire the lock */
		bucket_accessor b(
			this, h & m,
			scoped_lock_traits_type::initial_rw_state(true));
		node = get_node<true>(key, b);

		if (!node) {
			/* Element was possibly relocated, try again. */
			if (check_mask_race(h, m)) {
				b.release();
				continue;
			}

			/* Insert and remember to grow the container. */
			new_size = insert_new_node(b.get(), node,
						   std::forward<Args>(args)...);
			inserted = true;
		}

		/* No accessor requested, or the item was acquired. */
		if (!result ||
		    try_acquire_item(
			    result, node.get(this->my_pool_uuid)->mutex, write))
			break;

		/* The wait takes too long: restart the operation. */
		b.release();

		std::this_thread::yield();

		m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif
	}

	if (result) {
		result->my_node = node.get_persistent_ptr(this->my_pool_uuid);
		result->my_hash = h;
	}

	check_growth(m, new_size);

	return inserted;
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
template <typename K>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_erase(const K &key)
{
	node_ptr_t n;
	hashcode_type const h = hasher{}(key);
	hashcode_type m = mask().load(std::memory_order_acquire);
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
	ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
#endif

	pool_base pop = get_pool_base();

restart : {
	/* lock scope */
	bucket_accessor b(this, h & m,
			  scoped_lock_traits_type::initial_rw_state(true));

search:
	node_ptr_t *p = &b->node_list;
	n = *p;

	while (n &&
	       !key_equal{}(key,
			    detail::static_persistent_pool_pointer_cast<node>(
				    n)(this->my_pool_uuid)
				    ->item.first)) {
		p = &n(this->my_pool_uuid)->next;
		n = *p;
	}

	if (!n) {
		/* not found, but the mask could have changed */
		if (check_mask_race(h, m))
			goto restart;

		return false;
	} else if (!b.is_writer() &&
		   !scoped_lock_traits_type::upgrade_to_writer(b)) {
		if (check_mask_race(h, m)) /* contended upgrade, recheck */
			goto restart;

		goto search;
	}

	{
		/* The node cannot be removed while another accessor holds it:
		 * acquire a write lock on the item first. */
		persistent_ptr<node> del = n(this->my_pool_uuid);

		const_accessor acc;

		if (!try_acquire_item(&acc, del->mutex, true)) {
			/* The wait takes too long: restart the operation. */
			b.release();

			std::this_thread::yield();

			m = mask().load(std::memory_order_acquire);

			goto restart;
		}

		/* ... unlink *p and delete the node inside a transaction ... */
	}

	--(this->my_size.get_rw());
	pop.persist(this->my_size);
}

	return true;
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::swap(
	concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>
		&table)
{
	internal_swap(table);
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::rehash(
	size_type n)
{
	concurrent_hash_map_internal::check_outside_tx();

	reserve(n);
	hashcode_type m = mask();

	/* Only the last segment needs to be scanned for rehashing; b is the
	 * first bucket index of that segment. */
	hashcode_type b = (m + 1) >> 1;

	/* zero or a power of 2 */
	assert((b & (b - 1)) == 0);

	for (; b <= m; ++b) {
		bucket *bp = get_bucket(b);

		concurrent_hash_map_internal::assert_not_locked<mutex_t,
								scoped_t>(
			bp->mutex);

		if (bp->is_rehashed(std::memory_order_relaxed) == false)
			rehash_bucket<true>(bp, b);
	}
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::clear()
{
	hashcode_type m = mask();

	assert((m & (m + 1)) == 0);

	/* consistency check */
	for (segment_index_t b = 0; b <= m; ++b) {
		bucket *bp = get_bucket(b);
		concurrent_hash_map_internal::assert_not_locked<mutex_t,
								scoped_t>(
			bp->mutex);
	}

	pool_base pop = get_pool_base();
	{
		transaction::manual tx(pop);

		this->my_size.get_rw() = 0;
		segment_index_t s = segment_traits_t::segment_index_of(m);

		assert(s + 1 == this->block_table_size ||
		       !segment_facade_t(this->my_table, s + 1).is_valid());

		do {
			clear_segment(s);
		} while (s-- > 0);

		mask().store(embedded_buckets - 1, std::memory_order_relaxed);

		transaction::commit();
	}
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::clear_segment(segment_index_t s)
{
	segment_facade_t segment(this->my_table, s);

	assert(segment.is_valid());

	size_type sz = segment.size();
	for (segment_index_t i = 0; i < sz; ++i) {
		for (node_ptr_t n = segment[i].node_list; n;
		     n = segment[i].node_list) {
			segment[i].node_list = n(this->my_pool_uuid)->next;
			delete_node(n);
		}
	}

	if (s >= segment_traits_t::embedded_segments)
		segment.disable();
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::
	internal_copy(const concurrent_hash_map &source)
{
	reserve(source.my_size.get_ro());
	internal_copy(source.begin(), source.end());
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
template <typename I>
void
concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
		    ScopedLockType>::internal_copy(I first, I last)
{
	hashcode_type m = mask();

	for (; first != last; ++first) {
		hashcode_type h = hasher{}(first->first);
		bucket *b = get_bucket(h & m);

		assert(b->is_rehashed(std::memory_order_relaxed));

		detail::persistent_pool_ptr<node> p;
		insert_new_node(b, p, *first);
	}
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
inline bool
operator==(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				      ScopedLockType> &a,
	   const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType> &b)
{
	if (a.size() != b.size())
		return false;

	typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType>::const_iterator
		i(a.begin()),
		i_end(a.end());

	typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType>::const_iterator j,
		j_end(b.end());

	for (; i != i_end; ++i) {
		j = b.equal_range(i->first).first;

		if (j == j_end || !(i->second == j->second))
			return false;
	}

	return true;
}

template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
inline bool
operator!=(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				      ScopedLockType> &a,
	   const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
				     ScopedLockType> &b)
{
	return !(a == b);
}
template <typename Key, typename T, typename Hash, typename KeyEqual,
	  typename MutexType, typename ScopedLockType>
inline void
swap(concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType> &a,
     concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType> &b)
{
	a.swap(b);
}
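/*
 * Usage sketch (not part of the original header): the free swap() and the
 * comparison operators above make two maps usable with std-style generic
 * code. The references are assumed to be properly initialized maps, as in
 * the earlier sketches.
 */
#if 0
template <typename Map>
void
swap_and_compare_sketch(Map &map_a, Map &map_b)
{
	if (map_a == map_b)
		return;

	swap(map_a, map_b); /* transactional swap of the two containers */
}
#endif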