38 #ifndef PMEMOBJ_CONCURRENT_HASH_MAP_HPP
39 #define PMEMOBJ_CONCURRENT_HASH_MAP_HPP
51 #if LIBPMEMOBJ_CPP_USE_TBB_RW_MUTEX
52 #include "tbb/spin_rw_mutex.h"
57 #include <libpmemobj++/detail/persistent_pool_ptr.hpp>
62 #include <initializer_list>
67 #include <type_traits>
struct hash<pmem::obj::p<T>> {
	size_t operator()(const pmem::obj::p<T> &x) const
	{
		return hash<T>()(x.get_ro());
	}
};
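/*
 * Illustration (not part of the header): because of the specialization above,
 * a pmem::obj::p<T> hashes exactly like the wrapped T, so p<T> keys work with
 * the default std::hash-based hasher of concurrent_hash_map. A minimal sketch:
 *
 *   #include <libpmemobj++/p.hpp>
 *   #include <cassert>
 *   #include <functional>
 *
 *   void
 *   hash_p_example(const pmem::obj::p<int> &key)
 *   {
 *           std::size_t a = std::hash<pmem::obj::p<int>>{}(key);
 *           std::size_t b = std::hash<int>{}(key.get_ro());
 *           assert(a == b); // both forward to std::hash<int>
 *   }
 */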
93 namespace experimental
96 using namespace pmem::obj;
template <typename Key, typename T, typename Hash = std::hash<Key>,
	  typename KeyEqual = std::equal_to<Key>>
class concurrent_hash_map;
template <typename Hash>
using transparent_key_equal = typename Hash::transparent_key_equal;

template <typename Hash>
using has_transparent_key_equal = detail::supports<Hash, transparent_key_equal>;
template <typename Hash, typename Pred,
	  bool = has_transparent_key_equal<Hash>::value>
struct key_equal_type {
	using type = typename Hash::transparent_key_equal;
};

template <typename Hash, typename Pred>
struct key_equal_type<Hash, Pred, false> {
	using type = Pred;
};
template <typename Mutex>
void
assert_not_locked(Mutex &mtx)
{
	assert(mtx.try_lock());
	mtx.unlock();
}

template <typename Mutex>
void
assert_not_locked(v<Mutex> &mtx)
{
	assert_not_locked<Mutex>(mtx.get());
}
146 _BitScanReverse64(&j, x);
147 return static_cast<int>(j);
#elif __GNUC__ || __clang__
	return 8 * int(sizeof(x)) - __builtin_clzll(x) - 1;
	static const int table[64] = {
		0,  58, 1,  59, 47, 53, 2,  60, 39, 48, 27, 54, 33, 42, 3,  61,
		51, 37, 40, 49, 18, 28, 20, 55, 30, 34, 11, 43, 14, 22, 4,  62,
		57, 46, 52, 38, 26, 32, 41, 50, 36, 17, 19, 29, 10, 13, 21, 56,
		45, 25, 31, 35, 16, 9,  12, 44, 24, 15, 8,  23, 7,  6,  5,  63};

	return table[(x * 0x03f6eaf2cd271461) >> 58];
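/*
 * Illustration (not part of the header): whichever branch is compiled, Log2
 * returns the index of the most significant set bit, i.e. floor(log2(x)) for
 * x > 0. segment_traits::segment_index_of below relies on this contract.
 *
 *   assert(Log2(1) == 0);
 *   assert(Log2(2) == 1);
 *   assert(Log2(3) == 1);
 *   assert(Log2(1024) == 10);
 *   assert(Log2((uint64_t(1) << 40) + 5) == 40);
 */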
177 class atomic_backoff {
183 static const int32_t LOOPS_BEFORE_YIELD = 16;
187 __pause(int32_t delay)
189 for (; delay > 0; --delay) {
192 #elif __GNUC__ && (__i386__ || __x86_64__)
194 __builtin_ia32_pause();
	atomic_backoff(const atomic_backoff &) = delete;
	atomic_backoff &operator=(const atomic_backoff &) = delete;
213 atomic_backoff() : count(1)
	atomic_backoff(bool) : count(1)
231 if (count <= LOOPS_BEFORE_YIELD) {
238 std::this_thread::yield();
249 if (count < LOOPS_BEFORE_YIELD) {
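/*
 * Illustration (not part of the header): atomic_backoff is meant for short
 * spin-wait loops. A hypothetical sketch, assuming some atomic flag to wait on:
 *
 *   std::atomic<bool> ready{false};
 *
 *   void
 *   wait_until_ready()
 *   {
 *           for (atomic_backoff backoff;
 *                !ready.load(std::memory_order_acquire);)
 *                   backoff.pause(); // exponential busy-wait, then yield
 *   }
 *
 * pause() roughly doubles the busy-wait until LOOPS_BEFORE_YIELD is reached
 * and then falls back to std::this_thread::yield(); bounded_pause() instead
 * reports failure once the limit is hit so the caller can take a slower path
 * (see its use in lookup() further below).
 */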
template <typename T, typename InitFunctor>
class atomic_wrapper {
272 using value_type = T;
273 using atomic_type = std::atomic<value_type>;
274 using init_type = InitFunctor;
276 atomic_wrapper() noexcept
	atomic_wrapper(const value_type &val) noexcept : my_atomic(val)

	template <typename... Args>
	atomic_wrapper(Args &&... args)
	    : my_atomic(init_type()(std::forward<Args>(args)...))
290 operator atomic_type &() noexcept
296 atomic_type my_atomic;
template <typename T, typename U, typename... Args>
307 #if LIBPMEMOBJ_CPP_CONCURRENT_HASH_MAP_USE_ATOMIC_ALLOCATOR
308 make_persistent_atomic<T>(pop, ptr, std::forward<Args>(args)...);
312 ptr = make_persistent<T>(std::forward<Args>(args)...);
315 throw std::bad_alloc();
320 #if !LIBPMEMOBJ_CPP_USE_TBB_RW_MUTEX
321 class shared_mutex_scoped_lock {
	shared_mutex_scoped_lock(const shared_mutex_scoped_lock &) = delete;
	shared_mutex_scoped_lock &
	operator=(const shared_mutex_scoped_lock &) = delete;

	shared_mutex_scoped_lock() : mutex(nullptr), is_writer(false)

	shared_mutex_scoped_lock(rw_mutex_type &m, bool write = true)
342 ~shared_mutex_scoped_lock()
	acquire(rw_mutex_type &m, bool write = true)
357 mutex->lock_shared();
372 mutex->unlock_shared();
		rw_mutex_type *m = mutex;
401 downgrade_to_reader()
	try_acquire(rw_mutex_type &m, bool write = true)
417 result = write ? m.try_lock() : m.try_lock_shared();
	rw_mutex_type *mutex;
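/*
 * Illustration (not part of the header): the class follows the TBB-style
 * scoped-lock protocol used throughout this file. A hypothetical sketch,
 * assuming mtx is the rw_mutex_type wrapped here:
 *
 *   {
 *           shared_mutex_scoped_lock lock(mtx, false); // shared (reader) lock
 *           // ... read under the lock ...
 *   } // released in the destructor
 *
 *   shared_mutex_scoped_lock lock;
 *   if (lock.try_acquire(mtx, true)) {                 // exclusive (writer) lock
 *           // ... write under the lock ...
 *           lock.release();
 *   }
 */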
437 struct hash_map_node_base {
438 #if LIBPMEMOBJ_CPP_USE_TBB_RW_MUTEX
443 using scoped_t = tbb::spin_rw_mutex::scoped_lock;
449 using scoped_t = shared_mutex_scoped_lock;
453 using node_base_ptr_t = detail::persistent_pool_ptr<hash_map_node_base>;
456 node_base_ptr_t next;
	hash_map_node_base() : next(OID_NULL)

	hash_map_node_base(const node_base_ptr_t &_next) : next(_next)

	hash_map_node_base(node_base_ptr_t &&_next) : next(std::move(_next))

	hash_map_node_base(const hash_map_node_base &) = delete;

	hash_map_node_base &operator=(const hash_map_node_base &) = delete;
static detail::persistent_pool_ptr<hash_map_node_base> const empty_bucket =
	OID_NULL;
template <typename Bucket>
class segment_traits {
492 using segment_index_t = size_t;
493 using size_type = size_t;
494 using bucket_type = Bucket;
	constexpr static size_type max_allocation_size = PMEMOBJ_MAX_ALLOC_SIZE;

	constexpr static segment_index_t first_big_block = 27;

	constexpr static size_type big_block_size = size_type(1)
		<< first_big_block;
	static_assert((big_block_size * sizeof(bucket_type)) <
			      max_allocation_size,
		      "Block size exceeds max_allocation_size");
	constexpr static segment_index_t
	first_block_in_segment(segment_index_t seg)
	{
		return seg < first_big_block
			? seg
			: (first_big_block +
			   (segment_index_t(1) << (seg - first_big_block)) - 1);
	}
	constexpr static size_type
	blocks_in_segment(segment_index_t seg)
	{
		return seg < first_big_block
			? segment_index_t(1)
			: segment_index_t(1) << (seg - first_big_block);
	}
	constexpr static size_type
	block_size(segment_index_t b)
	{
		return b < first_big_block ? segment_size(b ? b : 1)
					   : big_block_size;
	}
	constexpr static segment_index_t embedded_segments = 1;

	constexpr static size_type embedded_buckets = 1 << embedded_segments;

	constexpr static segment_index_t number_of_segments = 32;

	static const size_type first_block = 8;

	constexpr static segment_index_t
	number_of_blocks()
	{
		return first_block_in_segment(number_of_segments);
	}
	static segment_index_t
	segment_index_of(size_type index)
	{
		return segment_index_t(Log2(index | 1));
	}

	constexpr static segment_index_t
	segment_base(segment_index_t k)
	{
		return (segment_index_t(1) << k) & ~segment_index_t(1);
	}

	constexpr static size_type
	segment_size(segment_index_t k)
	{
		return size_type(1) << k;
	}
	static_assert(embedded_segments < first_big_block,
		      "Number of embedded segments cannot exceed max_allocation_size");
template <typename BlockTable, typename SegmentTraits, bool is_const>
class segment_facade_impl : public SegmentTraits {
605 using traits_type = SegmentTraits;
606 using traits_type::block_size;
607 using traits_type::blocks_in_segment;
608 using traits_type::embedded_buckets;
609 using traits_type::embedded_segments;
610 using traits_type::first_block;
611 using traits_type::first_block_in_segment;
612 using traits_type::segment_base;
613 using traits_type::segment_size;
	using table_reference =
		typename std::conditional<is_const, const BlockTable &,
					  BlockTable &>::type;

	using table_pointer =
		typename std::conditional<is_const, const BlockTable *,
					  BlockTable *>::type;

	using bucket_type = typename traits_type::bucket_type;
	using segment_index_t = typename traits_type::segment_index_t;
	using size_type = typename traits_type::size_type;
629 segment_facade_impl(table_reference table, segment_index_t s)
630 : my_table(&table), my_seg(s)
632 assert(my_seg < traits_type::number_of_segments);
	segment_facade_impl(const segment_facade_impl &src)
637 : my_table(src.my_table), my_seg(src.my_seg)
	segment_facade_impl(segment_facade_impl &&src) = default;
644 segment_facade_impl &
	operator=(const segment_facade_impl &src)
647 my_table = src.my_table;
653 segment_facade_impl &
654 operator=(segment_facade_impl &&src)
656 my_table = src.my_table;
	bucket_type &operator[](size_type i) const
	{
		segment_index_t table_block = first_block_in_segment(my_seg);
		size_type b_size = block_size(table_block);

		table_block += i / b_size;
		i = i % b_size;

		return (*my_table)[table_block][static_cast<std::ptrdiff_t>(i)];
	}
683 segment_facade_impl &
		segment_facade_impl tmp = *this;
704 segment_facade_impl &
		segment_facade_impl tmp = *this;
725 segment_facade_impl &
735 segment_facade_impl &
748 return segment_facade_impl(*(this->my_table),
758 return segment_facade_impl(*(this->my_table),
768 assert(my_seg >= embedded_segments);
770 if (my_seg < first_block) {
771 enable_first_block(pop);
773 enable_big_segment(pop);
783 assert(my_seg >= embedded_segments);
785 if (my_seg < first_block) {
786 if (my_seg == embedded_segments) {
				size_type sz = segment_size(first_block) -
					embedded_buckets;
				delete_persistent<bucket_type[]>(
					(*my_table)[my_seg], sz);
			(*my_table)[my_seg] = nullptr;
794 block_range blocks = segment_blocks(my_seg);
			for (segment_index_t b = blocks.first;
			     b < blocks.second; ++b) {
				if ((*my_table)[b] != nullptr) {
					delete_persistent<bucket_type[]>(
						(*my_table)[b], block_size(b));
					(*my_table)[b] = nullptr;
				}
			}
813 return segment_size(my_seg ? my_seg : 1);
824 block_range blocks = segment_blocks(my_seg);
826 for (segment_index_t b = blocks.first; b < blocks.second; ++b) {
			if ((*my_table)[b] == nullptr)
835 using block_range = std::pair<segment_index_t, segment_index_t>;
841 segment_blocks(segment_index_t seg)
		segment_index_t begin = first_block_in_segment(seg);
845 return block_range(begin, begin + blocks_in_segment(seg));
851 assert(my_seg == embedded_segments);
856 segment_size(first_block) - embedded_buckets;
857 (*my_table)[my_seg] =
858 make_persistent<bucket_type[]>(sz);
861 (*my_table)[embedded_segments].raw();
863 for (segment_index_t s = my_seg + 1; s < first_block;
				static_cast<std::ptrdiff_t>(
					segment_base(s) -
					segment_base(my_seg));
870 (*my_table)[s] = (base + off).raw();
875 throw std::bad_alloc();
882 block_range blocks = segment_blocks(my_seg);
883 #if LIBPMEMOBJ_CPP_CONCURRENT_HASH_MAP_USE_ATOMIC_ALLOCATOR
		for (segment_index_t b = blocks.first; b < blocks.second; ++b) {
			if ((*my_table)[b] == nullptr)
				make_persistent_atomic<bucket_type[]>(
					pop, (*my_table)[b], block_size(b));
		}
			for (segment_index_t b = blocks.first;
			     b < blocks.second; ++b) {
				assert((*my_table)[b] == nullptr);
				(*my_table)[b] = make_persistent<bucket_type[]>(
					block_size(b));
			}
902 throw std::bad_alloc();
908 table_pointer my_table;
911 segment_index_t my_seg;
918 class hash_map_base {
921 using size_type = size_t;
924 using hashcode_t = size_t;
927 using node_base = hash_map_node_base;
930 using node_base_ptr_t = detail::persistent_pool_ptr<node_base>;
937 #if LIBPMEMOBJ_CPP_USE_TBB_RW_MUTEX
942 using scoped_t = tbb::spin_rw_mutex::scoped_lock;
948 using scoped_t = shared_mutex_scoped_lock;
958 node_base_ptr_t node_list;
961 tmp_node_ptr_t tmp_node;
964 bucket() : node_list(empty_bucket), tmp_node(nullptr)
966 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
967 VALGRIND_HG_DISABLE_CHECKING(&rehashed,
			rehashed.get_rw() = false;
979 is_rehashed(std::memory_order order)
			return rehashed.get_ro().load(order);
985 set_rehashed(std::memory_order order)
			rehashed.get_rw().store(true, order);
		bucket(const bucket &) = delete;
		bucket &operator=(const bucket &) = delete;
998 using segment_traits_t = segment_traits<bucket>;
	using segment_index_t = typename segment_traits_t::segment_index_t;
1004 static const size_type embedded_buckets =
1005 segment_traits_t::embedded_buckets;
1008 static const size_type first_block = segment_traits_t::first_block;
	constexpr static size_type block_table_size =
		segment_traits_t::number_of_blocks();
1021 using blocks_table_t = segment_ptr_t[block_table_size];
1023 class calculate_mask {
		operator()(const hash_map_base *map_base) const
1028 return map_base->calculate_mask();
1033 using mask_type = v<atomic_wrapper<hashcode_t, calculate_mask>>;
1046 blocks_table_t my_table;
1054 bucket my_embedded_segment[embedded_buckets];
1060 segment_enable_mutex_t my_segment_enable_mutex;
		return reinterpret_cast<uintptr_t>(ptr) > uintptr_t(63);
	template <typename U>
	is_valid(const detail::persistent_pool_ptr<U> &ptr)
1073 return ptr.raw() > uint64_t(63);
	template <typename U>
1080 return ptr.raw().off > uint64_t(63);
	const std::atomic<hashcode_t> &
	mask() const noexcept
	{
		return const_cast<mask_type &>(my_mask).get(this);
	}
1089 std::atomic<hashcode_t> &
		return my_mask.get(this);
1096 using const_segment_facade_t =
1097 segment_facade_impl<blocks_table_t, segment_traits_t, true>;
1100 using segment_facade_t =
1101 segment_facade_impl<blocks_table_t, segment_traits_t, false>;
	static_assert(sizeof(size_type) == sizeof(std::atomic<size_type>),
		      "std::atomic should have the same layout as underlying integral type");
1110 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
	VALGRIND_HG_DISABLE_CHECKING(&my_size, sizeof(my_size));
	VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
	PMEMoid oid = pmemobj_oid(this);
1118 assert(!OID_IS_NULL(oid));
1120 my_pool_uuid = oid.pool_uuid_lo;
1124 for (size_type i = 0; i < segment_traits_t::embedded_segments;
1127 pmemobj_oid(my_embedded_segment +
1128 segment_traits_t::segment_base(i));
1129 segment_facade_t seg(my_table, i);
1130 mark_rehashed<false>(pop, seg);
1133 assert(mask() == embedded_buckets - 1);
	calculate_mask() const
1142 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
		VALGRIND_HG_DISABLE_CHECKING(&my_size, sizeof(my_size));
		VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
1146 hashcode_t m = embedded_buckets - 1;
1148 const_segment_facade_t segment(
1149 my_table, segment_traits_t::embedded_segments);
1151 while (segment.is_valid()) {
1152 m += segment.size();
1159 restore_size(size_type actual_size)
		my_size.get_rw().store(actual_size, std::memory_order_relaxed);
	template <bool Flush = true>
	mark_rehashed(pool_base &pop, segment_facade_t &segment)
1173 for (size_type i = 0; i < segment.size(); ++i) {
1174 bucket *b = &(segment[i]);
1176 assert_not_locked(b->mutex);
1178 b->set_rehashed(std::memory_order_relaxed);
1183 for (size_type i = 0; i < segment.size(); ++i) {
1184 bucket *b = &(segment[i]);
			pop.flush(b->rehashed);
	add_to_bucket(bucket *b, pool_base &pop)
	{
		assert(b->is_rehashed(std::memory_order_relaxed) == true);
1199 assert(is_valid(b->tmp_node));
1200 assert(b->tmp_node->next == b->node_list);
1202 b->node_list = b->tmp_node;
		pop.persist(&(b->node_list), sizeof(b->node_list));
	enable_segment(segment_index_t k, bool is_initial = false)
1217 if (k >= first_block) {
1218 segment_facade_t new_segment(my_table, k);
1220 sz = new_segment.size();
1221 if (!new_segment.is_valid())
1222 new_segment.enable(pop);
1225 mark_rehashed(pop, new_segment);
1232 assert(k == segment_traits_t::embedded_segments);
1234 for (segment_index_t i = k; i < first_block; ++i) {
1235 segment_facade_t new_segment(my_table, i);
1237 if (!new_segment.is_valid())
1238 new_segment.enable(pop);
1241 mark_rehashed(pop, new_segment);
1245 sz = segment_traits_t::segment_size(first_block);
1247 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
1248 ANNOTATE_HAPPENS_BEFORE(&my_mask);
1250 mask().store(sz - 1, std::memory_order_release);
	get_bucket(hashcode_t h) const
1260 segment_index_t s = segment_traits_t::segment_index_of(h);
1262 h -= segment_traits_t::segment_base(s);
1264 const_segment_facade_t segment(my_table, s);
1266 assert(segment.is_valid());
1268 return &(segment[h]);
	check_mask_race(hashcode_t h, hashcode_t &m) const
1277 hashcode_t m_now, m_old = m;
1279 m_now = mask().load(std::memory_order_acquire);
1282 return check_rehashing_collision(h, m_old, m = m_now);
1291 check_rehashing_collision(hashcode_t h, hashcode_t m_old,
1296 if ((h & m_old) != (h & m)) {
1301 for (++m_old; !(h & m_old); m_old <<= 1)
1304 m_old = (m_old << 1) - 1;
1306 assert((m_old & (m_old + 1)) == 0 && m_old <= m);
1309 bucket *b = get_bucket(h & m_old);
1310 return b->is_rehashed(std::memory_order_acquire);
1320 correct_bucket(bucket *b)
		if (is_valid(b->tmp_node)) {
			if (b->tmp_node->next == b->node_list) {
				insert_new_node(pop, b);
			} else {
				b->tmp_node.raw_ptr()->off = 0;
				pop.persist(&(b->tmp_node.raw().off),
					    sizeof(b->tmp_node.raw().off));
			}
		}
	insert_new_node(pool_base &pop, bucket *b)
	{
		add_to_bucket(b, pop);

		size_t sz = ++(my_size.get_rw());
		pop.persist(&my_size, sizeof(my_size));

		b->tmp_node.raw_ptr()->off = 0;
		pop.persist(&(b->tmp_node.raw().off),
			    sizeof(b->tmp_node.raw().off));
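	/*
	 * Note on the scheme above (summary of the visible code, not new
	 * behaviour): an insert is made crash-consistent by first staging the
	 * freshly allocated node in bucket::tmp_node, with tmp_node->next
	 * pointing at the current head of the bucket. insert_new_node() then
	 * publishes it (node_list = tmp_node), persists the new head and the
	 * incremented my_size, and finally clears tmp_node by zeroing its
	 * offset. correct_bucket() is the recovery path: if a crash left
	 * tmp_node set, the pending insert is either completed (when
	 * tmp_node->next still equals node_list) or discarded by zeroing the
	 * offset.
	 */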
1361 check_growth(hashcode_t m, size_type sz)
		segment_index_t new_seg = static_cast<segment_index_t>(
1367 assert(segment_facade_t(my_table, new_seg - 1)
1370 std::unique_lock<segment_enable_mutex_t> lock(
1371 my_segment_enable_mutex, std::try_to_lock);
1374 if (mask().load(std::memory_order_relaxed) ==
1378 enable_segment(new_seg);
1392 reserve(size_type buckets)
	bool is_initial = (my_size.get_ro() == 0);
1401 for (size_type m = mask(); buckets > m; m = mask())
1403 segment_traits_t::segment_index_of(m + 1),
1412 internal_swap(hash_map_base &table)
	this->my_pool_uuid.swap(table.my_pool_uuid);
1420 this->mask() = table.mask().exchange(
1421 this->mask(), std::memory_order_relaxed);
1425 table.my_size.get_rw().exchange(
1427 std::memory_order_relaxed);
1429 for (size_type i = 0; i < embedded_buckets; ++i)
1430 this->my_embedded_segment[i].node_list.swap(
1431 table.my_embedded_segment[i].node_list);
1433 for (size_type i = segment_traits_t::embedded_segments;
1434 i < block_table_size; ++i)
			this->my_table[i].swap(table.my_table[i]);
1439 throw std::runtime_error(e);
1451 pmemobj_pool_by_oid(PMEMoid{my_pool_uuid, 0});
template <typename Container, bool is_const>
class hash_map_iterator {
1465 using iterator_category = std::forward_iterator_tag;
1466 using difference_type = ptrdiff_t;
1467 using map_type = Container;
	using value_type = typename map_type::value_type;
	using node = typename map_type::node;
	using bucket = typename map_type::bucket;
	using map_ptr = typename std::conditional<is_const, const map_type *,
						  map_type *>::type;
	using reference =
		typename std::conditional<is_const,
					  typename map_type::const_reference,
					  typename map_type::reference>::type;
	using pointer =
		typename std::conditional<is_const,
					  typename map_type::const_pointer,
					  typename map_type::pointer>::type;
	template <typename C, bool M, bool U>
	friend bool operator==(const hash_map_iterator<C, M> &i,
			       const hash_map_iterator<C, U> &j);
	template <typename C, bool M, bool U>
	friend bool operator!=(const hash_map_iterator<C, M> &i,
			       const hash_map_iterator<C, U> &j);
1490 friend class hash_map_iterator<map_type, true>;
1492 #if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
	template <typename Key, typename T, typename Hash, typename KeyEqual>
	hash_map_iterator(map_ptr map, size_t index)
	    : my_map(map), my_index(index), my_bucket(nullptr), my_node(nullptr)
1502 if (my_index <= my_map->mask()) {
1503 bucket_accessor acc(my_map, my_index);
1504 my_bucket = acc.get();
			my_node = static_cast<node *>(
				my_bucket->node_list.get(my_map->my_pool_uuid));
1508 if (!hash_map_base::is_valid(my_node)) {
1509 advance_to_next_bucket();
1516 hash_map_iterator() : my_map(), my_index(), my_bucket(), my_node()
	hash_map_iterator(const hash_map_iterator &other)
1522 : my_map(other.my_map),
1523 my_index(other.my_index),
1524 my_bucket(other.my_bucket),
1525 my_node(other.my_node)
	template <typename U = void,
		  typename = typename std::enable_if<is_const, U>::type>
	hash_map_iterator(const hash_map_iterator<map_type, false> &other)
1533 : my_map(other.my_map),
1534 my_index(other.my_index),
1535 my_bucket(other.my_bucket),
1536 my_node(other.my_node)
	reference operator*() const
1543 assert(hash_map_base::is_valid(my_node));
1544 return my_node->item;
	pointer operator->() const
1550 return &operator*();
		my_node = static_cast<node *>(
			my_node->next.get((my_map->my_pool_uuid)));
1561 advance_to_next_bucket();
		hash_map_iterator old(*this);
1588 class bucket_accessor {
		bucket_accessor(map_ptr m, size_t index)
1592 my_bucket = m->get_bucket(index);
			const_cast<map_type *>(m)->correct_bucket(my_bucket);
1607 advance_to_next_bucket()
1609 size_t k = my_index + 1;
1613 while (k <= my_map->mask()) {
1614 bucket_accessor acc(my_map, k);
1615 my_bucket = acc.get();
1617 if (hash_map_base::is_valid(my_bucket->node_list)) {
				my_node = static_cast<node *>(
					my_bucket->node_list.get(
						my_map->my_pool_uuid));
template <typename Container, bool M, bool U>
operator==(const hash_map_iterator<Container, M> &i,
	   const hash_map_iterator<Container, U> &j)
1641 return i.my_node == j.my_node && i.my_map == j.my_map;
template <typename Container, bool M, bool U>
operator!=(const hash_map_iterator<Container, M> &i,
	   const hash_map_iterator<Container, U> &j)
1649 return i.my_node != j.my_node || i.my_map != j.my_map;
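/*
 * Illustration (not part of the header): hash_map_iterator is a forward
 * iterator, so a fully constructed map can be walked with a range-for loop.
 * Traversal is not synchronized with writers; the sketch below assumes a
 * pointer map_ptr to a map that is not being modified concurrently, and
 * process() is a placeholder.
 *
 *   for (auto &e : *map_ptr)            // e is std::pair<const Key, T> &
 *           process(e.first, e.second);
 *
 *   for (auto it = map_ptr->begin(); it != map_ptr->end(); ++it)
 *           process(it->first, it->second);
 */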
template <typename Key, typename T, typename Hash, typename KeyEqual>
class concurrent_hash_map : protected internal::hash_map_base {
	template <typename Container, bool is_const>
	friend class internal::hash_map_iterator;
1664 using key_type = Key;
1665 using mapped_type = T;
1666 using value_type = std::pair<const Key, T>;
1667 using size_type = hash_map_base::size_type;
1668 using difference_type = ptrdiff_t;
1669 using pointer = value_type *;
	using const_pointer = const value_type *;
	using reference = value_type &;
	using const_reference = const value_type &;
	using iterator =
		internal::hash_map_iterator<concurrent_hash_map, false>;
	using const_iterator =
		internal::hash_map_iterator<concurrent_hash_map, true>;
	using hasher = Hash;
	using key_equal =
		typename internal::key_equal_type<Hash, KeyEqual>::type;
		node(const Key &key, const node_base_ptr_t &_next = OID_NULL)
		    : node_base(_next), item(key, T())

		node(const Key &key, const T &t,
		     const node_base_ptr_t &_next = OID_NULL)
		    : node_base(_next), item(key, t)

		node(const Key &key, T &&t, node_base_ptr_t &&_next = OID_NULL)
		    : node_base(std::move(_next)), item(key, std::move(t))

		node(value_type &&i, node_base_ptr_t &&_next = OID_NULL)
		    : node_base(std::move(_next)), item(std::move(i))

		node(value_type &&i, const node_base_ptr_t &_next = OID_NULL)
		    : node_base(_next), item(std::move(i))

		template <typename... Args>
		node(Args &&... args, node_base_ptr_t &&_next = OID_NULL)
		    : node_base(std::forward<node_base_ptr_t>(_next)),
		      item(std::forward<Args>(args)...)

		node(const value_type &i,
		     const node_base_ptr_t &_next = OID_NULL)
		    : node_base(_next), item(i)
1730 using persistent_node_ptr_t = detail::persistent_pool_ptr<node>;
	delete_node(const node_base_ptr_t &n)
	{
		delete_persistent<node>(
			detail::static_persistent_pool_pointer_cast<node>(n)
				.get_persistent_ptr(my_pool_uuid));
	allocate_node_copy_construct(pool_base &pop,
				     persistent_node_ptr_t &node_ptr,
				     const void *param,
				     const node_base_ptr_t &next = OID_NULL)
	{
		const value_type *v = static_cast<const value_type *>(param);
		internal::make_persistent_object<node>(pop, node_ptr, *v, next);
	allocate_node_move_construct(pool_base &pop,
				     persistent_node_ptr_t &node_ptr,
				     const void *param,
				     const node_base_ptr_t &next = OID_NULL)
	{
		const value_type *v = static_cast<const value_type *>(param);
		internal::make_persistent_object<node>(
			pop, node_ptr, std::move(*const_cast<value_type *>(v)),
			next);
	allocate_node_default_construct(pool_base &pop,
					persistent_node_ptr_t &node_ptr,
					const void *param,
					const node_base_ptr_t &next = OID_NULL)
	{
		const Key &key = *static_cast<const Key *>(param);
		internal::make_persistent_object<node>(pop, node_ptr, key,
						       next);
1776 const node_base_ptr_t &next = OID_NULL)
	template <typename K>
	persistent_node_ptr_t
	search_bucket(const K &key, bucket *b) const
1785 assert(b->is_rehashed(std::memory_order_relaxed));
1786 assert(!is_valid(b->tmp_node));
		persistent_node_ptr_t n =
			detail::static_persistent_pool_pointer_cast<node>(
				b->node_list);

		while (is_valid(n) &&
		       !key_equal{}(key, n.get(my_pool_uuid)->item.first)) {
			n = detail::static_persistent_pool_pointer_cast<node>(
				n.get(my_pool_uuid)->next);
		}
			bool writer = false)
1812 acquire(base, h, writer);
		bool writer = false)
1823 my_b = base->get_bucket(h);
		if (my_b->is_rehashed(std::memory_order_acquire) == false &&
		    try_acquire(my_b->mutex, true)) {
			if (my_b->is_rehashed(
				    std::memory_order_relaxed) == false) {
				base->rehash_bucket<false>(my_b, h);
1835 bucket::scoped_t::acquire(my_b->mutex, writer);
1838 if (is_valid(my_b->tmp_node)) {
1841 if (!this->is_writer())
1842 this->upgrade_to_writer();
1843 assert(this->is_writer());
1844 base->correct_bucket(my_b);
1847 assert(my_b->is_rehashed(std::memory_order_relaxed));
1856 return bucket::scoped_t::is_writer;
			       const hashcode_t h, bool writer = false)
1889 acquire(base, h, writer);
		bool writer = false)
1899 my_b = base->get_bucket(h);
		if (my_b->is_rehashed(std::memory_order_relaxed) == false) {
			base->rehash_bucket<true>(my_b, h);
1907 base->correct_bucket(my_b);
1909 assert(my_b->is_rehashed(std::memory_order_relaxed));
1957 get_hash_code(node_base_ptr_t &n)
1960 detail::static_persistent_pool_pointer_cast<node>(n)(
	template <bool serial>
	rehash_bucket(bucket *b_new, const hashcode_t h)
	{
		using accessor_type = typename std::conditional<
			serial, serial_bucket_accessor, bucket_accessor>::type;
1976 node_base_ptr_t *p_new = &(b_new->node_list);
		bool restore_after_crash = *p_new != nullptr;
1980 hashcode_t mask = (1u << internal::Log2(h)) - 1;
1981 assert((h & mask) < h);
		bool writer = false;
		accessor_type b_old(this, h & mask, writer);
1986 mask = (mask << 1) | 1;
1987 assert((mask & (mask + 1)) == 0 && (h & mask) == h);
1989 for (node_base_ptr_t *p_old = &(b_old->node_list), n = *p_old;
1990 is_valid(n); n = *p_old) {
1991 hashcode_t c = get_hash_code(n);
1993 hashcode_t bmask = h & (mask >> 1);
1997 : (1u << (internal::Log2(bmask) + 1)) - 1;
1999 assert((c & bmask) == (h & bmask));
2002 if ((c & mask) == h) {
2003 if (!b_old.is_writer() &&
2004 !b_old.upgrade_to_writer()) {
2010 if (restore_after_crash) {
					while (*p_new != nullptr &&
2012 (mask & get_hash_code(*p_new)) ==
2015 p_new = &((*p_new)(my_pool_uuid)
					restore_after_crash = false;
				pop.persist(p_new, sizeof(*p_new));
2027 *p_old = n(my_pool_uuid)->next;
				pop.persist(p_old, sizeof(*p_old));
2030 p_new = &(n(my_pool_uuid)->next);
2033 p_old = &(n(my_pool_uuid)->next);
		if (restore_after_crash) {
			while (*p_new != nullptr &&
			       (mask & get_hash_code(*p_new)) == h)
				p_new = &((*p_new)(my_pool_uuid)->next);
		pop.persist(p_new, sizeof(*p_new));
2047 b_new->set_rehashed(std::memory_order_release);
2051 struct call_clear_on_leave {
2052 concurrent_hash_map *my_ch_map;
2053 call_clear_on_leave(concurrent_hash_map *a_ch_map)
2054 : my_ch_map(a_ch_map)
2064 ~call_clear_on_leave()
	class const_accessor : private node::scoped_t {

		using value_type =
			const typename concurrent_hash_map::value_type;
2106 node::scoped_t::release();
2118 return my_node->item;
2126 return &operator*();
2149 return node::scoped_t::is_writer;
2169 assert(this->my_node);
2171 return this->my_node->item;
2177 return &operator*();
2201 : internal::hash_map_base()
		reserve(table.size());
2205 internal_copy(table);
2212 : internal::hash_map_base()
	template <typename I>
	concurrent_hash_map(I first, I last)
	{
		reserve(static_cast<size_type>(std::distance(first, last)));
2225 internal_copy(first, last);
2235 internal_copy(il.begin(), il.end());
2246 if (!graceful_shutdown) {
				std::distance(this->begin(), this->end());
2249 assert(actual_size >= 0);
2250 this->restore_size(size_type(actual_size));
2252 assert(this->size() ==
2253 size_type(std::distance(this->begin(),
		if (this != &table) {
2268 internal_copy(table);
2286 internal_copy(il.begin(), il.end());
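	/*
	 * Illustration (not part of the header): the map must live in a
	 * persistent memory pool. A minimal sketch; the include path, root
	 * structure and function names are assumptions of this example only:
	 *
	 *   #include <libpmemobj++/experimental/concurrent_hash_map.hpp>
	 *   #include <libpmemobj++/make_persistent.hpp>
	 *   #include <libpmemobj++/persistent_ptr.hpp>
	 *   #include <libpmemobj++/pool.hpp>
	 *   #include <libpmemobj++/transaction.hpp>
	 *
	 *   using map_t = pmem::obj::experimental::concurrent_hash_map<
	 *           pmem::obj::p<int>, pmem::obj::p<int>>;
	 *
	 *   struct root {
	 *           pmem::obj::persistent_ptr<map_t> map;
	 *   };
	 *
	 *   void
	 *   setup(pmem::obj::pool<root> &pop)
	 *   {
	 *           auto r = pop.root();
	 *           if (r->map == nullptr)
	 *                   pmem::obj::transaction::run(pop, [&] {
	 *                           r->map = pmem::obj::make_persistent<map_t>();
	 *                   });
	 *   }
	 */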
2297 void rehash(size_type n = 0);
		return iterator(this, 0);
		return iterator(this, mask() + 1);
		return const_iterator(this, 0);
		return const_iterator(this, mask() + 1);
		return my_size.get_ro() == 0;
		return (~size_type(0)) / sizeof(node);
			->lookup</*insert*/ false>(key, nullptr, nullptr,
						   &do_not_allocate_node);
	template <typename K,
		  typename = typename std::enable_if<
			  internal::has_transparent_key_equal<hasher>::value,
			->lookup</*insert*/ false>(key, nullptr, nullptr,
						   &do_not_allocate_node);
			->lookup</*insert*/ false>(key, nullptr, &result,
						   &do_not_allocate_node);
	template <typename K,
		  typename = typename std::enable_if<
			  internal::has_transparent_key_equal<hasher>::value,
			->lookup</*insert*/ false>(key, nullptr, &result,
						   &do_not_allocate_node);
		return lookup<false>(key, nullptr, &result,
				     &do_not_allocate_node);
	template <typename K,
		  typename = typename std::enable_if<
			  internal::has_transparent_key_equal<hasher>::value,
		return lookup<false>(key, nullptr, &result,
				     &do_not_allocate_node);
2527 return lookup<
true>(
2529 false, &allocate_node_default_construct);
2543 return lookup<
true>(
2545 true, &allocate_node_default_construct);
		return lookup<true>(value.first, &value, &result,
				    &allocate_node_copy_construct);
		return lookup<true>(value.first, &value, &result,
				    &allocate_node_copy_construct);
		return lookup<true>(value.first, &value, nullptr,
				    &allocate_node_copy_construct);
2600 return generic_move_insert(result, std::move(value));
2612 return generic_move_insert(result, std::move(value));
2623 return generic_move_insert(accessor_not_used(),
	template <typename I>
2635 for (; first != last; ++first)
2646 insert(il.begin(), il.end());
2657 return internal_erase(key);
	template <typename K,
		  typename = typename std::enable_if<
			  internal::has_transparent_key_equal<hasher>::value,
2674 internal::has_transparent_key_equal<hasher>::value,
2679 return internal_erase(key);
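	/*
	 * Illustration (not part of the header): basic accessor-based use of
	 * the public API declared above, continuing the hypothetical setup()
	 * sketch from the constructor section:
	 *
	 *   void
	 *   use(map_t &map)
	 *   {
	 *           // insert() returns false if the key already exists
	 *           map.insert(map_t::value_type(1, 100));
	 *
	 *           {
	 *                   // const_accessor holds a read lock on the element
	 *                   map_t::const_accessor acc;
	 *                   if (map.find(acc, 1))
	 *                           assert(acc->second == 100);
	 *           } // lock released when acc goes out of scope
	 *
	 *           {
	 *                   // accessor holds a write lock and allows modification
	 *                   map_t::accessor acc;
	 *                   if (map.insert(acc, 2))
	 *                           acc->second = 200;
	 *           }
	 *
	 *           map.erase(1); // returns true if an element was removed
	 *   }
	 */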
	template <bool OpInsert, typename K>
	bool lookup(const K &key, const void *param, const_accessor *result,
2691 const node_base_ptr_t &));
	template <typename K>
	bool internal_erase(const K &key);
2696 struct accessor_not_used {
2703 friend const_accessor *
	accessor_location(accessor_not_used const &)
2709 friend const_accessor *
2710 accessor_location(const_accessor &a)
	is_write_access_needed(accessor const &)
	is_write_access_needed(const_accessor const &)
	is_write_access_needed(accessor_not_used const &)
	template <typename Accessor>
	generic_move_insert(Accessor &&result, value_type &&value)
		return lookup<true>(value.first, &value,
2739 accessor_location(result),
2740 is_write_access_needed(result),
2741 &allocate_node_move_construct);
2744 void clear_segment(segment_index_t s);
	void internal_copy(const concurrent_hash_map &source);
	template <typename I>
2752 void internal_copy(I first, I last);
template <typename Key, typename T, typename Hash, typename KeyEqual>
template <bool OpInsert, typename K>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual>::lookup(
	const K &key, const void *param, const_accessor *result, bool write,
	void (*allocate_node)(pool_base &, persistent_node_ptr_t &,
			      const void *, const node_base_ptr_t &))
2764 assert(!result || !result->my_node);
	bool return_value = false;
	hashcode_t const h = hasher{}(key);
2768 hashcode_t m = mask().load(std::memory_order_acquire);
2769 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
2770 ANNOTATE_HAPPENS_AFTER(&my_mask);
2772 persistent_node_ptr_t n;
2776 assert((m & (m + 1)) == 0);
	return_value = false;
2784 n = search_bucket(key, b.get());
2789 if (!b.is_writer() && !b.upgrade_to_writer()) {
2792 n = search_bucket(key, b.get());
2795 b.downgrade_to_reader();
2800 if (check_mask_race(h, m))
2803 assert(!is_valid(b->tmp_node));
2811 param, b->node_list);
2814 sz = insert_new_node(pop, b.get());
			return_value = true;
2819 if (check_mask_race(h, m))
		return_value = true;
2831 if (!result->try_acquire(n.get(my_pool_uuid)->mutex, write)) {
		for (internal::atomic_backoff backoff(true);;) {
2833 if (result->try_acquire(n.get(my_pool_uuid)->mutex,
2837 if (!backoff.bounded_pause()) {
2842 assert(!OpInsert || !return_value);
2844 std::this_thread::yield();
2846 m = mask().load(std::memory_order_acquire);
2854 result->my_node = n.get_persistent_ptr(my_pool_uuid);
2855 result->my_hash = h;
2858 check_growth(m, sz);
2860 return return_value;
template <typename Key, typename T, typename Hash, typename KeyEqual>
template <typename K>
bool
concurrent_hash_map<Key, T, Hash, KeyEqual>::internal_erase(const K &key)
{
	hashcode_t const h = hasher{}(key);
2870 hashcode_t m = mask().load(std::memory_order_acquire);
		bucket_accessor b(this, h & m);

		node_base_ptr_t *p = &b->node_list;
2882 while (is_valid(n) &&
2884 detail::static_persistent_pool_pointer_cast<node>(
2887 p = &n(my_pool_uuid)->next;
2893 if (check_mask_race(h, m))
		} else if (!b.is_writer() && !b.upgrade_to_writer()) {
2898 if (check_mask_race(h, m))
2907 tmp_node_ptr_t del = n(my_pool_uuid);
2916 typename node::scoped_t item_locker(del->mutex,
2926 throw std::runtime_error(e);
template <typename Key, typename T, typename Hash, typename KeyEqual>
void
concurrent_hash_map<Key, T, Hash, KeyEqual>::swap(
	concurrent_hash_map<Key, T, Hash, KeyEqual> &table)
{
2941 internal_swap(table);
template <typename Key, typename T, typename Hash, typename KeyEqual>
2949 hashcode_t m = mask();
2953 hashcode_t b = (m + 1) >> 1;
2956 assert((b & (b - 1)) == 0);
2958 for (; b <= m; ++b) {
2959 bucket *bp = get_bucket(b);
2960 node_base_ptr_t n = bp->node_list;
		assert(is_valid(n) || n == internal::empty_bucket ||
		       bp->is_rehashed(std::memory_order_relaxed) == false);
2965 internal::assert_not_locked(bp->mutex);
		if (bp->is_rehashed(std::memory_order_relaxed) == false)
			rehash_bucket<true>(bp, b);
template <typename Key, typename T, typename Hash, typename KeyEqual>
2976 hashcode_t m = mask();
2978 assert((m & (m + 1)) == 0);
2982 for (segment_index_t b = 0; b <= m; ++b) {
2983 bucket *bp = get_bucket(b);
2984 node_base_ptr_t n = bp->node_list;
		assert(is_valid(n) || n == internal::empty_bucket ||
		       bp->is_rehashed(std::memory_order_relaxed) == false);
2989 internal::assert_not_locked(bp->mutex);
2999 segment_index_t s = segment_traits_t::segment_index_of(m);
3001 assert(s + 1 == block_table_size ||
3002 !segment_facade_t(my_table, s + 1).is_valid());
3010 throw std::runtime_error(e);
3012 mask().store(embedded_buckets - 1, std::memory_order_relaxed);
template <typename Key, typename T, typename Hash, typename KeyEqual>
3019 segment_facade_t segment(my_table, s);
3021 assert(segment.is_valid());
3023 size_type sz = segment.size();
3024 for (segment_index_t i = 0; i < sz; ++i) {
3025 for (node_base_ptr_t n = segment[i].node_list; is_valid(n);
3026 n = segment[i].node_list) {
3027 segment[i].node_list = n(my_pool_uuid)->next;
3032 if (s >= segment_traits_t::embedded_segments)
template <typename Key, typename T, typename Hash, typename KeyEqual>
3041 reserve(source.my_size.get_ro());
	internal_copy(source.begin(), source.end());
template <typename Key, typename T, typename Hash, typename KeyEqual>
template <typename I>
3050 hashcode_t m = mask();
3053 for (; first != last; ++first) {
3054 hashcode_t h = hasher{}(first->first);
3055 bucket *b = get_bucket(h & m);
3057 assert(b->is_rehashed(std::memory_order_relaxed));
3059 allocate_node_copy_construct(
3062 &(*first), b->node_list);
3064 insert_new_node(pop, b);
template <typename Key, typename T, typename Hash, typename KeyEqual>
operator==(const concurrent_hash_map<Key, T, Hash, KeyEqual> &a,
	   const concurrent_hash_map<Key, T, Hash, KeyEqual> &b)
3073 if (a.size() != b.size())
3076 typename concurrent_hash_map<Key, T, Hash, KeyEqual>::const_iterator i(
3080 typename concurrent_hash_map<Key, T, Hash, KeyEqual>::const_iterator j,
3083 for (; i != i_end; ++i) {
3084 j = b.equal_range(i->first).first;
3086 if (j == j_end || !(i->second == j->second))
template <typename Key, typename T, typename Hash, typename KeyEqual>
operator!=(const concurrent_hash_map<Key, T, Hash, KeyEqual> &a,
	   const concurrent_hash_map<Key, T, Hash, KeyEqual> &b)
template <typename Key, typename T, typename Hash, typename KeyEqual>
swap(concurrent_hash_map<Key, T, Hash, KeyEqual> &a,
     concurrent_hash_map<Key, T, Hash, KeyEqual> &b)
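/*
 * Illustration (not part of the header): the free functions above compare two
 * maps element-wise and exchange their contents, e.g.
 *
 *   if (map_a == map_b) {        // equal sizes and equal mapped values per key
 *           // ...
 *   }
 *   swap(map_a, map_b);          // exchanges the maps' internal tables via swap()/internal_swap()
 */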