Skip to content

Commit b6b02fe

Browse files
committed
feat concurrent: migrate MonotonicConcurrentSet to C++20
commit_hash:19af9f46e21b4b45896c26bb878df4b6b71a054c
1 parent b734377 commit b6b02fe

1 file changed

Lines changed: 26 additions & 27 deletions

File tree

core/include/userver/concurrent/impl/monotonic_concurrent_set.hpp

Lines changed: 26 additions & 27 deletions
Original file line number | Diff line number | Diff line change
@@ -4,16 +4,14 @@
44
/// @brief @copybrief concurrent::impl::MonotonicConcurrentSet
55

66
#include <atomic>
7+
#include <concepts>
78
#include <cstddef>
89
#include <cstdint>
910
#include <functional>
1011
#include <memory>
1112
#include <mutex> // for std::lock_guard
12-
#include <type_traits>
1313
#include <utility>
1414

15-
#include <boost/atomic/atomic.hpp>
16-
1715
#include <userver/utils/assert.hpp>
1816
#include <userver/utils/impl/fused_allocations.hpp>
1917
#include <userver/utils/not_null.hpp>
@@ -59,35 +57,33 @@ inline std::uintptr_t MakeBucketValue(ItemNode<T>* ptr, bool locked) noexcept {
5957
// Bucket is an atomic stack with lowest bit used for locking.
6058
template <typename T>
6159
struct Bucket {
62-
boost::atomic<std::uintptr_t> value{0}; // TODO use std::atomic in C++20
60+
std::atomic<std::uintptr_t> value{0};
6361

6462
void lock() noexcept {
65-
std::uintptr_t expected = value.load(boost::memory_order_relaxed);
63+
std::uintptr_t expected = value.load(std::memory_order_relaxed);
6664
while (true) {
6765
// Wait if already locked
6866
while (IsBucketLocked(expected)) {
69-
value.wait(expected, boost::memory_order_relaxed);
70-
expected = value.load(boost::memory_order_relaxed);
67+
value.wait(expected, std::memory_order_relaxed);
68+
expected = value.load(std::memory_order_relaxed);
7169
}
7270

7371
// Try to acquire lock
7472
std::uintptr_t desired = expected | kLockBit;
75-
if (value
76-
.compare_exchange_weak(expected, desired, boost::memory_order_acquire, boost::memory_order_relaxed))
77-
{
73+
if (value.compare_exchange_weak(expected, desired, std::memory_order_acquire, std::memory_order_relaxed)) {
7874
return;
7975
}
8076
}
8177
}
8278

8379
void unlock() noexcept {
84-
std::uintptr_t val = value.load(boost::memory_order_relaxed);
80+
std::uintptr_t val = value.load(std::memory_order_relaxed);
8581
UASSERT(IsBucketLocked(val));
86-
value.store(val & kPtrMask, boost::memory_order_release);
82+
value.store(val & kPtrMask, std::memory_order_release);
8783
value.notify_one();
8884
}
8985

90-
ItemNode<T>* LoadHead(boost::memory_order order) const noexcept {
86+
ItemNode<T>* LoadHead(std::memory_order order) const noexcept {
9187
return reinterpret_cast<ItemNode<T>*>(value.load(order) & kPtrMask);
9288
}
9389
};
@@ -130,7 +126,7 @@ struct Table {
130126
utils::impl::FusedArray{item_capacity, items}
131127
);
132128

133-
::new (&*table) Table(buckets, nodes, items); // TODO use std::construct_at in C++20
129+
std::construct_at(&*table, buckets, nodes, items);
134130
std::uninitialized_value_construct_n(buckets.data(), buckets.size());
135131
std::uninitialized_default_construct_n(nodes.data(), nodes.size());
136132
std::uninitialized_default_construct_n(items.data(), items.size());
@@ -207,12 +203,14 @@ class MonotonicConcurrentSet final {
207203

208204
/// @brief Call the passed `visitor` on all contained items.
209205
/// @param visitor A callable that accepts `const T&`
210-
template <typename Visitor, typename = std::enable_if_t<std::is_invocable_v<Visitor&, const T&>>>
206+
template <typename Visitor>
207+
requires std::invocable<Visitor&, const T&>
211208
void Visit(Visitor visitor) const;
212209

213210
/// @brief Call the passed `visitor` on all contained items.
214211
/// @param visitor A callable that accepts `T&`
215-
template <typename Visitor, typename = std::enable_if_t<std::is_invocable_v<Visitor&, T&>>>
212+
template <typename Visitor>
213+
requires std::invocable<Visitor&, T&>
216214
void Visit(Visitor visitor);
217215

218216
private:
@@ -291,7 +289,7 @@ T* MonotonicConcurrentSet<T, Hash, KeyEqual>::DoFind(const Key& key) const {
291289
const std::size_t hash = hasher_(key);
292290
Table& table = *head_.load(std::memory_order_acquire);
293291
const auto& bucket = table.GetBucket(hash);
294-
return FindInBucket(bucket.LoadHead(boost::memory_order_acquire), key);
292+
return FindInBucket(bucket.LoadHead(std::memory_order_acquire), key);
295293
}
296294

297295
template <typename T, typename Hash, typename KeyEqual>
@@ -317,7 +315,7 @@ template <typename ItemReference, typename Visitor>
317315
void MonotonicConcurrentSet<T, Hash, KeyEqual>::DoVisit(Visitor visitor) const {
318316
auto table = head_.load(std::memory_order_acquire);
319317
for (const auto& bucket : table->buckets) {
320-
for (const ItemNode* node = bucket.LoadHead(boost::memory_order_acquire); node != nullptr; node = node->next) {
318+
for (const ItemNode* node = bucket.LoadHead(std::memory_order_acquire); node != nullptr; node = node->next) {
321319
T* item = node->item;
322320
UASSERT(item);
323321
visitor(static_cast<ItemReference>(*item));
@@ -326,13 +324,15 @@ void MonotonicConcurrentSet<T, Hash, KeyEqual>::DoVisit(Visitor visitor) const {
326324
}
327325

328326
template <typename T, typename Hash, typename KeyEqual>
329-
template <typename Visitor, typename>
327+
template <typename Visitor>
328+
requires std::invocable<Visitor&, const T&>
330329
void MonotonicConcurrentSet<T, Hash, KeyEqual>::Visit(Visitor visitor) const {
331330
DoVisit<const T&>(visitor);
332331
}
333332

334333
template <typename T, typename Hash, typename KeyEqual>
335-
template <typename Visitor, typename>
334+
template <typename Visitor>
335+
requires std::invocable<Visitor&, T&>
336336
void MonotonicConcurrentSet<T, Hash, KeyEqual>::Visit(Visitor visitor) {
337337
DoVisit<T&>(visitor);
338338
}
@@ -343,7 +343,7 @@ std::pair<T*, bool> MonotonicConcurrentSet<
343343
T,
344344
Hash,
345345
KeyEqual>::TryEmplaceLocked(Table& table, Bucket& bucket, const Key& key, Args&&... args) {
346-
ItemNode* const bucket_head = bucket.LoadHead(boost::memory_order_relaxed);
346+
ItemNode* const bucket_head = bucket.LoadHead(std::memory_order_relaxed);
347347

348348
if (T* const existing = FindInBucket(bucket_head, key)) {
349349
return {existing, false};
@@ -356,13 +356,13 @@ std::pair<T*, bool> MonotonicConcurrentSet<
356356
return {nullptr, false};
357357
}
358358

359-
::new (&table.items[item_index].item) T(std::forward<Args>(args)...); // TODO use std::construct_at in C++20
359+
std::construct_at(&table.items[item_index].item, std::forward<Args>(args)...);
360360
T& new_item = table.items[item_index].item;
361361

362362
ItemNode& new_node = GetNodeForItemIndex(table, item_index);
363363
new_node.item = &new_item;
364364
new_node.next = bucket_head;
365-
bucket.value.store(monotonic_concurrent_set::MakeBucketValue(&new_node, true), boost::memory_order_release);
365+
bucket.value.store(monotonic_concurrent_set::MakeBucketValue(&new_node, true), std::memory_order_release);
366366

367367
return {&new_item, true};
368368
}
@@ -385,9 +385,8 @@ void MonotonicConcurrentSet<T, Hash, KeyEqual>::FillNewTable(Table& old_table, T
385385

386386
ItemNode& new_node = *(next_new_node++);
387387
new_node.item = &item;
388-
new_node.next = bucket.LoadHead(boost::memory_order_relaxed);
389-
bucket.value
390-
.store(monotonic_concurrent_set::MakeBucketValue(&new_node, false), boost::memory_order_relaxed);
388+
new_node.next = bucket.LoadHead(std::memory_order_relaxed);
389+
bucket.value.store(monotonic_concurrent_set::MakeBucketValue(&new_node, false), std::memory_order_relaxed);
391390
}
392391
}
393392
}
@@ -454,7 +453,7 @@ std::pair<T&, bool> MonotonicConcurrentSet<T, Hash, KeyEqual>::TryEmplace(const
454453
auto& bucket = current.GetBucket(hash);
455454

456455
// Fast path: lock-free search (same as Find) - avoid lock when item already exists
457-
if (T* const existing = FindInBucket(bucket.LoadHead(boost::memory_order_acquire), key)) {
456+
if (T* const existing = FindInBucket(bucket.LoadHead(std::memory_order_acquire), key)) {
458457
return {*existing, false};
459458
}
460459

0 commit comments

Comments (0)