#include <atomic>
#include <cstdlib>
#include <memory>
#include <mutex>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>

#include <userver/concurrent/impl/asymmetric_fence.hpp>
#include <userver/concurrent/impl/intrusive_hooks.hpp>
#include <userver/concurrent/impl/intrusive_stack.hpp>
#include <userver/concurrent/impl/striped_read_indicator.hpp>
#include <userver/engine/async.hpp>
#include <userver/engine/mutex.hpp>
#include <userver/rcu/fwd.hpp>
#include <userver/utils/assert.hpp>
#include <userver/utils/impl/wait_token_storage.hpp>
22USERVER_NAMESPACE_BEGIN
34struct SnapshotRecord
final {
35 std::optional<T> data;
36 concurrent::impl::StripedReadIndicator indicator;
37 concurrent::impl::SinglyLinkedHook<SnapshotRecord> free_list_hook;
38 SnapshotRecord* next_retired{
nullptr};
44struct FreeListHookGetter {
45 static auto& GetHook(SnapshotRecord<T>& node)
noexcept {
return node.free_list_hook; }
49struct SnapshotRecordFreeList {
50 SnapshotRecordFreeList() =
default;
52 ~SnapshotRecordFreeList() {
53 list.DisposeUnsafe([](SnapshotRecord<T>& record) {
delete &record; });
56 concurrent::impl::IntrusiveStack<SnapshotRecord<T>, FreeListHookGetter<T>> list;
60class SnapshotRecordRetiredList
final {
62 SnapshotRecordRetiredList() =
default;
64 bool IsEmpty()
const noexcept {
return head_ ==
nullptr; }
66 void Push(SnapshotRecord<T>& record)
noexcept {
67 record.next_retired = head_;
71 template <
typename Predicate,
typename Disposer>
72 void RemoveAndDisposeIf(Predicate predicate, Disposer disposer) {
73 SnapshotRecord<T>** ptr_to_current = &head_;
75 while (*ptr_to_current !=
nullptr) {
76 SnapshotRecord<T>*
const current = *ptr_to_current;
78 if (predicate(*current)) {
79 *ptr_to_current = std::exchange(current->next_retired,
nullptr);
82 ptr_to_current = ¤t->next_retired;
88 SnapshotRecord<T>* head_{
nullptr};
91class ExclusiveMutex
final {
94 const bool was_locked = is_locked_.exchange(
true);
97 "Detected a race condition when multiple writers Assign to an rcu::Variable concurrently. The value that "
98 "will remain in rcu::Variable when the dust settles is unspecified."
102 void unlock()
noexcept { is_locked_.store(
false); }
105 std::atomic<
bool> is_locked_{
false};
114class SnapshotHandle
final {
116 SnapshotHandle(SnapshotHandle&& other)
noexcept
117 : record_(std::exchange(other.record_,
nullptr)), free_list_(std::exchange(other.free_list_,
nullptr)) {}
120 if (record_ !=
nullptr) {
121 UASSERT(free_list_ !=
nullptr);
122 record_->data.reset();
123 free_list_->list.Push(*record_);
128 template <
typename ,
typename Traits>
129 friend class Variable;
131 template <
typename ,
typename Traits>
132 friend class WritablePtr;
134 explicit SnapshotHandle(impl::SnapshotRecord<T>& record, impl::SnapshotRecordFreeList<T>& free_list)
noexcept
135 : record_(&record), free_list_(&free_list) {}
137 impl::SnapshotRecord<T>* record_;
138 impl::SnapshotRecordFreeList<T>* free_list_;
143struct SyncDeleter
final {
144 template <
typename T>
145 void Delete(SnapshotHandle<T>&& handle)
noexcept {
146 [[maybe_unused]]
const auto for_deletion = std::move(handle);
152class AsyncDeleter
final {
154 ~AsyncDeleter() { wait_token_storage_.WaitForAllTokens(); }
156 template <
typename T>
157 void Delete(SnapshotHandle<T>&& handle)
noexcept {
158 if constexpr (std::is_trivially_destructible_v<T> || std::is_same_v<T, std::string>) {
159 SyncDeleter{}.Delete(std::move(handle));
164 [token = wait_token_storage_.GetToken(), handle = std::move(handle)]()
mutable {}
176 utils::impl::WaitTokenStorage wait_token_storage_;
189 using MutexType = engine::Mutex;
194 using DeleterType = AsyncDeleter;
203 using DeleterType = SyncDeleter;
213 using MutexType = std::mutex;
214 using DeleterType = SyncDeleter;
222 using MutexType = impl::ExclusiveMutex;
223 using DeleterType = SyncDeleter;
231template <
typename T,
typename RcuTraits>
232class [[nodiscard]] ReadablePtr
final {
234 explicit ReadablePtr(
const Variable<T, RcuTraits>& ptr) {
235 auto* record = ptr.current_.load();
240 lock_ = record->indicator.GetLock();
262 concurrent::impl::AsymmetricThreadFenceLight();
266 auto* new_current = ptr.current_.load(std::memory_order_seq_cst);
267 if (new_current == record)
break;
270 record = new_current;
273 ptr_ = &*record->data;
276 ReadablePtr(ReadablePtr&& other)
noexcept =
default;
277 ReadablePtr& operator=(ReadablePtr&& other)
noexcept =
default;
278 ReadablePtr(
const ReadablePtr& other) =
default;
279 ReadablePtr& operator=(
const ReadablePtr& other) =
default;
280 ~ReadablePtr() =
default;
282 const T* Get()
const& {
287 const T* Get() && {
return GetOnRvalue(); }
289 const T* operator->()
const& {
return Get(); }
290 const T* operator->() && {
return GetOnRvalue(); }
292 const T& operator*()
const& {
return *Get(); }
293 const T& operator*() && {
return *GetOnRvalue(); }
296 const T* GetOnRvalue() {
297 static_assert(!
sizeof(T),
"Don't use temporary ReadablePtr, store it to a variable");
302 concurrent::impl::StripedReadIndicatorLock lock_;
315template <
typename T,
typename RcuTraits>
316class [[nodiscard]] WritablePtr
final {
320 explicit WritablePtr(Variable<T, RcuTraits>& var)
321 : var_(var), lock_(var.mutex_), record_(&var.EmplaceSnapshot(*var.current_.load()->data)) {}
324 template <
typename... Args>
325 WritablePtr(Variable<T, RcuTraits>& var, std::in_place_t, Args&&... initial_value_args)
326 : var_(var), lock_(var.mutex_), record_(&var.EmplaceSnapshot(std::forward<Args>(initial_value_args)...)) {}
329 WritablePtr(WritablePtr&& other)
noexcept
330 : var_(other.var_), lock_(std::move(other.lock_)), record_(std::exchange(other.record_,
nullptr)) {}
334 var_.DeleteSnapshot(*record_);
343 var_.DoAssign(*std::exchange(record_,
nullptr), lock_);
349 return &*record_->data;
352 T* Get() && {
return GetOnRvalue(); }
354 T* operator->() & {
return Get(); }
355 T* operator->() && {
return GetOnRvalue(); }
357 T& operator*() & {
return *Get(); }
358 T& operator*() && {
return *GetOnRvalue(); }
361 [[noreturn]]
static T* GetOnRvalue() {
362 static_assert(!
sizeof(T),
"Don't use temporary WritablePtr, store it to a variable");
366 Variable<T, RcuTraits>& var_;
367 std::unique_lock<
typename RcuTraits::MutexType> lock_;
368 impl::SnapshotRecord<T>* record_;
398template <
typename T,
typename RcuTraits>
399class Variable
final {
402 "RcuTraits should publicly inherit from rcu::DefaultRcuTraits"
406 using MutexType =
typename RcuTraits::MutexType;
407 using DeleterType =
typename RcuTraits::DeleterType;
412 template <
typename... Args>
414 Variable(Args&&... initial_value_args) : current_(&EmplaceSnapshot(std::forward<Args>(initial_value_args)...)) {}
416 Variable(
const Variable&) =
delete;
417 Variable(Variable&&) =
delete;
418 Variable& operator=(
const Variable&) =
delete;
419 Variable& operator=(Variable&&) =
delete;
423 auto* record = current_.load();
424 UASSERT_MSG(record->indicator.IsFree(),
"RCU variable is destroyed while being used");
428 retired_list_.RemoveAndDisposeIf(
429 [](impl::SnapshotRecord<T>&) {
return true; },
430 [](impl::SnapshotRecord<T>& record) {
431 UASSERT_MSG(record.indicator.IsFree(),
"RCU variable is destroyed while being used");
438 ReadablePtr<T, RcuTraits>
Read()
const {
return ReadablePtr<T, RcuTraits>(*
this); }
449 WritablePtr<T, RcuTraits>
StartWrite() {
return WritablePtr<T, RcuTraits>(*
this); }
453 template <
typename... Args>
455 return WritablePtr<T, RcuTraits>(*
this, std::in_place, std::forward<Args>(args)...);
459 void Assign(T new_value) { WritablePtr<T, RcuTraits>(*
this, std::in_place, std::move(new_value)).Commit(); }
462 template <
typename... Args>
464 WritablePtr<T, RcuTraits>(*
this, std::in_place, std::forward<Args>(args)...).Commit();
468 std::unique_lock lock(mutex_, std::try_to_lock);
469 if (!lock.owns_lock()) {
474 ScanRetiredList(lock);
478 friend class ReadablePtr<T, RcuTraits>;
479 friend class WritablePtr<T, RcuTraits>;
481 void DoAssign(impl::SnapshotRecord<T>& new_snapshot, std::unique_lock<MutexType>& lock) {
485 auto*
const old_snapshot = current_.load();
486 current_.store(&new_snapshot, std::memory_order_seq_cst);
489 retired_list_.Push(*old_snapshot);
490 ScanRetiredList(lock);
493 template <
typename... Args>
494 [[nodiscard]] impl::SnapshotRecord<T>& EmplaceSnapshot(Args&&... args) {
495 auto*
const free_list_record = free_list_.list.TryPop();
496 auto& record = free_list_record ? *free_list_record : *
new impl::SnapshotRecord<T>{};
500 record.data.emplace(std::forward<Args>(args)...);
502 free_list_.list.Push(record);
509 void ScanRetiredList(std::unique_lock<MutexType>& lock)
noexcept {
511 if (retired_list_.IsEmpty())
return;
513 concurrent::impl::AsymmetricThreadFenceHeavy();
515 retired_list_.RemoveAndDisposeIf(
516 [](impl::SnapshotRecord<T>& record) {
return record.indicator.IsFree(); },
517 [&](impl::SnapshotRecord<T>& record) { DeleteSnapshot(record); }
521 void DeleteSnapshot(impl::SnapshotRecord<T>& record)
noexcept {
523 noexcept(deleter_.Delete(SnapshotHandle<T>{record, free_list_})),
"DeleterType::Delete must be noexcept"
525 deleter_.Delete(SnapshotHandle<T>{record, free_list_});
530 impl::SnapshotRecordFreeList<T> free_list_;
531 impl::SnapshotRecordRetiredList<T> retired_list_;
534 DeleterType deleter_{};
537 std::atomic<impl::SnapshotRecord<T>*> current_;