#pragma once

#include <atomic>
#include <cstdlib>
#include <mutex>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>

#include <userver/concurrent/impl/asymmetric_fence.hpp>
#include <userver/concurrent/impl/intrusive_hooks.hpp>
#include <userver/concurrent/impl/intrusive_stack.hpp>
#include <userver/concurrent/impl/striped_read_indicator.hpp>
#include <userver/engine/async.hpp>
#include <userver/engine/mutex.hpp>
#include <userver/rcu/fwd.hpp>
#include <userver/utils/assert.hpp>
#include <userver/utils/impl/wait_token_storage.hpp>
USERVER_NAMESPACE_BEGIN

namespace rcu {

namespace impl {
// A single version of the RCU-protected data, plus the intrusive bookkeeping
// for the free list and the retired list.
template <typename T>
struct SnapshotRecord final {
    std::optional<T> data;
    concurrent::impl::StripedReadIndicator indicator;
    concurrent::impl::SinglyLinkedHook<SnapshotRecord> free_list_hook;
    SnapshotRecord* next_retired{nullptr};
};
template <typename T>
struct FreeListHookGetter {
    static auto& GetHook(SnapshotRecord<T>& node) noexcept { return node.free_list_hook; }
};
template <typename T>
struct SnapshotRecordFreeList {
    SnapshotRecordFreeList() = default;

    ~SnapshotRecordFreeList() {
        list.DisposeUnsafe([](SnapshotRecord<T>& record) { delete &record; });
    }

    concurrent::impl::IntrusiveStack<SnapshotRecord<T>, FreeListHookGetter<T>> list;
};
// A singly-linked list of records retired by writers. It is only accessed
// under the Variable's mutex, never by readers.
template <typename T>
class SnapshotRecordRetiredList final {
public:
    SnapshotRecordRetiredList() = default;

    bool IsEmpty() const noexcept { return head_ == nullptr; }

    void Push(SnapshotRecord<T>& record) noexcept {
        record.next_retired = head_;
        head_ = &record;
    }

    template <typename Predicate, typename Disposer>
    void RemoveAndDisposeIf(Predicate predicate, Disposer disposer) {
        SnapshotRecord<T>** ptr_to_current = &head_;

        while (*ptr_to_current != nullptr) {
            SnapshotRecord<T>* const current = *ptr_to_current;

            if (predicate(*current)) {
                // Unlink 'current', then hand it over to the disposer.
                *ptr_to_current = std::exchange(current->next_retired, nullptr);
                disposer(*current);
            } else {
                ptr_to_current = &current->next_retired;
            }
        }
    }

private:
    SnapshotRecord<T>* head_{nullptr};
};
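// Illustrative sketch (not from the original header): draining the retired
// records whose indicator reports no active readers, keeping the rest linked.
//
//   SnapshotRecordRetiredList<int> retired;
//   retired.RemoveAndDisposeIf(
//       [](SnapshotRecord<int>& r) { return r.indicator.IsFree(); },
//       [](SnapshotRecord<int>& r) { delete &r; });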
// A Lockable type that expects no contention: overlapping writers are treated
// as a usage error.
class ExclusiveMutex final {
public:
    void lock() {
        const bool was_locked = is_locked_.exchange(true);
        UINVARIANT(
            !was_locked,
            "Detected a race condition when multiple writers Assign to an rcu::Variable concurrently. The value that "
            "will remain in rcu::Variable when the dust settles is unspecified."
        );
    }

    void unlock() noexcept { is_locked_.store(false); }

private:
    std::atomic<bool> is_locked_{false};
};

}  // namespace impl
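// A handle to a retired snapshot, typically owned by a deleter. Destroying the
// handle destroys the payload and returns the record to the Variable's free
// list for reuse by future writers.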
template <typename T>
class SnapshotHandle final {
public:
    SnapshotHandle(SnapshotHandle&& other) noexcept
        : record_(std::exchange(other.record_, nullptr)), free_list_(std::exchange(other.free_list_, nullptr)) {}

    ~SnapshotHandle() {
        if (record_ != nullptr) {
            UASSERT(free_list_ != nullptr);
            record_->data.reset();
            free_list_->list.Push(*record_);
        }
    }

private:
    template <typename, typename Traits>
    friend class Variable;

    template <typename, typename Traits>
    friend class WritablePtr;

    explicit SnapshotHandle(impl::SnapshotRecord<T>& record, impl::SnapshotRecordFreeList<T>& free_list) noexcept
        : record_(&record), free_list_(&free_list) {}

    impl::SnapshotRecord<T>* record_;
    impl::SnapshotRecordFreeList<T>* free_list_;
};
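// Deleters control *when* a retired snapshot's payload is destroyed:
// SyncDeleter destroys it immediately on the writer's path, while AsyncDeleter
// offloads the destruction to a detached task. Variable statically requires
// DeleterType::Delete to be noexcept.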
struct SyncDeleter final {
    template <typename T>
    void Delete(SnapshotHandle<T>&& handle) noexcept {
        [[maybe_unused]] const auto for_deletion = std::move(handle);
    }
};
class AsyncDeleter final {
public:
    ~AsyncDeleter() { wait_token_storage_.WaitForAllTokens(); }

    template <typename T>
    void Delete(SnapshotHandle<T>&& handle) noexcept {
        if constexpr (std::is_trivially_destructible_v<T> || std::is_same_v<T, std::string>) {
            // A cheap destructor is not worth spawning a task.
            SyncDeleter{}.Delete(std::move(handle));
        } else {
            try {
                // The data is destroyed inside the task by the captured handle.
                engine::CriticalAsyncNoSpan(
                    [token = wait_token_storage_.GetToken(), handle = std::move(handle)]() mutable {}
                ).Detach();
            } catch (...) {
                // Failed to start the task; the lambda (and thus the handle) is
                // destroyed during unwinding, destroying the data synchronously.
            }
        }
    }

private:
    utils::impl::WaitTokenStorage wait_token_storage_;
};
// Default RCU traits: retired snapshots are destroyed asynchronously.
struct DefaultRcuTraits {
    using MutexType = engine::Mutex;
    using DeleterType = AsyncDeleter;
};

// Retired snapshots are destroyed synchronously, in the writer.
struct SyncRcuTraits : public DefaultRcuTraits {
    using DeleterType = SyncDeleter;
};

// Usable outside of coroutines, at the cost of blocking writers.
struct BlockingRcuTraits : public DefaultRcuTraits {
    using MutexType = std::mutex;
    using DeleterType = SyncDeleter;
};

// For Variables that are never assigned to concurrently, with no mutex overhead.
struct ExclusiveRcuTraits : public DefaultRcuTraits {
    using MutexType = impl::ExclusiveMutex;
    using DeleterType = SyncDeleter;
};
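// Illustrative sketch (not from the original header), with a hypothetical
// user type `MyConfig`:
//
//   rcu::Variable<MyConfig> a;                           // engine::Mutex + AsyncDeleter
//   rcu::Variable<MyConfig, rcu::SyncRcuTraits> b;       // old values destroyed in the writer
//   rcu::Variable<MyConfig, rcu::BlockingRcuTraits> c;   // usable outside of coroutines
//   rcu::Variable<MyConfig, rcu::ExclusiveRcuTraits> d;  // single writer, no mutex overhead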
template <typename T, typename RcuTraits>
class [[nodiscard]] ReadablePtr final {
public:
    explicit ReadablePtr(const Variable<T, RcuTraits>& ptr) {
        auto* record = ptr.current_.load();

        while (true) {
            // Try to pin 'record'; it may have ceased being 'current_' by now.
            lock_ = record->indicator.GetLock();

            // Pairs with the heavy fence in ScanRetiredList: makes the lock
            // acquisition above visible to the writer before we re-check
            // 'current_'.
            concurrent::impl::AsymmetricThreadFenceLight();

            auto* new_current = ptr.current_.load(std::memory_order_seq_cst);
            if (new_current == record) {
                // We have locked the current version; it cannot be retired
                // from under us.
                break;
            }

            // 'current_' has changed, try again with the new version.
            record = new_current;
        }

        ptr_ = &*record->data;
    }

    ReadablePtr(ReadablePtr&& other) noexcept = default;
    ReadablePtr& operator=(ReadablePtr&& other) noexcept = default;
    ReadablePtr(const ReadablePtr& other) = default;
    ReadablePtr& operator=(const ReadablePtr& other) = default;
    ~ReadablePtr() = default;

    const T* Get() const& {
        UASSERT(ptr_);
        return ptr_;
    }

    const T* Get() && { return GetOnRvalue(); }

    const T* operator->() const& { return Get(); }
    const T* operator->() && { return GetOnRvalue(); }

    const T& operator*() const& { return *Get(); }
    const T& operator*() && { return *GetOnRvalue(); }

private:
    const T* GetOnRvalue() {
        static_assert(!sizeof(T), "Don't use temporary ReadablePtr, store it to a variable");
        std::abort();
    }

    concurrent::impl::StripedReadIndicatorLock lock_;
    const T* ptr_;
};
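// Illustrative sketch (not from the original header): a reader pins one
// snapshot for its whole scope and neither blocks, nor is blocked by, writers.
//
//   rcu::Variable<std::string> var{"hello"};
//   const auto ptr = var.Read();
//   Use(*ptr);  // `Use` is hypothetical; the snapshot lives as long as `ptr`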
template <typename T, typename RcuTraits>
class [[nodiscard]] WritablePtr final {
public:
    // Start an update based on a copy of the current value.
    explicit WritablePtr(Variable<T, RcuTraits>& var)
        : var_(var),
          lock_(var.mutex_),
          record_(&var.EmplaceSnapshot(*var.current_.load()->data)) {}

    // Start an update with an in-place constructed new value.
    template <typename... Args>
    WritablePtr(Variable<T, RcuTraits>& var, std::in_place_t, Args&&... initial_value_args)
        : var_(var),
          lock_(var.mutex_),
          record_(&var.EmplaceSnapshot(std::forward<Args>(initial_value_args)...)) {}

    WritablePtr(WritablePtr&& other) noexcept
        : var_(other.var_), lock_(std::move(other.lock_)), record_(std::exchange(other.record_, nullptr)) {}

    ~WritablePtr() {
        if (record_ != nullptr) {
            // Commit() was not called: discard the never-published version.
            var_.DeleteSnapshot(*record_);
        }
    }

    // Publish the new version; it becomes visible to subsequent readers.
    void Commit() {
        UASSERT(record_ != nullptr);
        var_.DoAssign(*std::exchange(record_, nullptr), lock_);
        lock_.unlock();
    }

    T* Get() & {
        UASSERT(record_ != nullptr);
        return &*record_->data;
    }

    T* Get() && { return GetOnRvalue(); }

    T* operator->() & { return Get(); }
    T* operator->() && { return GetOnRvalue(); }

    T& operator*() & { return *Get(); }
    T& operator*() && { return *GetOnRvalue(); }

private:
    [[noreturn]] static T* GetOnRvalue() {
        static_assert(!sizeof(T), "Don't use temporary WritablePtr, store it to a variable");
        std::abort();
    }

    Variable<T, RcuTraits>& var_;
    std::unique_lock<typename RcuTraits::MutexType> lock_;
    impl::SnapshotRecord<T>* record_;
};
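// Illustrative sketch (not from the original header): a writer mutates a copy
// of the current value and publishes it atomically with Commit().
//
//   auto writer = var.StartWrite();  // deep-copies the current value
//   writer->append(" world");
//   writer.Commit();                 // without Commit() the new version is discarded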
template <typename T, typename RcuTraits>
class Variable final {
    static_assert(
        std::is_base_of_v<DefaultRcuTraits, RcuTraits>,
        "RcuTraits should publicly inherit from rcu::DefaultRcuTraits"
    );

public:
    using MutexType = typename RcuTraits::MutexType;
    using DeleterType = typename RcuTraits::DeleterType;

    // Create a new Variable with an in-place constructed initial value.
    template <typename... Args>
    Variable(Args&&... initial_value_args)
        : current_(&EmplaceSnapshot(std::forward<Args>(initial_value_args)...)) {}

    Variable(const Variable&) = delete;
    Variable(Variable&&) = delete;
    Variable& operator=(const Variable&) = delete;
    Variable& operator=(Variable&&) = delete;

    ~Variable() {
        {
            auto* record = current_.load();
            UASSERT_MSG(record->indicator.IsFree(), "RCU variable is destroyed while being used");
            delete record;
        }

        retired_list_.RemoveAndDisposeIf(
            [](impl::SnapshotRecord<T>&) { return true; },
            [](impl::SnapshotRecord<T>& record) {
                UASSERT_MSG(record.indicator.IsFree(), "RCU variable is destroyed while being used");
                delete &record;
            }
        );
    }

    // Obtain a snapshot of the current value without blocking writers.
    ReadablePtr<T, RcuTraits> Read() const { return ReadablePtr<T, RcuTraits>(*this); }

    // Start an update based on a copy of the current value.
    WritablePtr<T, RcuTraits> StartWrite() { return WritablePtr<T, RcuTraits>(*this); }

    // Start an update with an in-place constructed new value.
    template <typename... Args>
    WritablePtr<T, RcuTraits> StartWriteEmplace(Args&&... args) {
        return WritablePtr<T, RcuTraits>(*this, std::in_place, std::forward<Args>(args)...);
    }

    // Replace the value, automatically committing.
    void Assign(T new_value) { WritablePtr<T, RcuTraits>(*this, std::in_place, std::move(new_value)).Commit(); }

    // Replace the value with an in-place constructed one, automatically committing.
    template <typename... Args>
    void Emplace(Args&&... args) {
        WritablePtr<T, RcuTraits>(*this, std::in_place, std::forward<Args>(args)...).Commit();
    }

    // Destroy the retired values that are no longer referenced by readers.
    void Cleanup() {
        std::unique_lock lock(mutex_, std::try_to_lock);
        if (!lock.owns_lock()) {
            // Someone is already writing; they will scan the retired list.
            return;
        }

        ScanRetiredList(lock);
    }

private:
    friend class ReadablePtr<T, RcuTraits>;
    friend class WritablePtr<T, RcuTraits>;

    void DoAssign(impl::SnapshotRecord<T>& new_snapshot, std::unique_lock<MutexType>& lock) {
        UASSERT(lock.owns_lock());

        auto* const old_snapshot = current_.load();
        // seq_cst pairs with the seq_cst load in the ReadablePtr constructor.
        current_.store(&new_snapshot, std::memory_order_seq_cst);

        retired_list_.Push(*old_snapshot);
        ScanRetiredList(lock);
    }

    template <typename... Args>
    [[nodiscard]] impl::SnapshotRecord<T>& EmplaceSnapshot(Args&&... args) {
        auto* const free_list_record = free_list_.list.TryPop();
        auto& record = free_list_record ? *free_list_record : *new impl::SnapshotRecord<T>{};

        try {
            record.data.emplace(std::forward<Args>(args)...);
        } catch (...) {
            // Return the unused record so that it is not leaked.
            free_list_.list.Push(record);
            throw;
        }

        return record;
    }

    void ScanRetiredList(std::unique_lock<MutexType>& lock) noexcept {
        UASSERT(lock.owns_lock());
        if (retired_list_.IsEmpty()) return;

        // Pairs with the light fence in readers: locks taken on the old
        // versions become visible to the IsFree checks below.
        concurrent::impl::AsymmetricThreadFenceHeavy();

        retired_list_.RemoveAndDisposeIf(
            [](impl::SnapshotRecord<T>& record) { return record.indicator.IsFree(); },
            [&](impl::SnapshotRecord<T>& record) { DeleteSnapshot(record); }
        );
    }

    void DeleteSnapshot(impl::SnapshotRecord<T>& record) noexcept {
        static_assert(
            noexcept(deleter_.Delete(SnapshotHandle<T>{record, free_list_})),
            "DeleterType::Delete must be noexcept"
        );
        deleter_.Delete(SnapshotHandle<T>{record, free_list_});
    }

    MutexType mutex_;
    impl::SnapshotRecordFreeList<T> free_list_;
    impl::SnapshotRecordRetiredList<T> retired_list_;
    DeleterType deleter_{};
    std::atomic<impl::SnapshotRecord<T>*> current_;
};

}  // namespace rcu

USERVER_NAMESPACE_END
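// Illustrative end-to-end sketch (not from the original header):
//
//   rcu::Variable<int> counter{0};
//
//   // Reader side: snapshot access that does not block writers.
//   const auto snapshot = counter.Read();
//   Process(*snapshot);  // `Process` is hypothetical
//
//   // Writer side: copy, modify, publish.
//   {
//       auto writer = counter.StartWrite();
//       ++*writer;
//       writer.Commit();
//   }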