userver: userver/concurrent/impl/intrusive_stack.hpp Source File
⚠️ This is the documentation for an old userver version. Click here to switch to the latest version.
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages Concepts
intrusive_stack.hpp
1#pragma once
2
3#include <atomic>
4#include <cstdint>
5#include <type_traits>
6
7#include <userver/concurrent/impl/intrusive_hooks.hpp>
8#include <userver/concurrent/impl/tagged_ptr.hpp>
9#include <userver/utils/assert.hpp>
10
11USERVER_NAMESPACE_BEGIN
12
13namespace concurrent::impl {
14
/// @brief An intrusive stack of nodes of type `T` with ABA protection.
///
/// - The IntrusiveStack does not own the nodes. The user is responsible
/// for deleting them, e.g. by calling `Pop` repeatedly
/// - The element type `T` must contain IntrusiveStackHook
/// (NOTE(review): `GetNext` actually accesses `SinglyLinkedHook<T>` —
/// presumably an alias from intrusive_hooks.hpp; TODO confirm),
/// extracted by `HookExtractor`
/// - The objects are not destroyed on insertion into IntrusiveStack
/// - If a node is `Pop`-ed from an IntrusiveStack, it must not be destroyed
/// until the IntrusiveStack stops being used
///
/// Implemented using Treiber stack with counted pointers. See
/// Treiber, R.K., 1986. Systems programming: Coping with parallelism.
template <typename T, typename HookExtractor>
class IntrusiveStack final {
  // HookExtractor must be stateless: GetNext constructs it on the fly
  // (HookExtractor{}) for every hook access.
  static_assert(std::is_empty_v<HookExtractor>);

 public:
  constexpr IntrusiveStack() = default;

  // Non-movable: concurrent users hold a reference to this exact object.
  IntrusiveStack(IntrusiveStack&&) = delete;
  IntrusiveStack& operator=(IntrusiveStack&&) = delete;

  /// Pushes @p node onto the stack.
  /// Lock-free; may be called concurrently with Push and TryPop.
  /// @pre @p node is not currently contained in any IntrusiveStack.
  void Push(T& node) noexcept {
    // A node's 'next' link is nullptr exactly while it is outside a stack
    // (TryPop resets it on removal), which makes double-insertion detectable.
    UASSERT_MSG(GetNext(node).load(std::memory_order_relaxed) == nullptr,
                "This node is already contained in an IntrusiveStack");

    NodeTaggedPtr expected = stack_head_.load();
    while (true) {
      // Link the new node in front of the current head BEFORE publishing it
      // via the CAS below; a successful CAS makes this store visible.
      GetNext(node).store(expected.GetDataPtr());
      // Push keeps the head's tag unchanged; only TryPop advances it.
      // The (pointer, tag) pair is what defeats the ABA problem.
      const NodeTaggedPtr desired(&node, expected.GetTag());
      // On failure, compare_exchange_weak reloads 'expected' with the fresh
      // head and the loop retries. Default (seq_cst) memory order throughout.
      if (stack_head_.compare_exchange_weak(expected, desired)) {
        break;
      }
    }
  }

  /// Pops the top node, or returns nullptr if the stack is (momentarily)
  /// empty. Lock-free; may be called concurrently with Push and TryPop.
  T* TryPop() noexcept {
    NodeTaggedPtr expected = stack_head_.load();
    while (true) {
      T* const expected_ptr = expected.GetDataPtr();
      if (!expected_ptr) return nullptr;
      // We may read 'next' of a node that a concurrent TryPop has already
      // removed — this is why popped nodes must stay alive while the stack
      // is in use (see class comment). In that case the head has changed
      // (and its tag advanced), so the CAS below fails and we retry; the
      // advanced tag prevents an ABA false-positive if the same node was
      // pushed back in between.
      const NodeTaggedPtr desired(GetNext(*expected_ptr).load(),
                                  expected.GetNextTag());
      if (stack_head_.compare_exchange_weak(expected, desired)) {
        // 'relaxed' is OK, because popping a node must happen-before pushing it
        GetNext(*expected_ptr).store(nullptr, std::memory_order_relaxed);
        return expected_ptr;
      }
    }
  }

  /// Calls @p func for each node currently in the stack.
  /// "Unsafe": must not run concurrently with Push/TryPop — the walk reads
  /// 'next' links without any protection against concurrent removal.
  template <typename Func>
  void WalkUnsafe(const Func& func) {
    DoWalk<T&>(func);
  }

  /// @overload Const version: @p func receives `const T&`.
  template <typename Func>
  void WalkUnsafe(const Func& func) const {
    DoWalk<const T&>(func);
  }

  /// Empties the stack, calling @p disposer on each removed node.
  /// "Unsafe": must not run concurrently with any other operation.
  /// The next link is read before @p disposer runs, so the disposer is
  /// allowed to destroy the node.
  template <typename DisposerFunc>
  void DisposeUnsafe(const DisposerFunc& disposer) noexcept {
    T* iter = stack_head_.load().GetDataPtr();
    stack_head_.store(nullptr);
    while (iter) {
      T* const old_iter = iter;
      iter = GetNext(*iter).load();
      disposer(*old_iter);
    }
  }

  /// Returns the number of nodes by walking the whole stack — O(size).
  /// "Unsafe": same restrictions as WalkUnsafe.
  std::size_t GetSizeUnsafe() const noexcept {
    std::size_t size = 0;
    WalkUnsafe([&](auto& /*item*/) { ++size; });
    return size;
  }

 private:
  using NodeTaggedPtr = TaggedPtr<T>;

  // The CAS loops require the (pointer, tag) atomic to be lock-free, and
  // compare_exchange compares object representations byte-wise — padding
  // bits would cause spurious, potentially unending CAS failures, hence
  // the unique-object-representations requirement.
  static_assert(std::atomic<NodeTaggedPtr>::is_always_lock_free);
  static_assert(std::has_unique_object_representations_v<NodeTaggedPtr>);

  // Accessor for the intrusive 'next' link stored inside the node itself.
  static std::atomic<T*>& GetNext(T& node) noexcept {
    SinglyLinkedHook<T>& hook = HookExtractor{}(node);
    return hook.next_;
  }

  // Shared implementation of both WalkUnsafe overloads; U is either `T&`
  // or `const T&`.
  template <typename U, typename Func>
  void DoWalk(const Func& func) const {
    for (auto* iter = stack_head_.load().GetDataPtr(); iter;
         iter = GetNext(*iter).load()) {
      func(static_cast<U>(*iter));
    }
  }

  std::atomic<NodeTaggedPtr> stack_head_{nullptr};
};
114
115} // namespace concurrent::impl
116
117USERVER_NAMESPACE_END