10#include <yaclib_std/atomic>
15template <
bool FIFO,
bool Batching>
19 return _sender.load(std::memory_order_relaxed) ==
expected &&
20 _sender.compare_exchange_strong(
expected, kLockedNoWaiters, std::memory_order_acquire,
21 std::memory_order_relaxed);
25 auto expected = _sender.load(std::memory_order_relaxed);
28 if (_sender.compare_exchange_weak(
expected, kLockedNoWaiters, std::memory_order_acquire,
29 std::memory_order_relaxed)) {
34 if (_sender.compare_exchange_weak(
expected,
reinterpret_cast<std::uintptr_t
>(&
curr), std::memory_order_release,
35 std::memory_order_relaxed)) {
43 YACLIB_ASSERT(_sender.load(std::memory_order_relaxed) != kNotLocked);
44 if (_receiver !=
nullptr) {
48 return _sender.load(std::memory_order_relaxed) ==
expected &&
49 _sender.compare_exchange_strong(
expected, kNotLocked, std::memory_order_release, std::memory_order_relaxed);
53 return Batching && _receiver !=
nullptr;
57 auto& next = GetHead();
65 auto& next = *_receiver;
66 _receiver =
static_cast<BaseCore*
>(next.next);
82 auto& next = GetHead();
84 if (_receiver !=
nullptr) {
85 _receiver =
static_cast<BaseCore*
>(next.next);
91 _receiver =
static_cast<BaseCore*
>(next.next);
109 if (_receiver !=
nullptr) {
112 auto expected = _sender.exchange(kLockedNoWaiters, std::memory_order_acquire);
113 if constexpr (
FIFO) {
115 Node* prev =
nullptr;
117 auto* next =
node->next;
121 }
while (
node !=
nullptr);
122 return *
static_cast<BaseCore*
>(prev);
124 return *
reinterpret_cast<BaseCore*
>(
expected);
128 static constexpr auto kLockedNoWaiters = std::uintptr_t{0};
129 static constexpr auto kNotLocked = std::numeric_limits<std::uintptr_t>::max();
133 BaseCore* _receiver =
nullptr;
143 if (_mutex.TryUnlockAwait()) {
146 if (_mutex.BatchingPossible()) {
149 _mutex.UnlockHereAwait();
153 template <
typename Promise>
154 YACLIB_INLINE
auto await_suspend(yaclib_std::coroutine_handle<Promise> handle)
noexcept {
155 return _mutex.AwaitUnlock(handle.promise());
175 template <
typename Promise>
176 YACLIB_INLINE
auto await_suspend(yaclib_std::coroutine_handle<Promise> handle)
noexcept {
177 return _mutex.AwaitUnlockOn(handle.promise(), _executor);
195template <
bool Batching = true,
bool FIFO = false>
284 template <
typename To,
typename From>
286 return static_cast<To&
>(
from);
virtual void Submit(Job &job) noexcept=0
Submit the given job.
void Swap(IntrusivePtr &other) noexcept
auto Unlock() noexcept
The best way to unlock the mutex if you are interested in a batched critical section.
auto Lock() noexcept
Lock mutex.
auto GuardSticky() noexcept
Lock mutex and create StickyGuard for it.
auto TryGuard() noexcept
Try to lock mutex and create UniqueGuard for it.
auto Guard() noexcept
Lock mutex and create UniqueGuard for it.
static auto & Cast(From &from) noexcept
auto UnlockOn(IExecutor &e) noexcept
This method is an optimization for the combination of Unlock() and On().
UnlockAwaiter(M &m) noexcept
YACLIB_INLINE auto await_suspend(yaclib_std::coroutine_handle< Promise > handle) noexcept
YACLIB_INLINE bool await_ready() noexcept
constexpr void await_resume() noexcept
constexpr void await_resume() noexcept
UnlockOnAwaiter(M &m, IExecutor &e) noexcept
constexpr bool await_ready() noexcept
YACLIB_INLINE auto await_suspend(yaclib_std::coroutine_handle< Promise > handle) noexcept
#define YACLIB_TRANSFER(handle)
#define YACLIB_ASSERT(cond)
atomic< std::uintptr_t > atomic_uintptr_t
Contract< V, E > MakeContract()
Creates related future and promise.
auto AwaitUnlock(BaseCore &curr) noexcept
void UnlockHereAwait() noexcept
void UnlockHere() noexcept
YACLIB_INLINE bool BatchingPossible() const noexcept
bool TryUnlockAwait() noexcept
bool TryLockAwait() noexcept
bool AwaitLock(BaseCore &curr) noexcept
auto AwaitUnlockOn(BaseCore &curr, IExecutor &executor) noexcept