| // Copyright 2021 The Fuchsia Authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #ifndef LIB_CONCURRENT_CHAINLOCK_H_ |
| #define LIB_CONCURRENT_CHAINLOCK_H_ |
| |
| #include <lib/arch/intrin.h> |
| #include <lib/concurrent/common.h> |
| #include <zircon/assert.h> |
| #include <zircon/compiler.h> |
| #include <zircon/time.h> |
| |
| #include <atomic> |
| #include <limits> |
| |
| namespace concurrent { |
| |
| class ChainLock; // fwd decl |
| |
| // Note: In a perfect world, Token would be an inner class of ChainLock. |
| // Unfortunately, we want to declare a static constexpr instance of a Token |
| // inside of ChainLock to represent the "unlocked" state. To do this, there |
| // needs to be a defined constexpr constructor available to create the |
| // constexpr instance of the token. The problem is that, even once the inner |
| // class definition is complete, the compiler does not consider the uint64_t |
| // based constructor to be properly defined until the outer class definition |
| // is complete as well. |
| // |
| // So, to simulate the inner class syntax, we stuff the token definition into an |
| // internal namespace outside of ChainLock (guaranteeing that Token will be |
| // fully defined by the time we construct the unlocked token), and then define |
| // ChainLock::Token with a simple alias declaration. |
| namespace chain_lock_internal { |
| class Token { |
| public: |
| Token() : token_(token_generator_.fetch_add(1, std::memory_order_relaxed)) {} |
| |
| // Copy construction is allowed, but assignment is not. Generally speaking, |
| // users should not be re-assigning the values of token instances (and if they |
| // ever need to, doing so mandates some extra checks which are not possible in |
| // a trivial assignment operator). |
| constexpr Token(const Token&) = default; |
| Token& operator=(const Token& other) = delete; |
| |
| // A "valid" token is one which does not hold the unlocked sentinel value. A |
| // "reserved" token is one whose value is on the range (0, kReservedTokens] |
| // (which will never be generated by the token generator). |
| bool is_valid() const { return token_ != kUnlockedTokenValue; } |
| bool is_reserved() const { return is_valid() && (token_ <= kReservedTokens); } |
| |
| // Report the raw value of the token, typically used for debug logging. |
| uint64_t value() const { return token_; } |
| |
| private: |
| friend class ::concurrent::ChainLock; |
| |
| explicit inline constexpr Token(uint64_t token_val) : token_{token_val} {} |
| |
| // Comparison operators used as shorthand for comparing token values during |
| // acquire operations. Note, these are deliberately private. The internal |
| // value for a token and how it is used to determine who has priority during |
| // lock acquisition is considered to be an internal implementation detail. |
| // Users should really not be comparing token values, and if they ever find |
| // themselves in a situation of legitimately needing to do so, it would be a |
| // hint that the user-facing API is missing some functionality somewhere. |
| bool operator==(const Token& other) const { return token_ == other.token_; } |
| bool operator!=(const Token& other) const { return token_ != other.token_; } |
| bool operator<(const Token& other) const { return token_ < other.token_; } |
| bool operator>(const Token& other) const { return token_ > other.token_; } |
| |
| static inline constexpr uint64_t kUnlockedTokenValue{0}; |
| static inline constexpr uint64_t kReservedTokens{0x10000}; |
| static inline std::atomic<uint64_t> token_generator_{kReservedTokens + 1}; |
| uint64_t token_; |
| }; |
| } // namespace chain_lock_internal |
| |
| class __TA_CAPABILITY("mutex") ChainLock { |
| public: |
| using Token = chain_lock_internal::Token; |
| |
| enum class LockResult { |
| // Lock was successfully acquired |
| kOk, |
| |
| // Lock is held by someone with a higher priority token (their_token < |
| // our_token). We need to invoke the backoff protocol, dropping all of the |
| // locks we currently hold before trying again. |
| kBackoff, |
| |
| // Lock is already held by our token. This is an error, and perhaps a fatal |
| // one depending on the situation. |
| kCycleDetected, |
| |
| // Lock is held by someone with a lower priority token (their_token > |
| // our_token). We can immediately try again, but some users (like the |
| // kernel) might sometimes need to see this result in order to perform |
| // accounting for contention times. |
| kRetry, |
| }; |
| |
| constexpr ChainLock() = default; |
| ~ChainLock() { |
| [[maybe_unused]] const Token lock_token = state_.load(std::memory_order_relaxed); |
| ZX_DEBUG_ASSERT_MSG(lock_token == kUnlockedToken, |
| "Destroying locked ChainLock (lock token 0x%lx).", lock_token.value()); |
| } |
| |
| // No copy, no move |
| ChainLock(const ChainLock&) = delete; |
| ChainLock(ChainLock&&) = delete; |
| ChainLock& operator=(const ChainLock&) = delete; |
| ChainLock& operator=(ChainLock&&) = delete; |
| |
| // Exclusive locking. |
| // |
| // Attempt to acquire the lock, aborting in the presence of a higher priority |
| // lock holder, or if a cycle is detected. Note that there are no annotations |
| // present on this method; use AssertHeld after successfully acquiring the |
| // lock in order to make the static analyzer happy. |
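| // |
| // A minimal sketch of checking the result (a single hypothetical lock named |
| // "lock"; code holding other ChainLocks would handle kBackoff by dropping |
| // them all and retrying): |
| // |
| // ``` |
| // ChainLock::Token token; |
| // if (lock.Acquire(token) == ChainLock::LockResult::kOk) { |
| //   lock.AssertHeld(token); |
| //   /* protected work */ |
| //   lock.Release(); |
| // } |
| // ``` |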
| LockResult Acquire(const Token token) { |
| // The token the user is passing cannot be invalid. |
| ZX_DEBUG_ASSERT_MSG(token.is_valid(), "Attempting to Acquire using an invalid token."); |
| // Run any OS/environment specific checks before acquiring. |
| return AcquireInternal(token).result; |
| } |
| |
| // Unconditionally acquire the lock. Do not stop trying, even in the presence |
| // of a higher priority lock holder. ASSERT in the case that a cycle is |
| // detected. Primarily meant to be used when attempting to acquire a |
| // single ChainLock, while no other ChainLocks are currently held. |
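| // |
| // A minimal sketch (no other ChainLocks held; "lock" is illustrative): |
| // |
| // ``` |
| // ChainLock::Token token; |
| // lock.AcquireUnconditionally(token); |
| // /* protected work */ |
| // lock.Release(); |
| // ``` |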
| void AcquireUnconditionally(const Token token) __TA_ACQUIRE() { |
| // The token the user is passing cannot be invalid. |
| ZX_DEBUG_ASSERT_MSG(token.is_valid(), |
| "Attempting to AcquireUnconditionally using an invalid token."); |
| |
| while (true) { |
| if (const LockResult result = AcquireInternal(token).result; result == LockResult::kOk) { |
| return; |
| } else { |
| ZX_DEBUG_ASSERT_MSG(result != LockResult::kCycleDetected, |
| "Cycle detected during AcquireUnconditionally"); |
| arch::Yield(); |
| } |
| } |
| } |
| |
| // Attempt to acquire the lock, but do not ever spin trying. This method will |
| // immediately fail if the lock is contested in any way, and will ASSERT if a |
| // lock cycle is detected. |
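| // |
| // A usage sketch ("lock" is illustrative); the TRY_ACQUIRE annotation makes |
| // the analyzer demand a Release only on the successful path: |
| // |
| // ``` |
| // ChainLock::Token token; |
| // if (lock.TryAcquire(token)) { |
| //   /* protected work */ |
| //   lock.Release(); |
| // } |
| // ``` |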
| bool TryAcquire(const Token token) __TA_TRY_ACQUIRE(true) { |
| // Try to claim the lock with our CMPX. If this fails *and* the observed |
| // token is our token, ASSERT. |
| Token expected{kUnlockedToken}; |
| if (state_.compare_exchange_strong(expected, token, std::memory_order_acquire, |
| std::memory_order_relaxed)) { |
| return true; |
| } |
| |
| ZX_DEBUG_ASSERT_MSG(expected != token, "Cycle detected during TryAcquire"); |
| return false; |
| } |
| |
| // Unconditionally release the lock. ASSERTs if the lock is not currently |
| // held by anyone, but performs no checks against the token used to acquire |
| // the chain. |
| void Release() __TA_RELEASE() { |
| // Run any OS/environment specific checks before releasing. |
| ZX_DEBUG_ASSERT_MSG(state_.load(std::memory_order_relaxed) != kUnlockedToken, |
| "Attempting to unlock an already unlocked ChainLock."); |
| state_.store(kUnlockedToken, std::memory_order_release); |
| } |
| |
| // AssertHeld/AssertAcquired both verify that the lock is held by the provided |
| // token in a way which satisfies the static analyzer, however their semantics |
| // and intended uses are slightly different. |
| // |
| // AssertHeld will assert that the ChainLock instance is currently held by the |
| // provided token. It does not actually claim to acquire the lock, merely |
| // asserts that the lock is held. It is intended to be used in places where |
| // the lock should have already been acquired, but where it is not reasonable |
| // to prove this at compile time given the scoping rules of Clang's static |
| // thread analysis. |
| // |
| // AssertAcquired is meant to be used in conjunction with Acquire, and claims |
| // to be the function which actually acquires the lock, not just asserts that |
| // the lock is held. This stronger statement will cause the static analyzer |
| // to provide useful checks (such as demanding that the lock is eventually |
| // released) in places where use of AssertAcquired is appropriate. An example |
| // use might look something like this: |
| // |
| // ``` |
| // ChainLock::Token token; |
| // while (true) { |
| // lock1.AcquireUnconditionally(token); |
| // |
| // if (const ChainLock::LockResult res = lock2.Acquire(token); |
| // res == ChainLock::LockResult::kBackoff) { |
| // // The ACQUIRE annotation on AcquireUnconditionally |
| // // means we cannot forget to do this. |
| // lock1.Release(); |
| // continue; |
| // } |
| // // AssertAcquired provides the same ACQUIRE annotation as we have in |
| // // AcquireUnconditionally, and also ensures that we didn't fail the Acquire |
| // // op due to a detected cycle. |
| // lock2.AssertAcquired(token); |
| // |
| // /* Do stuff now that we hold both locks. */ |
| // |
| // // Can't forget to drop these locks now, because of the acquire |
| // // annotations. |
| // lock1.Release(); |
| // lock2.Release(); |
| // } |
| // ``` |
| void AssertHeld(const Token token) const __TA_ASSERT() { |
| [[maybe_unused]] const Token lock_token = state_.load(std::memory_order_relaxed); |
| ZX_DEBUG_ASSERT_MSG(lock_token == token, |
| "Failed to assert ChainLock is held (lock token 0x%lx, our token 0x%lx).", |
| lock_token.value(), token.value()); |
| } |
| void AssertAcquired(const Token token) const __TA_ACQUIRE() { AssertHeld(token); } |
| |
| // No-op versions of AssertHeld and AssertAcquired (MarkHeld and |
| // MarkNeedsRelease, respectively). USE WITH EXTREME CAUTION. These tell the |
| // static analyzer that we are holding the lock, but make no actual runtime |
| // effort to verify this. |
| // |
| // There are very few places where it is OK to use this method, but here are |
| // two examples. |
| // |
| // Example 1: We just verified that we have the lock. |
| // ``` |
| // if (const ChainLock::LockResult res = my_obj.lock_.Acquire(token); |
| // res != ChainLock::LockResult::kOk) { |
| // return SomeError; |
| // } |
| // my_obj.lock_.MarkHeld(); |
| // ``` |
| // It is OK to skip the actual assert operations here because we just checked |
| // to make sure that the operation succeeded. There is not much point in |
| // wasting cycles to do any more. |
| // |
| // Example 2: The analyzer is confused about which lock is which because of |
| // some tricky code, like a dynamic downcast. |
| // ``` |
| // Obj* GetObj(BaseClass* base) TA_REQ(base->lock_) { |
| // DerivedClass* derived = DowncastToDerived(base); |
| // |
| // if (derived) { |
| // derived->lock_.MarkHeld(); |
| // return &derived->protected_obj_; |
| // } |
| // |
| // return nullptr; |
| // } |
| // ``` |
| // |
| // In this case, we have a base class with a lock, and a derived class with a |
| // member which is protected by the base class's lock. We are also operating |
| // in an environment where RTTI is disabled, so we cannot use dynamic_cast. |
| // Instead, we need to write our own dynamic downcast function using some |
| // internal mechanism to ensure that we can do so safely. |
| // |
| // We are attempting to fetch the protected member from the derived class, but |
| // only if it exists, which it may not if the base class pointer we have does |
| // not actually point to an instance of the derived class. |
| // |
| // We have already statically required that the base class instance be locked, |
| // but when we perform our downcast, the static analyzer is not smart enough |
| // to understand that base->lock_ is the same thing as derived->lock_, so we |
| // use the no-op form of AssertHeld to satisfy it instead. |
| // |
| void MarkHeld() const __TA_ASSERT() {} |
| void MarkNeedsRelease() const __TA_ACQUIRE() {} |
| |
| // Similar to MarkHeld/MarkNeedsRelease, MarkReleased pretends to release the |
| // lock, but actually does nothing. The only place this is currently used is |
| // to satisfy the analyzer during the edge case of context switching in the |
| // zircon kernel. |
| void MarkReleased() const __TA_RELEASE() {} |
| |
| // A simple runtime check to see if the lock is currently held by the provided |
| // token. Provides no memory ordering guarantees; mostly meant to be used |
| // when releasing locks in a partially acquired chain. |
| bool is_held(const Token token) const { return state_.load(std::memory_order_relaxed) == token; } |
| |
| // Test to see if the lock is in the unlocked state. Provides acquire |
| // semantics, allowing this method to be used (by thread A) in a polling |
| // fashion to test to see if another thread (B) has finally released the lock, |
| // guaranteeing that A will have seen all of the writes performed by B before |
| // releasing the lock. |
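| // |
| // For example, a sketch of thread A waiting for thread B to finish and drop |
| // the lock ("lock" is illustrative): |
| // |
| // ``` |
| // while (!lock.is_unlocked()) { |
| //   arch::Yield(); |
| // } |
| // /* All of B's writes made before its Release() are now visible to A. */ |
| // ``` |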
| bool is_unlocked() const { return state_.load(std::memory_order_acquire) == kUnlockedToken; } |
| |
| // Similar to the difference between AssertHeld and AssertAcquired, is_held |
| // and MarkNeedsReleaseIfHeld are related, but MarkNeedsReleaseIfHeld carries |
| // an extra TRY annotation so that if a user discovers that the lock is |
| // actually held, the analyzer knows this and will demand that the lock be |
| // released in the scope of the if-statement whose predicate established this |
| // fact. |
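| // |
| // For example, a sketch of unwinding a partially acquired chain (here "locks" |
| // is an illustrative collection of ChainLock pointers, and "token" is the |
| // caller's acquisition token): |
| // |
| // ``` |
| // for (ChainLock* lock : locks) { |
| //   if (lock->MarkNeedsReleaseIfHeld(token)) { |
| //     /* The analyzer now knows this lock is held and must be released. */ |
| //     lock->Release(); |
| //   } |
| // } |
| // ``` |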
| bool MarkNeedsReleaseIfHeld(const Token token) const __TA_TRY_ACQUIRE(true) { |
| return state_.load(std::memory_order_relaxed) == token; |
| } |
| |
| // Returns the current token of the lock. Useful in some situations where |
| // obtaining the next lock in a chain requires knowing the previous lock's |
| // token, but the structure of the code makes it difficult or inefficient to |
| // carry this value outside of the scope of the previous lock's state. |
| // Requires that the caller be able to statically assert that the lock is |
| // currently held by them. |
| Token get_token() const __TA_REQUIRES(*this) { return state_.load(std::memory_order_relaxed); } |
| |
| protected: |
| struct AcquireInternalResult { |
| AcquireInternalResult(LockResult r, Token ot) : result(r), observed_token(ot) {} |
| LockResult result; |
| Token observed_token; |
| }; |
| |
| static Token CreateToken(uint32_t val) { return Token{val}; } |
| static void ReplaceToken(ChainLock::Token& old_token, ChainLock::Token new_token) { |
| old_token.token_ = new_token.token_; |
| } |
| |
| // Attempt to acquire the lock, automatically retrying if we detect that the |
| // lock is held by someone with a lower priority token. |
| inline AcquireInternalResult AcquireInternal(const Token token) { |
| while (true) { |
| const AcquireInternalResult result = AcquireInternalSingleAttempt(token); |
| if (result.result != LockResult::kRetry) { |
| return result; |
| } |
| |
| // The lock's token must be strictly greater than ours, meaning that the |
| // individual holding the lock must have come after us. Just spin, waiting |
| // for them to either complete their operation, or attempt to grab a lock |
| // that we hold, and back off as a result. |
| arch::Yield(); |
| } |
| } |
| |
| // Attempt to acquire the lock, but make only a single attempt. Do not |
| // attempt to automatically retry, even if the lock is currently held by |
| // someone with a lower priority than ours. |
| inline AcquireInternalResult AcquireInternalSingleAttempt(const Token token) { |
| // Try to CMPX Unlocked for our token. If this succeeds, great! We have |
| // the lock and can get out. Otherwise, what we do next will depend on the |
| // token id we observed during the failed CMPX. |
| Token expected{kUnlockedToken}; |
| if (state_.compare_exchange_strong(expected, token, std::memory_order_acquire, |
| std::memory_order_acquire)) { |
| return AcquireInternalResult{LockResult::kOk, expected}; |
| } |
| |
| // If the lock's token is strictly greater than our token, then the individual |
| // holding the lock came after us and we should retry the attempt to acquire the lock. |
| if (expected > token) { |
| return AcquireInternalResult{LockResult::kRetry, expected}; |
| } |
| |
| // If the lock's token is strictly less than our token, then the individual |
| // holding the lock came before us. We need to back off and give them a |
| // chance to complete. |
| if (expected < token) { |
| return AcquireInternalResult{LockResult::kBackoff, expected}; |
| } |
| |
| // The lock's token must be equal to ours. We are attempting to obtain a lock |
| // we already hold. Report this as a detected cycle. |
| return AcquireInternalResult{LockResult::kCycleDetected, expected}; |
| } |
| |
| static constexpr Token kUnlockedToken{Token::kUnlockedTokenValue}; |
| static constexpr Token kMaxToken{std::numeric_limits<decltype(Token::token_)>::max()}; |
| |
| static_assert(std::atomic<Token>::is_always_lock_free, |
| "Error! atomic ChainLock tokens must always be lock free!"); |
| std::atomic<Token> state_{kUnlockedToken}; |
| }; |
| |
| } // namespace concurrent |
| |
| #endif // LIB_CONCURRENT_CHAINLOCK_H_ |