Line data Source code
// Distributed under the MIT License.
// See LICENSE.txt for details.

#pragma once

#include <atomic>

namespace Parallel {
/*!
 * \brief A simple spinlock implemented in `std::atomic`s
 *
 * Implementation adapted from https://rigtorp.se/spinlock/
 */
class Spinlock {
 public:
  Spinlock() = default;
  Spinlock(const Spinlock&) = delete;
  Spinlock& operator=(const Spinlock&) = delete;
  Spinlock(Spinlock&&) = delete;
  Spinlock& operator=(Spinlock&&) = delete;
  ~Spinlock() = default;

  /// \brief Acquire the lock. Will block until the lock is acquired.
  void lock() {
    while (true) {
      // Optimistic fast path: a single acquire-exchange succeeds when the
      // lock is uncontended.
      if (!lock_.exchange(true, std::memory_order_acquire)) {
        return;
      }
      // Contended: spin on relaxed loads so we read from our own cache line
      // instead of generating cache misses with repeated read-modify-writes.
      while (lock_.load(std::memory_order_relaxed)) {
        // An X86 PAUSE or ARM YIELD instruction here can reduce contention
        // between hyper-threads.
        //
        // If no hyperthreading is being used, this will actually slow down
        // the code.
        //
        // We keep this comment and code around just in case we want to
        // experiment with it in the future.
        //
        // __builtin_ia32_pause();
      }
    }
  }

  /// \brief Try to acquire the lock.
  ///
  /// Returns `true` if the lock was acquired, `false` if it wasn't.
  bool try_lock() {
    // Relaxed pre-check: if the lock is visibly held, fail without the
    // exchange, preventing unnecessary cache misses for callers doing
    // while(!try_lock()).
    if (lock_.load(std::memory_order_relaxed)) {
      return false;
    }
    return !lock_.exchange(true, std::memory_order_acquire);
  }

  /// \brief Release the lock.
  void unlock() { lock_.store(false, std::memory_order_release); }

 private:
  // true while the lock is held; release/acquire pairing on this flag
  // orders the critical section.
  std::atomic<bool> lock_{false};
};
}  // namespace Parallel