spin/mutex/spin.rs

//! A naïve spinning mutex.
//!
//! Waiting threads hammer an atomic variable until it becomes available. Best-case latency is low, but worst-case
//! latency is theoretically infinite.

use crate::{
    atomic::{AtomicBool, Ordering},
    RelaxStrategy, Spin,
};
use core::{
    cell::UnsafeCell,
    fmt,
    marker::PhantomData,
    mem::ManuallyDrop,
    ops::{Deref, DerefMut},
};

/// A [spin lock](https://en.m.wikipedia.org/wiki/Spinlock) providing mutually exclusive access to data.
///
/// # Example
///
/// ```
/// use spin;
///
/// let lock = spin::mutex::SpinMutex::<_>::new(0);
///
/// // Modify the data
/// *lock.lock() = 2;
///
/// // Read the data
/// let answer = *lock.lock();
/// assert_eq!(answer, 2);
/// ```
///
/// # Thread safety example
///
/// ```
/// use spin;
/// use std::sync::{Arc, Barrier};
///
/// let thread_count = 1000;
/// let spin_mutex = Arc::new(spin::mutex::SpinMutex::<_>::new(0));
///
/// // We use a barrier to ensure the readout happens after all writing
/// let barrier = Arc::new(Barrier::new(thread_count + 1));
///
/// # let mut ts = Vec::new();
/// for _ in 0..thread_count {
///     let my_barrier = barrier.clone();
///     let my_lock = spin_mutex.clone();
/// # let t =
///     std::thread::spawn(move || {
///         let mut guard = my_lock.lock();
///         *guard += 1;
///
///         // Release the lock to prevent a deadlock
///         drop(guard);
///         my_barrier.wait();
///     });
/// # ts.push(t);
/// }
///
/// barrier.wait();
///
/// let answer = { *spin_mutex.lock() };
/// assert_eq!(answer, thread_count);
///
/// # for t in ts {
/// #     t.join().unwrap();
/// # }
/// ```
pub struct SpinMutex<T: ?Sized, R = Spin> {
    phantom: PhantomData<R>,
    pub(crate) lock: AtomicBool,
    data: UnsafeCell<T>,
}

/// A guard that provides mutable data access.
///
/// When the guard falls out of scope it will release the lock.
pub struct SpinMutexGuard<'a, T: ?Sized + 'a> {
    lock: &'a AtomicBool,
    data: *mut T,
}

// Same unsafe impls as `std::sync::Mutex`
unsafe impl<T: ?Sized + Send, R> Sync for SpinMutex<T, R> {}
unsafe impl<T: ?Sized + Send, R> Send for SpinMutex<T, R> {}

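// The guard hands out `&T`/`&mut T`, so sharing it requires `T: Sync` and sending it
// requires `T: Send`. Sending the guard itself is fine here because unlocking is just an
// atomic store with no thread-affinity requirement (unlike `std::sync::MutexGuard`).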
unsafe impl<T: ?Sized + Sync> Sync for SpinMutexGuard<'_, T> {}
unsafe impl<T: ?Sized + Send> Send for SpinMutexGuard<'_, T> {}

impl<T, R> SpinMutex<T, R> {
    /// Creates a new [`SpinMutex`] wrapping the supplied data.
    ///
    /// # Example
    ///
    /// ```
    /// use spin::mutex::SpinMutex;
    ///
    /// static MUTEX: SpinMutex<()> = SpinMutex::<_>::new(());
    ///
    /// fn demo() {
    ///     let lock = MUTEX.lock();
    ///     // do something with lock
    ///     drop(lock);
    /// }
    /// ```
    #[inline(always)]
    pub const fn new(data: T) -> Self {
        SpinMutex {
            lock: AtomicBool::new(false),
            data: UnsafeCell::new(data),
            phantom: PhantomData,
        }
    }

    /// Consumes this [`SpinMutex`] and unwraps the underlying data.
    ///
    /// # Example
    ///
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
    /// assert_eq!(42, lock.into_inner());
    /// ```
    #[inline(always)]
    pub fn into_inner(self) -> T {
        // We know statically that there are no outstanding references to
        // `self` so there's no need to lock.
        let SpinMutex { data, .. } = self;
        data.into_inner()
    }

    /// Returns a mutable pointer to the underlying data.
    ///
    /// This is mostly meant to be used for applications which require manual unlocking, but where
    /// storing both the lock and a pointer to the inner data separately would be inefficient.
    ///
    /// # Example
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
    ///
    /// unsafe {
    ///     core::mem::forget(lock.lock());
    ///
    ///     assert_eq!(lock.as_mut_ptr().read(), 42);
    ///     lock.as_mut_ptr().write(58);
    ///
    ///     lock.force_unlock();
    /// }
    ///
    /// assert_eq!(*lock.lock(), 58);
    /// ```
    #[inline(always)]
    pub fn as_mut_ptr(&self) -> *mut T {
        self.data.get()
    }
}

impl<T: ?Sized, R: RelaxStrategy> SpinMutex<T, R> {
    /// Locks the [`SpinMutex`] and returns a guard that permits access to the inner data.
    ///
    /// The returned value may be dereferenced for data access
    /// and the lock will be dropped when the guard falls out of scope.
    ///
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(0);
    /// {
    ///     let mut data = lock.lock();
    ///     // The lock is now locked and the data can be accessed
    ///     *data += 1;
    ///     // The lock is implicitly dropped at the end of the scope
    /// }
    /// ```
    #[inline(always)]
    pub fn lock(&self) -> SpinMutexGuard<T> {
        // Can fail to lock even if the spinlock is not locked. May be more efficient than `try_lock`
        // when called in a loop.
        while self
            .lock
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            // Wait until the lock looks unlocked before retrying
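            // (spinning on a plain relaxed load rather than repeatedly issuing
            // compare-exchange operations avoids bouncing the cache line between cores
            // while the lock is held: the classic test-and-test-and-set pattern)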
            while self.is_locked() {
                R::relax();
            }
        }

        SpinMutexGuard {
            lock: &self.lock,
            data: unsafe { &mut *self.data.get() },
        }
    }
}

impl<T: ?Sized, R> SpinMutex<T, R> {
    /// Returns `true` if the lock is currently held.
    ///
    /// # Safety
    ///
    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
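    ///
    /// # Example
    ///
    /// A minimal illustration, using only this type's own API:
    ///
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
    /// assert!(!lock.is_locked());
    ///
    /// let guard = lock.lock();
    /// assert!(lock.is_locked());
    ///
    /// drop(guard);
    /// assert!(!lock.is_locked());
    /// ```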
205    #[inline(always)]
206    pub fn is_locked(&self) -> bool {
207        self.lock.load(Ordering::Relaxed)
208    }
209
210    /// Force unlock this [`SpinMutex`].
211    ///
212    /// # Safety
213    ///
214    /// This is *extremely* unsafe if the lock is not held by the current
215    /// thread. However, this can be useful in some instances for exposing the
216    /// lock to FFI that doesn't know how to deal with RAII.
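    ///
    /// # Example
    ///
    /// A minimal sketch of manual unlocking:
    ///
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(());
    ///
    /// // Take the lock, then forget the guard so it never unlocks on drop.
    /// core::mem::forget(lock.lock());
    /// assert!(lock.try_lock().is_none());
    ///
    /// // We still logically hold the lock, so releasing it by hand is sound.
    /// unsafe { lock.force_unlock(); }
    /// assert!(lock.try_lock().is_some());
    /// ```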
217    #[inline(always)]
218    pub unsafe fn force_unlock(&self) {
219        self.lock.store(false, Ordering::Release);
220    }
221
222    /// Try to lock this [`SpinMutex`], returning a lock guard if successful.
223    ///
224    /// # Example
225    ///
226    /// ```
227    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
228    ///
229    /// let maybe_guard = lock.try_lock();
230    /// assert!(maybe_guard.is_some());
231    ///
232    /// // `maybe_guard` is still held, so the second call fails
233    /// let maybe_guard2 = lock.try_lock();
234    /// assert!(maybe_guard2.is_none());
235    /// ```
236    #[inline(always)]
237    pub fn try_lock(&self) -> Option<SpinMutexGuard<T>> {
238        // The reason for using a strong compare_exchange is explained here:
239        // https://github.com/Amanieu/parking_lot/pull/207#issuecomment-575869107
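        // (in short: `compare_exchange_weak` is allowed to fail spuriously even when the
        // lock is free, which would make a one-shot `try_lock` report contention that
        // isn't actually there)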
        if self
            .lock
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
        {
            Some(SpinMutexGuard {
                lock: &self.lock,
                data: unsafe { &mut *self.data.get() },
            })
        } else {
            None
        }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the [`SpinMutex`] mutably, and a mutable reference is guaranteed to be exclusive in
    /// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As
    /// such, this is a 'zero-cost' operation.
    ///
    /// # Example
    ///
    /// ```
    /// let mut lock = spin::mutex::SpinMutex::<_>::new(0);
    /// *lock.get_mut() = 10;
    /// assert_eq!(*lock.lock(), 10);
    /// ```
    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        // We know statically that there are no other references to `self`, so
        // there's no need to lock the inner mutex.
        unsafe { &mut *self.data.get() }
    }
}

impl<T: ?Sized + fmt::Debug, R> fmt::Debug for SpinMutex<T, R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.try_lock() {
            Some(guard) => write!(f, "Mutex {{ data: ")
                .and_then(|()| (&*guard).fmt(f))
                .and_then(|()| write!(f, "}}")),
            None => write!(f, "Mutex {{ <locked> }}"),
        }
    }
}

impl<T: ?Sized + Default, R> Default for SpinMutex<T, R> {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

impl<T, R> From<T> for SpinMutex<T, R> {
    fn from(data: T) -> Self {
        Self::new(data)
    }
}

impl<'a, T: ?Sized> SpinMutexGuard<'a, T> {
    /// Leak the lock guard, yielding a mutable reference to the underlying data.
    ///
    /// Note that this function will permanently lock the original [`SpinMutex`].
    ///
    /// ```
    /// let mylock = spin::mutex::SpinMutex::<_>::new(0);
    ///
    /// let data: &mut i32 = spin::mutex::SpinMutexGuard::leak(mylock.lock());
    ///
    /// *data = 1;
    /// assert_eq!(*data, 1);
    /// ```
    #[inline(always)]
    pub fn leak(this: Self) -> &'a mut T {
        // Use ManuallyDrop to avoid stacked-borrow invalidation
        let mut this = ManuallyDrop::new(this);
        // We know statically that only we are referencing data
        unsafe { &mut *this.data }
    }
}

impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for SpinMutexGuard<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, T: ?Sized + fmt::Display> fmt::Display for SpinMutexGuard<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<'a, T: ?Sized> Deref for SpinMutexGuard<'a, T> {
    type Target = T;
    fn deref(&self) -> &T {
        // We know statically that only we are referencing data
        unsafe { &*self.data }
    }
}

impl<'a, T: ?Sized> DerefMut for SpinMutexGuard<'a, T> {
    fn deref_mut(&mut self) -> &mut T {
        // We know statically that only we are referencing data
        unsafe { &mut *self.data }
    }
}

impl<'a, T: ?Sized> Drop for SpinMutexGuard<'a, T> {
    /// The dropping of the MutexGuard will release the lock it was created from.
    fn drop(&mut self) {
        self.lock.store(false, Ordering::Release);
    }
}

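// With the `lock_api` feature enabled, `SpinMutex<()>` can serve as the raw lock behind
// the `lock_api` crate's wrapper types. A rough usage sketch (assuming `lock_api`'s usual
// `Mutex` wrapper; names here are illustrative only):
//
//     type Mutex<T> = lock_api_crate::Mutex<SpinMutex<(), Spin>, T>;
//     let m: Mutex<u32> = Mutex::new(0);
//     *m.lock() += 1;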
#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawMutex for SpinMutex<(), R> {
    type GuardMarker = lock_api_crate::GuardSend;

    const INIT: Self = Self::new(());

    fn lock(&self) {
        // Prevent guard destructor running
        core::mem::forget(Self::lock(self));
    }

    fn try_lock(&self) -> bool {
        // Prevent guard destructor running
        Self::try_lock(self).map(core::mem::forget).is_some()
    }

    unsafe fn unlock(&self) {
        self.force_unlock();
    }

    fn is_locked(&self) -> bool {
        Self::is_locked(self)
    }
}

#[cfg(test)]
mod tests {
    use std::prelude::v1::*;

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;

    type SpinMutex<T> = super::SpinMutex<T>;

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let m = SpinMutex::<_>::new(());
        drop(m.lock());
        drop(m.lock());
    }

    #[test]
    fn lots_and_lots() {
        static M: SpinMutex<()> = SpinMutex::<_>::new(());
        static mut CNT: u32 = 0;
        const J: u32 = 1000;
        const K: u32 = 3;

        fn inc() {
            for _ in 0..J {
                unsafe {
                    let _g = M.lock();
                    CNT += 1;
                }
            }
        }

        let (tx, rx) = channel();
        let mut ts = Vec::new();
        for _ in 0..K {
            let tx2 = tx.clone();
            ts.push(thread::spawn(move || {
                inc();
                tx2.send(()).unwrap();
            }));
            let tx2 = tx.clone();
            ts.push(thread::spawn(move || {
                inc();
                tx2.send(()).unwrap();
            }));
        }

        drop(tx);
        for _ in 0..2 * K {
            rx.recv().unwrap();
        }
        assert_eq!(unsafe { CNT }, J * K * 2);

        for t in ts {
            t.join().unwrap();
        }
    }

    #[test]
    fn try_lock() {
        let mutex = SpinMutex::<_>::new(42);

        // First lock succeeds
        let a = mutex.try_lock();
        assert_eq!(a.as_ref().map(|r| **r), Some(42));

        // Additional lock fails
        let b = mutex.try_lock();
        assert!(b.is_none());

        // After dropping lock, it succeeds again
        ::core::mem::drop(a);
        let c = mutex.try_lock();
        assert_eq!(c.as_ref().map(|r| **r), Some(42));
    }

    #[test]
    fn test_into_inner() {
        let m = SpinMutex::<_>::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = SpinMutex::<_>::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_mutex_arc_nested() {
        // Tests nested mutexes and access
        // to underlying data.
        let arc = Arc::new(SpinMutex::<_>::new(1));
        let arc2 = Arc::new(SpinMutex::<_>::new(arc));
        let (tx, rx) = channel();
        let t = thread::spawn(move || {
            let lock = arc2.lock();
            let lock2 = lock.lock();
            assert_eq!(*lock2, 1);
            tx.send(()).unwrap();
        });
        rx.recv().unwrap();
        t.join().unwrap();
    }

    #[test]
    fn test_mutex_arc_access_in_unwind() {
        let arc = Arc::new(SpinMutex::<_>::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || -> () {
            struct Unwinder {
                i: Arc<SpinMutex<i32>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    *self.i.lock() += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.lock();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_mutex_unsized() {
        let mutex: &SpinMutex<[i32]> = &SpinMutex::<_>::new([1, 2, 3]);
        {
            let b = &mut *mutex.lock();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*mutex.lock(), comp);
    }

    #[test]
    fn test_mutex_force_lock() {
        let lock = SpinMutex::<_>::new(());
        ::std::mem::forget(lock.lock());
        unsafe {
            lock.force_unlock();
        }
        assert!(lock.try_lock().is_some());
    }
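
    // A small additional check (illustrative, using only the API defined above):
    // `get_mut` bypasses locking entirely and `is_locked` tracks the guard's lifetime.
    #[test]
    fn test_get_mut_and_is_locked() {
        let mut m = SpinMutex::<_>::new(0);

        // Mutable access through `get_mut` needs no lock.
        *m.get_mut() = 10;
        assert_eq!(*m.lock(), 10);

        // The lock flag follows the guard.
        assert!(!m.is_locked());
        let guard = m.lock();
        assert!(m.is_locked());
        drop(guard);
        assert!(!m.is_locked());
    }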
}