rtic/export.rs

#![allow(clippy::inline_always)]
use core::{
    cell::Cell,
    sync::atomic::{AtomicBool, Ordering},
};

pub use crate::tq::{NotReady, TimerQueue};
pub use bare_metal::CriticalSection;
pub use cortex_m::{
    asm::nop,
    asm::wfi,
    interrupt,
    peripheral::{scb::SystemHandler, DWT, NVIC, SCB, SYST},
    Peripherals,
};
pub use heapless::sorted_linked_list::SortedLinkedList;
pub use heapless::spsc::Queue;
pub use heapless::BinaryHeap;
pub use rtic_monotonic as monotonic;

pub type SCFQ<const N: usize> = Queue<u8, N>;
pub type SCRQ<T, const N: usize> = Queue<(T, u8), N>;

/// Mask is used to store interrupt masks on systems without a BASEPRI register (M0, M0+, M23).
/// It needs to be large enough to cover all the relevant interrupts in use.
/// For M0/M0+ there are only 32 interrupts, so we only need one u32 value.
/// For M23 there can be as many as 480 interrupts.
/// Rather than providing space for all possible interrupts, we just detect the highest interrupt in
/// use at compile time and allocate enough u32 chunks to cover them.
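///
/// # Example
///
/// An illustrative sketch of the chunk layout (the RTIC macros produce these
/// values via `compute_mask_chunks` and `create_mask` below):
///
/// ```ignore
/// // If the highest interrupt ID in use is 37, two u32 chunks are needed:
/// // IDs 0..=31 live in chunk 0 and IDs 32..=63 in chunk 1, so interrupt
/// // 37 maps to bit 5 of chunk 1.
/// let mask: Mask<2> = Mask([0, 1 << (37 - 32)]);
/// ```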
#[derive(Copy, Clone)]
pub struct Mask<const M: usize>([u32; M]);

impl<const M: usize> core::ops::BitOrAssign for Mask<M> {
    fn bitor_assign(&mut self, rhs: Self) {
        for i in 0..M {
            self.0[i] |= rhs.0[i];
        }
    }
}

#[cfg(not(have_basepri))]
impl<const M: usize> Mask<M> {
    /// Set a bit inside a Mask.
    const fn set_bit(mut self, bit: u32) -> Self {
        let block = bit / 32;

        if block as usize >= M {
            panic!("Generating masks for thumbv6/thumbv8m.base failed! Are you compiling for thumbv6 on a thumbv7 MCU or using an unsupported thumbv8m.base MCU?");
        }

        let offset = bit - (block * 32);
        self.0[block as usize] |= 1 << offset;
        self
    }
}

#[cfg(have_basepri)]
use cortex_m::register::basepri;

#[cfg(have_basepri)]
#[inline(always)]
pub fn run<F>(priority: u8, f: F)
where
    F: FnOnce(),
{
    if priority == 1 {
        // If the priority of this interrupt is `1` then BASEPRI can only
        // be `0` on entry, so we skip reading it and restore `0` directly.
        f();
        unsafe { basepri::write(0) }
    } else {
        // Otherwise save BASEPRI, run the closure, and restore it.
        let initial = basepri::read();
        f();
        unsafe { basepri::write(initial) }
    }
}
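// Illustrative call shape (hypothetical; in practice the RTIC macros emit
// this wrapper around each task dispatcher):
//
//     run(PRIORITY, || {
//         // handler body; `lock` below may temporarily raise BASEPRI
//     });
//     // BASEPRI is back to its pre-handler value here.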

#[cfg(not(have_basepri))]
#[inline(always)]
pub fn run<F>(_priority: u8, f: F)
where
    F: FnOnce(),
{
    f();
}

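/// A one-shot spin barrier: `wait` spins until another context calls
/// `release`.
///
/// # Example
///
/// A minimal sketch (hypothetical usage; RTIC's generated start-up code
/// drives this internally):
///
/// ```ignore
/// static BARRIER: Barrier = Barrier::new();
///
/// // Context A: finish initialization, then open the barrier.
/// BARRIER.release();
///
/// // Context B: block (spin) until context A has released.
/// BARRIER.wait();
/// ```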
pub struct Barrier {
    inner: AtomicBool,
}

impl Barrier {
    pub const fn new() -> Self {
        Barrier {
            inner: AtomicBool::new(false),
        }
    }

    pub fn release(&self) {
        self.inner.store(true, Ordering::Release);
    }

    pub fn wait(&self) {
        while !self.inner.load(Ordering::Acquire) {
            core::hint::spin_loop()
        }
    }
}

// Newtype over `Cell` that forbids mutation through a shared reference
// outside this module (`set` and `get` are private)
pub struct Priority {
    inner: Cell<u8>,
}

impl Priority {
    /// Create a new Priority
    ///
    /// # Safety
    ///
    /// Will overwrite the current Priority
    #[inline(always)]
    pub unsafe fn new(value: u8) -> Self {
        Priority {
            inner: Cell::new(value),
        }
    }

    /// Change the current priority to `value`
    // These two methods are used by `lock` (see below) but can't be used from the RTIC application
    #[inline(always)]
    fn set(&self, value: u8) {
        self.inner.set(value);
    }

    /// Get the current priority
    #[inline(always)]
    fn get(&self) -> u8 {
        self.inner.get()
    }
}
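
// Illustrative flow (module-internal; mirrors what `lock` below does):
//
//     let prio = unsafe { Priority::new(1) }; // task's static priority
//     prio.set(3);                            // enter a ceiling-3 section
//     assert_eq!(prio.get(), 3);
//     prio.set(1);                            // leave it again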

/// Const helper to check whether the target has the BASEPRI register
pub const fn have_basepri() -> bool {
    #[cfg(have_basepri)]
    {
        true
    }

    #[cfg(not(have_basepri))]
    {
        false
    }
}

#[inline(always)]
pub fn assert_send<T>()
where
    T: Send,
{
}

#[inline(always)]
pub fn assert_sync<T>()
where
    T: Sync,
{
}

#[inline(always)]
pub fn assert_monotonic<T>()
where
    T: monotonic::Monotonic,
{
}
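
// These helpers are compile-time checks: instantiating one, e.g.
//
//     assert_send::<u32>();          // compiles, `u32: Send`
//     // assert_send::<*const u8>(); // would fail to compile
//
// generates no runtime code; it only makes the compiler verify the bound.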

/// Lock implementation using BASEPRI and global Critical Section (CS)
///
/// # Safety
///
/// The system ceiling is raised from current to ceiling
/// by either
/// - raising the BASEPRI to the ceiling value, or
/// - disabling all interrupts in case we want to
///   mask interrupts with maximum priority
///
/// The raw pointer is dereferenced inside the CS
///
/// The priority.set/priority.get can safely be outside the CS
/// as being a context local cell (not affected by preemptions).
/// It is merely used in order to omit masking in case the
/// current priority already is >= ceiling.
///
/// Lock Efficiency:
/// Experiments validate (sub)-zero cost for the CS implementation,
/// (sub)-zero as:
/// - Either zero OH (lock optimized out), or
/// - Amounting to an optimal assembly implementation
///   - The BASEPRI value is folded to a constant at compile time
///   - CS entry, single assembly instruction to write BASEPRI
///   - CS exit, single assembly instruction to write BASEPRI
///   - priority.set/get optimized out (their effect not)
/// - On par or better than any handwritten implementation of SRP
///
/// Limitations:
/// The current implementation reads/writes BASEPRI once
/// even in some edge cases where this may be omitted.
/// The total OH per task is at most 2 clock cycles, negligible in
/// practice, but could in theory be eliminated.
///
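/// # Example
///
/// An illustrative sketch (hypothetical names; in practice this call is
/// emitted by the RTIC macros rather than written by hand):
///
/// ```ignore
/// static mut SHARED: u32 = 0;
///
/// // Raise the system ceiling to 3 while the closure runs, then restore it:
/// let r = unsafe {
///     lock(
///         core::ptr::addr_of_mut!(SHARED),
///         &priority,      // this task's `Priority` tracker
///         3,              // ceiling of the `SHARED` resource
///         NVIC_PRIO_BITS, // e.g. 4 priority bits
///         &masks,         // unused by the BASEPRI implementation
///         |shared| {
///             *shared += 1;
///             *shared
///         },
///     )
/// };
/// ```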
#[cfg(have_basepri)]
#[inline(always)]
pub unsafe fn lock<T, R, const M: usize>(
    ptr: *mut T,
    priority: &Priority,
    ceiling: u8,
    nvic_prio_bits: u8,
    _mask: &[Mask<M>; 3],
    f: impl FnOnce(&mut T) -> R,
) -> R {
    let current = priority.get();

    if current < ceiling {
        if ceiling == (1 << nvic_prio_bits) {
            // The hardware encoding of the maximum logical priority is 0,
            // which would disable BASEPRI masking altogether, so fall back
            // to a global critical section.
            priority.set(u8::MAX);
            let r = interrupt::free(|_| f(&mut *ptr));
            priority.set(current);
            r
        } else {
            // Raise the system ceiling by writing the hardware encoding of
            // `ceiling` to BASEPRI, and restore the previous level after.
            priority.set(ceiling);
            basepri::write(logical2hw(ceiling, nvic_prio_bits));
            let r = f(&mut *ptr);
            basepri::write(logical2hw(current, nvic_prio_bits));
            priority.set(current);
            r
        }
    } else {
        // Already at or above the ceiling; no masking needed.
        f(&mut *ptr)
    }
}

/// Lock implementation using interrupt masking
///
/// # Safety
///
/// The system ceiling is raised from current to ceiling
/// by computing a `mask` (1 bit per interrupt):
/// 1: ceiling >= priority > current
/// 0: else
///
/// On CS entry, `clear_enable_mask(mask)` disables interrupts
/// On CS exit,  `set_enable_mask(mask)` re-enables interrupts
///
/// The priority.set/priority.get can safely be outside the CS
/// as being a context local cell (not affected by preemptions).
/// It is merely used in order to omit masking in case
/// current priority >= ceiling.
///
/// Dereferencing a raw pointer is done safely inside the CS
///
/// Lock Efficiency:
/// Early experiments validate (sub)-zero cost for the CS implementation,
/// (sub)-zero as:
/// - Either zero OH (lock optimized out), or
/// - Amounting to an optimal assembly implementation
///   - if ceiling == (1 << nvic_prio_bits)
///     - we execute the closure in a global critical section (interrupt free)
///     - CS entry cost, single write to core register
///     - CS exit cost, single write to core register
///   - else
///     - The `mask` value is folded to a constant at compile time
///     - CS entry, single write of the 32 bit `mask` to the `icer` register
///     - CS exit, single write of the 32 bit `mask` to the `iser` register
/// - priority.set/get optimized out (their effect not)
/// - On par or better than any handwritten implementation of SRP
///
/// Limitations:
/// The current implementation does not allow tasks with shared resources
/// to be bound to exception handlers, as these cannot be masked in HW.
///
/// Possible solutions:
/// - Mask exceptions by global critical sections (interrupt::free)
/// - Temporarily lower the exception priority
///
/// These possible solutions are set as goals for future work
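///
/// # Example
///
/// An illustrative sketch of the masking scheme (hypothetical interrupt IDs;
/// the concrete `masks` array is generated by the RTIC macros):
///
/// ```ignore
/// // Interrupt 0 bound at logical priority 2, interrupt 1 at priority 3:
/// let masks = [create_mask([]), create_mask([0]), create_mask([1])];
///
/// // Locking a ceiling-3 resource from a priority-1 task must mask both,
/// // i.e. every interrupt with ceiling >= priority > current. `lock`
/// // writes that mask to ICER on entry and to ISER on exit.
/// ```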
#[cfg(not(have_basepri))]
#[inline(always)]
pub unsafe fn lock<T, R, const M: usize>(
    ptr: *mut T,
    priority: &Priority,
    ceiling: u8,
    _nvic_prio_bits: u8,
    masks: &[Mask<M>; 3],
    f: impl FnOnce(&mut T) -> R,
) -> R {
    let current = priority.get();
    if current < ceiling {
        if ceiling >= 4 {
            // `ceiling >= 4` is the maximum logical priority on these
            // targets (2 NVIC priority bits), so mask everything with a
            // global critical section.
            // safe to manipulate outside critical section
            priority.set(ceiling);
            // execute closure under protection of raised system ceiling
            let r = interrupt::free(|_| f(&mut *ptr));
            // safe to manipulate outside critical section
            priority.set(current);
            r
        } else {
            // safe to manipulate outside critical section
            priority.set(ceiling);
            let mask = compute_mask(current, ceiling, masks);
            clear_enable_mask(mask);

            // execute closure under protection of raised system ceiling
            let r = f(&mut *ptr);

            set_enable_mask(mask);

            // safe to manipulate outside critical section
            priority.set(current);
            r
        }
    } else {
        // execute closure without raising system ceiling
        f(&mut *ptr)
    }
}

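/// OR together the masks of every interrupt with logical priority in the
/// range `from_prio < p <= to_prio` (`masks[i]` holds the interrupts bound
/// at logical priority `i + 1`, so the slice `masks[from_prio..to_prio]`
/// covers exactly those priorities).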
#[cfg(not(have_basepri))]
#[inline(always)]
fn compute_mask<const M: usize>(from_prio: u8, to_prio: u8, masks: &[Mask<M>; 3]) -> Mask<M> {
    let mut res = Mask([0; M]);
    masks[from_prio as usize..to_prio as usize]
        .iter()
        .for_each(|m| res |= *m);
    res
}

// enables interrupts
#[cfg(not(have_basepri))]
#[inline(always)]
unsafe fn set_enable_mask<const M: usize>(mask: Mask<M>) {
    for i in 0..M {
        // This check should involve compile time constants and be optimized out.
        if mask.0[i] != 0 {
            (*NVIC::PTR).iser[i].write(mask.0[i]);
        }
    }
}

// disables interrupts
#[cfg(not(have_basepri))]
#[inline(always)]
unsafe fn clear_enable_mask<const M: usize>(mask: Mask<M>) {
    for i in 0..M {
        // This check should involve compile time constants and be optimized out.
        if mask.0[i] != 0 {
            (*NVIC::PTR).icer[i].write(mask.0[i]);
        }
    }
}

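/// Map a logical RTIC priority (1 = lowest) to its hardware NVIC/BASEPRI
/// encoding (lower value = higher priority, kept in the upper
/// `nvic_prio_bits` bits).
///
/// # Example
///
/// ```ignore
/// // With 4 priority bits, logical priorities 1..=16 map to the hardware
/// // values 240, 224, ..., 0:
/// assert_eq!(logical2hw(1, 4), 240); // (16 - 1) << 4
/// assert_eq!(logical2hw(16, 4), 0);  // highest logical priority
/// ```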
#[inline]
#[must_use]
pub fn logical2hw(logical: u8, nvic_prio_bits: u8) -> u8 {
    ((1 << nvic_prio_bits) - logical) << (8 - nvic_prio_bits)
}

#[cfg(have_basepri)]
pub const fn create_mask<const N: usize, const M: usize>(_: [u32; N]) -> Mask<M> {
    Mask([0; M])
}

#[cfg(not(have_basepri))]
pub const fn create_mask<const N: usize, const M: usize>(list_of_shifts: [u32; N]) -> Mask<M> {
    let mut mask = Mask([0; M]);
    let mut i = 0;

    while i < N {
        let shift = list_of_shifts[i];
        i += 1;
        mask = mask.set_bit(shift);
    }

    mask
}

#[cfg(have_basepri)]
pub const fn compute_mask_chunks<const L: usize>(_: [u32; L]) -> usize {
    0
}

/// Compute the number of u32 chunks needed to store the Mask value.
/// On M0, M0+ this should always end up being 1.
/// On M23 we will pick a number that allows us to store the highest index used by the code.
/// This means the amount of overhead will vary based on the actual interrupts used by the code.
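///
/// # Example
///
/// ```ignore
/// // The highest interrupt ID in use is 37, so (37 + 32) / 32 == 2 chunks:
/// const M: usize = compute_mask_chunks([3, 37]); // == 2
/// ```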
#[cfg(not(have_basepri))]
pub const fn compute_mask_chunks<const L: usize>(ids: [u32; L]) -> usize {
    let mut max: usize = 0;
    let mut i = 0;

    while i < L {
        let id = ids[i] as usize;
        i += 1;

        if id > max {
            max = id;
        }
    }
    (max + 32) / 32
}

#[cfg(have_basepri)]
pub const fn no_basepri_panic() {
    // For non-v6 all is fine
}

#[cfg(not(have_basepri))]
pub const fn no_basepri_panic() {
    panic!("Exceptions with shared resources are not allowed when compiling for thumbv6 or thumbv8m.base. Use local resources or `#[lock_free]` shared resources");
}