1#![allow(clippy::inline_always)]
2use core::{
3 cell::Cell,
4 sync::atomic::{AtomicBool, Ordering},
5};
6
7pub use crate::tq::{NotReady, TimerQueue};
8pub use bare_metal::CriticalSection;
9pub use cortex_m::{
10 asm::nop,
11 asm::wfi,
12 interrupt,
13 peripheral::{scb::SystemHandler, DWT, NVIC, SCB, SYST},
14 Peripherals,
15};
16pub use heapless::sorted_linked_list::SortedLinkedList;
17pub use heapless::spsc::Queue;
18pub use heapless::BinaryHeap;
19pub use rtic_monotonic as monotonic;
20
// NOTE(review): names presumably mean "software-task free queue" /
// "software-task ready queue" — confirm against the code generator.
/// Queue of free `u8` slot indices.
pub type SCFQ<const N: usize> = Queue<u8, N>;
/// Queue of `(task, u8 slot index)` entries pending execution.
pub type SCRQ<T, const N: usize> = Queue<(T, u8), N>;
23
/// Multi-word NVIC bit mask: `M` 32-bit chunks with one bit per
/// interrupt ID (chunk count is produced by `compute_mask_chunks`).
#[derive(Copy, Clone)]
pub struct Mask<const M: usize>([u32; M]);

impl<const M: usize> core::ops::BitOrAssign for Mask<M> {
    /// Chunk-wise OR-assignment of two equally sized masks.
    fn bitor_assign(&mut self, rhs: Self) {
        for (dst, src) in self.0.iter_mut().zip(rhs.0.iter()) {
            *dst |= *src;
        }
    }
}
40
#[cfg(not(have_basepri))]
impl<const M: usize> Mask<M> {
    /// Returns a copy of `self` with bit `bit` set (const-evaluable,
    /// so it takes and returns `self` by value).
    ///
    /// Panics (at const-evaluation time when used from `create_mask`)
    /// if `bit` does not fit in the `M` 32-bit chunks of this mask.
    const fn set_bit(mut self, bit: u32) -> Self {
        // Which 32-bit chunk the bit lives in.
        let block = bit / 32;

        if block as usize >= M {
            panic!("Generating masks for thumbv6/thumbv8m.base failed! Are you compiling for thumbv6 on an thumbv7 MCU or using an unsupported thumbv8m.base MCU?");
        }

        // Bit position within that chunk.
        let offset = bit - (block * 32);
        self.0[block as usize] |= 1 << offset;
        self
    }
}
56
57#[cfg(have_basepri)]
58use cortex_m::register::basepri;
59
/// Runs the task body `f` that was dispatched at logical `priority`,
/// restoring BASEPRI when it returns (BASEPRI targets only).
///
/// The `priority == 1` fast path skips reading BASEPRI and resets it
/// to 0 after `f`; this presumes handlers at the lowest task priority
/// always start with BASEPRI == 0 — NOTE(review): confirm this
/// invariant against the generated dispatchers.
#[cfg(have_basepri)]
#[inline(always)]
pub fn run<F>(priority: u8, f: F)
where
    F: FnOnce(),
{
    if priority == 1 {
        f();
        // Reset BASEPRI to "no masking" instead of save/restore.
        unsafe { basepri::write(0) }
    } else {
        // Save the masking level on entry and restore it after `f`.
        let initial = basepri::read();
        f();
        unsafe { basepri::write(initial) }
    }
}
76
/// Runs the task body `f` directly: targets without BASEPRI need no
/// priority-register bookkeeping around the dispatched task, so the
/// priority argument is unused here.
#[cfg(not(have_basepri))]
#[inline(always)]
pub fn run<F>(_priority: u8, f: F)
where
    F: FnOnce(),
{
    f()
}
85
/// One-shot spin barrier: one context calls [`Barrier::release`], and
/// any context blocked in [`Barrier::wait`] proceeds once it observes
/// the flag (Release store paired with Acquire loads).
pub struct Barrier {
    inner: AtomicBool,
}

impl Barrier {
    /// Creates a barrier in the "not yet released" state.
    pub const fn new() -> Self {
        Self {
            inner: AtomicBool::new(false),
        }
    }

    /// Releases the barrier, letting all waiters through.
    pub fn release(&self) {
        self.inner.store(true, Ordering::Release);
    }

    /// Spins until the barrier has been released.
    pub fn wait(&self) {
        loop {
            if self.inner.load(Ordering::Acquire) {
                break;
            }
            core::hint::spin_loop();
        }
    }
}
107
/// Tracker for the current logical priority of the executing context.
///
/// Interior mutability via [`Cell`] lets generated code share it by
/// reference while `lock` temporarily raises and restores the value.
pub struct Priority {
    inner: Cell<u8>,
}

impl Priority {
    /// Creates a new priority tracker starting at `value`.
    ///
    /// # Safety
    ///
    /// `value` must match the real priority of the calling context;
    /// the ceiling bookkeeping in `lock` relies on it being accurate.
    #[inline(always)]
    pub unsafe fn new(value: u8) -> Self {
        Self {
            inner: Cell::new(value),
        }
    }

    /// Updates the tracked priority (crate-internal; used by `lock`).
    #[inline(always)]
    fn set(&self, value: u8) {
        self.inner.set(value)
    }

    /// Reads the tracked priority (crate-internal; used by `lock`).
    #[inline(always)]
    fn get(&self) -> u8 {
        self.inner.get()
    }
}
139
/// Compile-time query: `true` when the target supports the BASEPRI
/// register (the `have_basepri` cfg is set), `false` otherwise.
pub const fn have_basepri() -> bool {
    cfg!(have_basepri)
}
152
/// Compile-time check that `T: Send`; generated code instantiates this
/// so a missing bound surfaces as a readable monomorphization error.
#[inline(always)]
pub fn assert_send<T>()
where
    T: Send,
{
}
159
/// Compile-time check that `T: Sync`; generated code instantiates this
/// so a missing bound surfaces as a readable monomorphization error.
#[inline(always)]
pub fn assert_sync<T>()
where
    T: Sync,
{
}
166
/// Compile-time check that `T` implements [`monotonic::Monotonic`];
/// instantiated by generated code to validate the chosen timer type.
#[inline(always)]
pub fn assert_monotonic<T>()
where
    T: monotonic::Monotonic,
{
}
173
/// Lock implementation for targets with BASEPRI (armv7-m and up).
///
/// Grants access to the resource behind `ptr` under the Stack Resource
/// Policy: when the current priority is below `ceiling`, the priority
/// is raised for the duration of `f` — via a full `interrupt::free`
/// critical section when the ceiling equals the maximum
/// (`1 << nvic_prio_bits`, which BASEPRI cannot mask), otherwise via a
/// BASEPRI write — and restored afterwards. At or above the ceiling,
/// `f` runs directly.
///
/// Fix: the deprecated `u8::max_value()` is replaced by the `u8::MAX`
/// associated constant (same value, current API).
///
/// # Safety
///
/// Must only be called from RTIC-generated code: `ptr` must be valid
/// for exclusive access at this ceiling, and `priority` must reflect
/// the caller's true priority.
#[cfg(have_basepri)]
#[inline(always)]
pub unsafe fn lock<T, R, const M: usize>(
    ptr: *mut T,
    priority: &Priority,
    ceiling: u8,
    nvic_prio_bits: u8,
    _mask: &[Mask<M>; 3],
    f: impl FnOnce(&mut T) -> R,
) -> R {
    let current = priority.get();

    if current < ceiling {
        if ceiling == (1 << nvic_prio_bits) {
            // Maximum ceiling: BASEPRI cannot mask it, so disable all
            // interrupts for the duration of `f`.
            priority.set(u8::MAX);
            let r = interrupt::free(|_| f(&mut *ptr));
            priority.set(current);
            r
        } else {
            // Raise the masking level, run `f`, then lower it again —
            // the bookkeeping and register writes happen in this exact
            // order on both sides of `f`.
            priority.set(ceiling);
            basepri::write(logical2hw(ceiling, nvic_prio_bits));
            let r = f(&mut *ptr);
            basepri::write(logical2hw(current, nvic_prio_bits));
            priority.set(current);
            r
        }
    } else {
        // Already at or above the ceiling: direct access is sound.
        f(&mut *ptr)
    }
}
238
/// Lock implementation for targets without BASEPRI (thumbv6m /
/// thumbv8m.base), using NVIC enable/disable masking.
///
/// When the current priority is below `ceiling`:
/// * `ceiling >= 4` — take a full `interrupt::free` critical section
///   (presumably 4 is the maximum logical level supported via masking
///   here — NOTE(review): confirm against the mask generation);
/// * otherwise — disable exactly the interrupts selected by the
///   precomputed `masks` for the `current..ceiling` range, run `f`,
///   then re-enable them.
///
/// At or above the ceiling, `f` runs directly.
///
/// # Safety
///
/// Must only be called from RTIC-generated code: `ptr` must be valid
/// for exclusive access at this ceiling, and `priority` must reflect
/// the caller's true priority.
#[cfg(not(have_basepri))]
#[inline(always)]
pub unsafe fn lock<T, R, const M: usize>(
    ptr: *mut T,
    priority: &Priority,
    ceiling: u8,
    _nvic_prio_bits: u8,
    masks: &[Mask<M>; 3],
    f: impl FnOnce(&mut T) -> R,
) -> R {
    let current = priority.get();
    if current < ceiling {
        if ceiling >= 4 {
            // Ceiling beyond the maskable range: global interrupt lock.
            priority.set(ceiling);
            let r = interrupt::free(|_| f(&mut *ptr));
            priority.set(current);
            r
        } else {
            // Mask only the interrupts that could contend for this
            // resource; order matters: raise bookkeeping, disable,
            // run, re-enable, restore bookkeeping.
            priority.set(ceiling);
            let mask = compute_mask(current, ceiling, masks);
            clear_enable_mask(mask);

            let r = f(&mut *ptr);

            set_enable_mask(mask);

            priority.set(current);
            r
        }
    } else {
        // Already at or above the ceiling: direct access is sound.
        f(&mut *ptr)
    }
}
323
324#[cfg(not(have_basepri))]
325#[inline(always)]
326fn compute_mask<const M: usize>(from_prio: u8, to_prio: u8, masks: &[Mask<M>; 3]) -> Mask<M> {
327 let mut res = Mask([0; M]);
328 masks[from_prio as usize..to_prio as usize]
329 .iter()
330 .for_each(|m| res |= *m);
331 res
332}
333
/// Re-enables the interrupts selected in `mask` by writing each
/// non-zero chunk to the corresponding NVIC ISER (Interrupt Set-Enable)
/// register.
///
/// # Safety
///
/// Intended to undo a prior `clear_enable_mask` with the same mask;
/// the caller must ensure re-enabling these interrupts is sound here.
#[cfg(not(have_basepri))]
#[inline(always)]
unsafe fn set_enable_mask<const M: usize>(mask: Mask<M>) {
    for i in 0..M {
        // Skip all-zero chunks to avoid needless volatile writes.
        if mask.0[i] != 0 {
            // SAFETY: NVIC::PTR points at the NVIC register block.
            (*NVIC::PTR).iser[i].write(mask.0[i]);
        }
    }
}
345
/// Disables the interrupts selected in `mask` by writing each non-zero
/// chunk to the corresponding NVIC ICER (Interrupt Clear-Enable)
/// register.
///
/// # Safety
///
/// The caller must later restore the previous state (see
/// `set_enable_mask`); assumes the masked interrupts were enabled
/// beforehand — NOTE(review): confirm RTIC keeps all managed
/// interrupts enabled outside of locks.
#[cfg(not(have_basepri))]
#[inline(always)]
unsafe fn clear_enable_mask<const M: usize>(mask: Mask<M>) {
    for i in 0..M {
        // Skip all-zero chunks to avoid needless volatile writes.
        if mask.0[i] != 0 {
            // SAFETY: NVIC::PTR points at the NVIC register block.
            (*NVIC::PTR).icer[i].write(mask.0[i]);
        }
    }
}
357
/// Converts an RTIC logical priority (higher number = higher priority)
/// into the hardware encoding, where numerically lower values mean
/// higher priority and only the top `nvic_prio_bits` bits of the byte
/// are significant.
#[inline]
#[must_use]
pub fn logical2hw(logical: u8, nvic_prio_bits: u8) -> u8 {
    // Invert within the `1..=(1 << nvic_prio_bits)` logical range,
    // then shift into the implemented (most-significant) bits.
    let inverted = (1 << nvic_prio_bits) - logical;
    inverted << (8 - nvic_prio_bits)
}
363
/// On targets with BASEPRI the NVIC enable-mask trick is unused, so
/// the list of interrupt IDs is ignored and an all-zero mask returned.
#[cfg(have_basepri)]
pub const fn create_mask<const N: usize, const M: usize>(_: [u32; N]) -> Mask<M> {
    Mask([0; M])
}
368
/// Builds a `Mask` with one bit set per entry of `list_of_shifts`
/// (each entry being an interrupt ID / bit position).
///
/// Const-evaluated; `set_bit` panics if an ID does not fit in the `M`
/// chunks of the mask.
#[cfg(not(have_basepri))]
pub const fn create_mask<const N: usize, const M: usize>(list_of_shifts: [u32; N]) -> Mask<M> {
    let mut mask = Mask([0; M]);
    let mut i = 0;

    // Manual `while` loop: iterator adapters are not usable in const fns.
    while i < N {
        let shift = list_of_shifts[i];
        i += 1;
        mask = mask.set_bit(shift);
    }

    mask
}
382
/// On targets with BASEPRI no mask storage is needed: zero chunks.
#[cfg(have_basepri)]
pub const fn compute_mask_chunks<const L: usize>(_: [u32; L]) -> usize {
    0
}
387
/// Number of 32-bit chunks needed so the largest interrupt ID in `ids`
/// fits in a `Mask`: `max_id / 32 + 1` (computed as
/// `(max_id + 32) / 32`), so at least one chunk even for an empty list.
#[cfg(not(have_basepri))]
pub const fn compute_mask_chunks<const L: usize>(ids: [u32; L]) -> usize {
    let mut highest: usize = 0;
    let mut idx = 0;

    // Manual `while` loop: iterator adapters are not usable in const fns.
    while idx < L {
        let id = ids[idx] as usize;
        if id > highest {
            highest = id;
        }
        idx += 1;
    }

    (highest + 32) / 32
}
407
/// No-op on targets with BASEPRI: the restriction enforced by the
/// non-BASEPRI variant of this function does not apply there.
#[cfg(have_basepri)]
pub const fn no_basepri_panic() {}
412
/// Panics unconditionally with an explanatory message: on targets
/// without BASEPRI, exception handlers may not use shared resources
/// (generated code calls this to reject such configurations).
#[cfg(not(have_basepri))]
pub const fn no_basepri_panic() {
    panic!("Exceptions with shared resources are not allowed when compiling for thumbv6 or thumbv8m.base. Use local resources or `#[lock_free]` shared resources");
}