use core::{cell::UnsafeCell, marker::PhantomData, sync::atomic::Ordering};

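// Zero-sized marker that is !RefUnwindSafe (because it contains an UnsafeCell).
// Holding a PhantomData of it opts the wrapper types below out of RefUnwindSafe,
// presumably to keep their auto traits consistent with the crate's other backends.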
struct NotRefUnwindSafe(UnsafeCell<()>);
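// SAFETY: NotRefUnwindSafe is only ever used inside PhantomData, so no value of it
// is actually shared between threads; this impl exists so that the PhantomData field
// does not strip Sync from the wrapper types below.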
unsafe impl Sync for NotRefUnwindSafe {}

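// Thin wrapper around core's AtomicPtr. #[repr(transparent)] plus the zero-sized
// PhantomData field keeps the layout identical to core::sync::atomic::AtomicPtr<T>.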
#[repr(transparent)]
pub(crate) struct AtomicPtr<T> {
    inner: core::sync::atomic::AtomicPtr<T>,
    _not_ref_unwind_safe: PhantomData<NotRefUnwindSafe>,
}
impl<T> AtomicPtr<T> {
    #[inline]
    pub(crate) const fn new(v: *mut T) -> Self {
        Self { inner: core::sync::atomic::AtomicPtr::new(v), _not_ref_unwind_safe: PhantomData }
    }
    #[inline]
    pub(crate) fn is_lock_free() -> bool {
        Self::IS_ALWAYS_LOCK_FREE
    }
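    // core's atomic types are guaranteed to be lock-free wherever they are available,
    // so pointer-sized atomics are always lock-free here.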
    pub(crate) const IS_ALWAYS_LOCK_FREE: bool = true;
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn load(&self, order: Ordering) -> *mut T {
        crate::utils::assert_load_ordering(order);
        self.inner.load(order)
    }
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn store(&self, ptr: *mut T, order: Ordering) {
        crate::utils::assert_store_ordering(order);
        self.inner.store(ptr, order);
    }
    const_fn! {
        const_if: #[cfg(not(portable_atomic_no_const_raw_ptr_deref))];
        #[inline]
        pub(crate) const fn as_ptr(&self) -> *mut *mut T {
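            // SAFETY: Self is #[repr(transparent)] over core's AtomicPtr<T>, which is
            // documented to have the same in-memory representation as *mut T (and thus
            // as UnsafeCell<*mut T>), so this cast and dereference are valid.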
            unsafe { (*(self as *const Self as *const UnsafeCell<*mut T>)).get() }
        }
    }
}
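// Compare-and-swap operations are only provided when the target supports atomic CAS
// on pointer-sized values.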
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_cas)))]
#[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
impl<T> AtomicPtr<T> {
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn compare_exchange(
        &self,
        current: *mut T,
        new: *mut T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        crate::utils::assert_compare_exchange_ordering(success, failure);
        // On toolchains that do not allow the failure ordering to be stronger than the
        // success ordering, upgrade the success ordering instead.
        #[cfg(portable_atomic_no_stronger_failure_ordering)]
        let success = crate::utils::upgrade_success_ordering(success, failure);
        self.inner.compare_exchange(current, new, success, failure)
    }
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn compare_exchange_weak(
        &self,
        current: *mut T,
        new: *mut T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        crate::utils::assert_compare_exchange_ordering(success, failure);
        #[cfg(portable_atomic_no_stronger_failure_ordering)]
        let success = crate::utils::upgrade_success_ordering(success, failure);
        self.inner.compare_exchange_weak(current, new, success, failure)
    }
}
impl<T> core::ops::Deref for AtomicPtr<T> {
    type Target = core::sync::atomic::AtomicPtr<T>;
    #[inline]
    #[cfg_attr(miri, track_caller)]
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

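// Defines a newtype wrapper around each of core's atomic integer types, mirroring the
// AtomicPtr wrapper above and adding a few extra operations.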
macro_rules! atomic_int {
    ($atomic_type:ident, $int_type:ident) => {
        #[repr(transparent)]
        pub(crate) struct $atomic_type {
            inner: core::sync::atomic::$atomic_type,
            _not_ref_unwind_safe: PhantomData<NotRefUnwindSafe>,
        }
        #[cfg_attr(
            portable_atomic_no_cfg_target_has_atomic,
            cfg(not(portable_atomic_no_atomic_cas))
        )]
        #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
        impl_default_no_fetch_ops!($atomic_type, $int_type);
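        // On x86/x86_64 (with usable inline assembly and outside Miri/ThreadSanitizer),
        // the default bit operations are skipped here, presumably because optimized
        // arch-specific implementations are provided elsewhere in the crate.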
        #[cfg(not(all(
            any(target_arch = "x86", target_arch = "x86_64"),
            not(any(miri, portable_atomic_sanitize_thread)),
            any(not(portable_atomic_no_asm), portable_atomic_unstable_asm),
        )))]
        #[cfg_attr(
            portable_atomic_no_cfg_target_has_atomic,
            cfg(not(portable_atomic_no_atomic_cas))
        )]
        #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
        impl_default_bit_opts!($atomic_type, $int_type);
        impl $atomic_type {
            #[inline]
            pub(crate) const fn new(v: $int_type) -> Self {
                Self {
                    inner: core::sync::atomic::$atomic_type::new(v),
                    _not_ref_unwind_safe: PhantomData,
                }
            }
            #[inline]
            pub(crate) fn is_lock_free() -> bool {
                Self::IS_ALWAYS_LOCK_FREE
            }
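            // 64-bit atomics on ESP-IDF targets (riscv32/xtensa) are not lock-free, so
            // only types smaller than 8 bytes are reported as always lock-free there.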
            pub(crate) const IS_ALWAYS_LOCK_FREE: bool = cfg!(not(all(
                any(target_arch = "riscv32", target_arch = "xtensa"),
                target_os = "espidf",
            ))) | (core::mem::size_of::<$int_type>() < 8);
            #[inline]
            #[cfg_attr(
                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
                track_caller
            )]
            pub(crate) fn load(&self, order: Ordering) -> $int_type {
                crate::utils::assert_load_ordering(order);
                self.inner.load(order)
            }
            #[inline]
            #[cfg_attr(
                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
                track_caller
            )]
            pub(crate) fn store(&self, val: $int_type, order: Ordering) {
                crate::utils::assert_store_ordering(order);
                self.inner.store(val, order);
            }
            const_fn! {
                const_if: #[cfg(not(portable_atomic_no_const_raw_ptr_deref))];
                #[inline]
                pub(crate) const fn as_ptr(&self) -> *mut $int_type {
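                    // SAFETY: Self is #[repr(transparent)] over core's atomic integer type,
                    // which has the same in-memory representation as $int_type (and thus as
                    // UnsafeCell<$int_type>), so this cast and dereference are valid.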
                    unsafe {
                        (*(self as *const Self as *const UnsafeCell<$int_type>)).get()
                    }
                }
            }
        }
        #[cfg_attr(
            portable_atomic_no_cfg_target_has_atomic,
            cfg(not(portable_atomic_no_atomic_cas))
        )]
        #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
        impl $atomic_type {
            #[inline]
            #[cfg_attr(
                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
                track_caller
            )]
            pub(crate) fn compare_exchange(
                &self,
                current: $int_type,
                new: $int_type,
                success: Ordering,
                failure: Ordering,
            ) -> Result<$int_type, $int_type> {
                crate::utils::assert_compare_exchange_ordering(success, failure);
                #[cfg(portable_atomic_no_stronger_failure_ordering)]
                let success = crate::utils::upgrade_success_ordering(success, failure);
                self.inner.compare_exchange(current, new, success, failure)
            }
            #[inline]
            #[cfg_attr(
                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
                track_caller
            )]
            pub(crate) fn compare_exchange_weak(
                &self,
                current: $int_type,
                new: $int_type,
                success: Ordering,
                failure: Ordering,
            ) -> Result<$int_type, $int_type> {
                crate::utils::assert_compare_exchange_ordering(success, failure);
                #[cfg(portable_atomic_no_stronger_failure_ordering)]
                let success = crate::utils::upgrade_success_ordering(success, failure);
                self.inner.compare_exchange_weak(current, new, success, failure)
            }
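            // CAS loop used to implement operations that are not available natively on
            // every supported target/toolchain.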
            #[allow(dead_code)]
            #[inline]
            #[cfg_attr(miri, track_caller)]
            fn fetch_update_<F>(&self, order: Ordering, mut f: F) -> $int_type
            where
                F: FnMut($int_type) -> $int_type,
            {
                // This is a private helper and `f` only operates on the value loaded, so
                // the initial load and the failed CAS do not need to synchronize.
                let mut prev = self.load(Ordering::Relaxed);
                loop {
                    let next = f(prev);
                    match self.compare_exchange_weak(prev, next, order, Ordering::Relaxed) {
                        Ok(x) => return x,
                        Err(next_prev) => prev = next_prev,
                    }
                }
            }
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
                #[cfg(not(portable_atomic_no_atomic_min_max))]
                {
                    #[cfg(any(
                        all(
                            any(target_arch = "aarch64", target_arch = "arm64ec"),
                            any(target_feature = "lse", portable_atomic_target_feature = "lse"),
                        ),
                        all(
                            target_arch = "arm",
                            not(any(
                                target_feature = "v6",
                                portable_atomic_target_feature = "v6",
                            )),
                        ),
                        target_arch = "mips",
                        target_arch = "mips32r6",
                        target_arch = "mips64",
                        target_arch = "mips64r6",
                        target_arch = "powerpc",
                        target_arch = "powerpc64",
                    ))]
                    {
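                        // For 8-/16-bit types on the targets listed above, fall back to the
                        // CAS loop; the native sub-word fetch_max/fetch_min have reportedly
                        // been problematic there.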
                        if core::mem::size_of::<$int_type>() <= 2 {
                            return self.fetch_update_(order, |x| core::cmp::max(x, val));
                        }
                    }
                    self.inner.fetch_max(val, order)
                }
                #[cfg(portable_atomic_no_atomic_min_max)]
                {
                    self.fetch_update_(order, |x| core::cmp::max(x, val))
                }
            }
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
                #[cfg(not(portable_atomic_no_atomic_min_max))]
                {
                    #[cfg(any(
                        all(
                            any(target_arch = "aarch64", target_arch = "arm64ec"),
                            any(target_feature = "lse", portable_atomic_target_feature = "lse"),
                        ),
                        all(
                            target_arch = "arm",
                            not(any(
                                target_feature = "v6",
                                portable_atomic_target_feature = "v6",
                            )),
                        ),
                        target_arch = "mips",
                        target_arch = "mips32r6",
                        target_arch = "mips64",
                        target_arch = "mips64r6",
                        target_arch = "powerpc",
                        target_arch = "powerpc64",
                    ))]
                    {
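                        // Same sub-word fallback as in fetch_max above.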
                        if core::mem::size_of::<$int_type>() <= 2 {
                            return self.fetch_update_(order, |x| core::cmp::min(x, val));
                        }
                    }
                    self.inner.fetch_min(val, order)
                }
                #[cfg(portable_atomic_no_atomic_min_max)]
                {
                    self.fetch_update_(order, |x| core::cmp::min(x, val))
                }
            }
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn fetch_not(&self, order: Ordering) -> $int_type {
                self.fetch_xor(!0, order)
            }
            #[cfg(not(all(
                any(target_arch = "x86", target_arch = "x86_64"),
                not(any(miri, portable_atomic_sanitize_thread)),
                any(not(portable_atomic_no_asm), portable_atomic_unstable_asm),
            )))]
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn not(&self, order: Ordering) {
                self.fetch_not(order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn fetch_neg(&self, order: Ordering) -> $int_type {
                self.fetch_update_(order, $int_type::wrapping_neg)
            }
            #[cfg(not(all(
                any(target_arch = "x86", target_arch = "x86_64"),
                not(any(miri, portable_atomic_sanitize_thread)),
                any(not(portable_atomic_no_asm), portable_atomic_unstable_asm),
            )))]
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn neg(&self, order: Ordering) {
                self.fetch_neg(order);
            }
        }
        impl core::ops::Deref for $atomic_type {
            type Target = core::sync::atomic::$atomic_type;
            #[inline]
            #[cfg_attr(miri, track_caller)]
            fn deref(&self) -> &Self::Target {
                &self.inner
            }
        }
    };
}

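// Instantiate the wrapper for each integer width, gated on what core provides for the
// current target.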
atomic_int!(AtomicIsize, isize);
atomic_int!(AtomicUsize, usize);
#[cfg(not(portable_atomic_no_atomic_load_store))]
atomic_int!(AtomicI8, i8);
#[cfg(not(portable_atomic_no_atomic_load_store))]
atomic_int!(AtomicU8, u8);
#[cfg(not(portable_atomic_no_atomic_load_store))]
atomic_int!(AtomicI16, i16);
#[cfg(not(portable_atomic_no_atomic_load_store))]
atomic_int!(AtomicU16, u16);
#[cfg(not(portable_atomic_no_atomic_load_store))]
#[cfg(not(target_pointer_width = "16"))]
atomic_int!(AtomicI32, i32);
#[cfg(not(portable_atomic_no_atomic_load_store))]
#[cfg(not(target_pointer_width = "16"))]
atomic_int!(AtomicU32, u32);
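// 64-bit wrappers are only defined when the target has 64-bit atomics (target_has_atomic = "64"
// or a pointer width of at least 64 bits; older compilers without cfg(target_has_atomic) use the
// portable_atomic_no_atomic_64 cfg instead).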
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_64)))]
#[cfg_attr(
    not(portable_atomic_no_cfg_target_has_atomic),
    cfg(any(
        target_has_atomic = "64",
        not(any(target_pointer_width = "16", target_pointer_width = "32")),
    ))
)]
atomic_int!(AtomicI64, i64);
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_64)))]
#[cfg_attr(
    not(portable_atomic_no_cfg_target_has_atomic),
    cfg(any(
        target_has_atomic = "64",
        not(any(target_pointer_width = "16", target_pointer_width = "32")),
    ))
)]
atomic_int!(AtomicU64, u64);