1include!("macros.rs");
25
26#[cfg(not(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b")))]
27#[path = "../fallback/outline_atomics.rs"]
28mod fallback;
29
30#[cfg(not(portable_atomic_no_outline_atomics))]
31#[cfg(not(target_env = "sgx"))]
32#[cfg_attr(
33 not(target_feature = "sse"),
34 cfg(not(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b")))
35)]
36#[path = "../detect/x86_64.rs"]
37mod detect;
38
39#[cfg(not(portable_atomic_no_asm))]
40use core::arch::asm;
41use core::sync::atomic::Ordering;
42
43use crate::utils::{Pair, U128};
44
// Debug-asserts that the `cmpxchg16b` instruction is usable at this call site.
//
// When cmpxchg16b is statically enabled (compile-time target feature), this
// expands to nothing; otherwise it checks the runtime detection result.
macro_rules! debug_assert_cmpxchg16b {
    () => {
        #[cfg(not(any(
            target_feature = "cmpxchg16b",
            portable_atomic_target_feature = "cmpxchg16b",
        )))]
        {
            debug_assert!(detect::detect().has_cmpxchg16b());
        }
    };
}
// Debug-asserts that runtime detection reports atomic 128-bit VMOVDQA
// load/store support (which also implies cmpxchg16b availability is checked).
// Only defined when outline-atomics dispatch and the SSE path are in use.
#[cfg(not(any(portable_atomic_no_outline_atomics, target_env = "sgx")))]
#[cfg(target_feature = "sse")]
macro_rules! debug_assert_vmovdqa_atomic {
    () => {{
        debug_assert_cmpxchg16b!();
        debug_assert!(detect::detect().has_vmovdqa_atomic());
    }};
}
65
// Asm template modifier for pointer operands: on targets with 32-bit pointers
// (e.g. x32/ILP32), ":e" makes the register render as its 32-bit name so the
// memory operand width matches the pointer width; on 64-bit-pointer targets
// no modifier is needed.
#[cfg(not(any(portable_atomic_no_outline_atomics, target_env = "sgx")))]
#[cfg(target_feature = "sse")]
#[cfg(target_pointer_width = "32")]
macro_rules! ptr_modifier {
    () => {
        ":e"
    };
}
#[cfg(not(any(portable_atomic_no_outline_atomics, target_env = "sgx")))]
#[cfg(target_feature = "sse")]
#[cfg(target_pointer_width = "64")]
macro_rules! ptr_modifier {
    () => {
        ""
    };
}
82
// 128-bit compare-and-exchange via `lock cmpxchg16b`.
//
// Returns `(previous value, success)`. cmpxchg16b compares rdx:rax with the
// 16-byte memory operand; if equal it stores rcx:rbx and sets ZF, otherwise
// it loads the memory value into rdx:rax.
//
// # Safety
//
// - `dst` must be valid for reads and writes and 16-byte aligned.
// - The cmpxchg16b instruction must be available (statically, or per the
//   runtime detection asserted by `debug_assert_cmpxchg16b!`).
#[cfg_attr(
    not(portable_atomic_no_cmpxchg16b_target_feature),
    target_feature(enable = "cmpxchg16b")
)]
#[inline]
unsafe fn cmpxchg16b(dst: *mut u128, old: u128, new: u128) -> (u128, bool) {
    debug_assert!(dst as usize % 16 == 0);
    debug_assert_cmpxchg16b!();

    // SAFETY: the caller must uphold the safety contract above.
    unsafe {
        // success flag captured from ZF via `sete` (0 or 1)
        let r: u8;
        let old = U128 { whole: old };
        let new = U128 { whole: new };
        let (prev_lo, prev_hi);
        macro_rules! cmpxchg16b {
            ($rdi:tt) => {
                asm!(
                    // rbx cannot be used directly as an asm operand register,
                    // so the low half of `new` is swapped into rbx around the
                    // instruction and rbx is restored afterwards.
                    "xchg {rbx_tmp}, rbx", concat!("lock cmpxchg16b xmmword ptr [", $rdi, "]"),
                    // capture ZF (success) while rcx is no longer needed
                    "sete cl",
                    "mov rbx, {rbx_tmp}", rbx_tmp = inout(reg) new.pair.lo => _,
                    in("rcx") new.pair.hi,
                    inout("rax") old.pair.lo => prev_lo,
                    inout("rdx") old.pair.hi => prev_hi,
                    in($rdi) dst,
                    lateout("cl") r,
                    // cmpxchg16b modifies ZF, so `preserves_flags` cannot be used
                    options(nostack),
                )
            };
        }
        // Pointer register name must match the target's pointer width.
        #[cfg(target_pointer_width = "32")]
        cmpxchg16b!("edi");
        #[cfg(target_pointer_width = "64")]
        cmpxchg16b!("rdi");
        // `sete` only ever writes 0 or 1; informing the optimizer may remove a test.
        crate::utils::assert_unchecked(r == 0 || r == 1); (U128 { pair: Pair { lo: prev_lo, hi: prev_hi } }.whole, r != 0)
    }
}
143
// 128-bit atomic load using an aligned AVX `vmovdqa`.
//
// Only called when runtime detection reports that aligned 16-byte VMOVDQA
// accesses are atomic on this CPU (see `debug_assert_vmovdqa_atomic!`).
//
// # Safety
//
// - `src` must be valid for reads and 16-byte aligned.
// - AVX must be available and the VMOVDQA-atomic condition above must hold.
#[cfg(not(any(portable_atomic_no_outline_atomics, target_env = "sgx")))]
#[cfg(target_feature = "sse")]
#[target_feature(enable = "avx")]
#[inline]
unsafe fn atomic_load_vmovdqa(src: *mut u128) -> u128 {
    debug_assert!(src as usize % 16 == 0);
    debug_assert_vmovdqa_atomic!();

    // SAFETY: the caller must uphold the safety contract above.
    unsafe {
        let out: core::arch::x86_64::__m128i;
        asm!(
            concat!("vmovdqa {out}, xmmword ptr [{src", ptr_modifier!(), "}]"),
            src = in(reg) src,
            out = out(xmm_reg) out,
            options(nostack, preserves_flags),
        );
        // reinterpret the 16-byte vector register contents as u128
        core::mem::transmute(out)
    }
}
// 128-bit atomic store using an aligned AVX `vmovdqa`.
//
// For SeqCst, the plain store is followed by an `xchg` to a dummy stack slot:
// `xchg` with a memory operand is implicitly locked on x86, acting as a full
// barrier. Relaxed/Release need no extra fence on x86 (stores are not
// reordered with earlier operations).
//
// # Safety
//
// - `dst` must be valid for writes and 16-byte aligned.
// - AVX must be available and VMOVDQA must be atomic per runtime detection.
// - `order` must be Relaxed, Release, or SeqCst (others are unreachable).
#[cfg(not(any(portable_atomic_no_outline_atomics, target_env = "sgx")))]
#[cfg(target_feature = "sse")]
#[target_feature(enable = "avx")]
#[inline]
unsafe fn atomic_store_vmovdqa(dst: *mut u128, val: u128, order: Ordering) {
    debug_assert!(dst as usize % 16 == 0);
    debug_assert_vmovdqa_atomic!();

    // SAFETY: the caller must uphold the safety contract above.
    unsafe {
        let val: core::arch::x86_64::__m128i = core::mem::transmute(val);
        match order {
            Ordering::Relaxed | Ordering::Release => {
                asm!(
                    concat!("vmovdqa xmmword ptr [{dst", ptr_modifier!(), "}], {val}"),
                    dst = in(reg) dst,
                    val = in(xmm_reg) val,
                    options(nostack, preserves_flags),
                );
            }
            Ordering::SeqCst => {
                // dummy location for the barrier xchg; its value is never read
                let p = core::cell::UnsafeCell::new(core::mem::MaybeUninit::<u64>::uninit());
                asm!(
                    concat!("vmovdqa xmmword ptr [{dst", ptr_modifier!(), "}], {val}"),
                    // implicitly-locked xchg serves as a full memory barrier
                    concat!("xchg qword ptr [{p", ptr_modifier!(), "}], {tmp}"),
                    dst = in(reg) dst,
                    val = in(xmm_reg) val,
                    p = inout(reg) p.get() => _,
                    tmp = lateout(reg) _,
                    options(nostack, preserves_flags),
                );
            }
            _ => unreachable!(),
        }
    }
}
219
// Selects the load/store implementation based on runtime CPU detection:
// prefer the VMOVDQA path when atomic, then cmpxchg16b, then the seqcst
// fallback (only reachable when cmpxchg16b is not statically enabled).
// Expands to a function value suitable for `ifunc!` dispatch.
#[cfg(not(all(
    any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"),
    any(portable_atomic_no_outline_atomics, target_env = "sgx", not(target_feature = "sse")),
)))]
macro_rules! load_store_detect {
    (
        vmovdqa = $vmovdqa:ident
        cmpxchg16b = $cmpxchg16b:ident
        fallback = $fallback:ident
    ) => {{
        let cpuid = detect::detect();
        #[cfg(not(any(
            target_feature = "cmpxchg16b",
            portable_atomic_target_feature = "cmpxchg16b",
        )))]
        {
            // cmpxchg16b not statically guaranteed: fall back when absent
            if cpuid.has_cmpxchg16b() {
                // the vmovdqa path additionally requires SSE at compile time
                #[cfg(target_feature = "sse")]
                {
                    if cpuid.has_vmovdqa_atomic() { $vmovdqa } else { $cmpxchg16b }
                }
                #[cfg(not(target_feature = "sse"))]
                {
                    $cmpxchg16b
                }
            } else {
                fallback::$fallback
            }
        }
        #[cfg(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"))]
        {
            // cmpxchg16b statically available: only choose vmovdqa vs cmpxchg16b
            if cpuid.has_vmovdqa_atomic() { $vmovdqa } else { $cmpxchg16b }
        }
    }};
}
257
// 128-bit atomic load entry point.
//
// If cmpxchg16b is statically enabled and runtime dispatch is unavailable or
// pointless, call the cmpxchg16b-based load directly; otherwise dispatch via
// `ifunc!` using `load_store_detect!`. The ordering argument is unused:
// every path here provides at least the strongest required semantics.
//
// # Safety
//
// `src` must be valid for reads and writes (the cmpxchg16b-based load also
// writes) and 16-byte aligned.
#[inline]
unsafe fn atomic_load(src: *mut u128, _order: Ordering) -> u128 {
    #[cfg(all(
        any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"),
        any(portable_atomic_no_outline_atomics, target_env = "sgx", not(target_feature = "sse")),
    ))]
    // SAFETY: the caller must uphold the safety contract; cmpxchg16b is
    // statically enabled in this configuration.
    unsafe {
        atomic_load_cmpxchg16b(src)
    }
    #[cfg(not(all(
        any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"),
        any(portable_atomic_no_outline_atomics, target_env = "sgx", not(target_feature = "sse")),
    )))]
    // SAFETY: the caller must uphold the safety contract; the selected
    // function matches the detected CPU features.
    unsafe {
        ifunc!(unsafe fn(src: *mut u128) -> u128 {
            load_store_detect! {
                vmovdqa = atomic_load_vmovdqa
                cmpxchg16b = atomic_load_cmpxchg16b
                fallback = atomic_load_seqcst
            }
        })
    }
}
// 128-bit atomic load implemented as `lock cmpxchg16b` with expected and
// desired both zero: if the location holds 0 it stores 0 (no visible change),
// otherwise the actual value is loaded into rdx:rax. Note this performs a
// write either way, so `src` must be writable.
//
// # Safety
//
// - `src` must be valid for reads and writes and 16-byte aligned.
// - The cmpxchg16b instruction must be available.
#[cfg_attr(
    not(portable_atomic_no_cmpxchg16b_target_feature),
    target_feature(enable = "cmpxchg16b")
)]
#[inline]
unsafe fn atomic_load_cmpxchg16b(src: *mut u128) -> u128 {
    debug_assert!(src as usize % 16 == 0);
    debug_assert_cmpxchg16b!();

    // SAFETY: the caller must uphold the safety contract above.
    unsafe {
        let (out_lo, out_hi);
        macro_rules! cmpxchg16b {
            ($rdi:tt) => {
                asm!(
                    // save rbx (cannot be an asm operand), zero it for the
                    // "desired" low half, and restore it afterwards
                    "mov {rbx_tmp}, rbx", "xor rbx, rbx", concat!("lock cmpxchg16b xmmword ptr [", $rdi, "]"),
                    "mov rbx, {rbx_tmp}", rbx_tmp = out(reg) _,
                    in("rcx") 0_u64,
                    inout("rax") 0_u64 => out_lo,
                    inout("rdx") 0_u64 => out_hi,
                    in($rdi) src,
                    // cmpxchg16b modifies ZF, so `preserves_flags` cannot be used
                    options(nostack),
                )
            };
        }
        #[cfg(target_pointer_width = "32")]
        cmpxchg16b!("edi");
        #[cfg(target_pointer_width = "64")]
        cmpxchg16b!("rdi");
        U128 { pair: Pair { lo: out_lo, hi: out_hi } }.whole
    }
}
334
// 128-bit atomic store entry point.
//
// If cmpxchg16b is statically enabled and runtime dispatch is unavailable,
// store via the cmpxchg16b-based swap (always SeqCst, so `order` is ignored).
// Otherwise dispatch via `ifunc!`: the vmovdqa store is specialized per
// ordering with `fn_alias!`, since the ifunc table entries take no ordering.
//
// # Safety
//
// - `dst` must be valid for reads and writes and 16-byte aligned.
// - `order` must be Relaxed, Release, or SeqCst.
#[inline]
unsafe fn atomic_store(dst: *mut u128, val: u128, order: Ordering) {
    #[cfg(all(
        any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"),
        any(portable_atomic_no_outline_atomics, target_env = "sgx", not(target_feature = "sse")),
    ))]
    // SAFETY: the caller must uphold the safety contract; cmpxchg16b is
    // statically enabled in this configuration.
    unsafe {
        // the cmpxchg16b-based store is always SeqCst, which satisfies any order
        let _ = order;
        atomic_store_cmpxchg16b(dst, val);
    }
    #[cfg(not(all(
        any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"),
        any(portable_atomic_no_outline_atomics, target_env = "sgx", not(target_feature = "sse")),
    )))]
    // SAFETY: the caller must uphold the safety contract; the selected
    // function matches the detected CPU features.
    unsafe {
        // monomorphize the ordering into distinct ifunc-compatible functions
        #[cfg(target_feature = "sse")]
        fn_alias! {
            #[target_feature(enable = "avx")]
            unsafe fn(dst: *mut u128, val: u128);
            atomic_store_vmovdqa_non_seqcst = atomic_store_vmovdqa(Ordering::Release);
            atomic_store_vmovdqa_seqcst = atomic_store_vmovdqa(Ordering::SeqCst);
        }
        match order {
            Ordering::Relaxed | Ordering::Release => {
                ifunc!(unsafe fn(dst: *mut u128, val: u128) {
                    load_store_detect! {
                        vmovdqa = atomic_store_vmovdqa_non_seqcst
                        cmpxchg16b = atomic_store_cmpxchg16b
                        fallback = atomic_store_non_seqcst
                    }
                });
            }
            Ordering::SeqCst => {
                ifunc!(unsafe fn(dst: *mut u128, val: u128) {
                    load_store_detect! {
                        vmovdqa = atomic_store_vmovdqa_seqcst
                        cmpxchg16b = atomic_store_cmpxchg16b
                        fallback = atomic_store_seqcst
                    }
                });
            }
            _ => unreachable!(),
        }
    }
}
// 128-bit store implemented as a SeqCst swap whose previous value is
// discarded (there is no plain 128-bit atomic store with cmpxchg16b alone).
//
// # Safety
//
// Same as `atomic_swap_cmpxchg16b`: `dst` valid for reads and writes,
// 16-byte aligned, and cmpxchg16b available.
#[cfg_attr(
    not(portable_atomic_no_cmpxchg16b_target_feature),
    target_feature(enable = "cmpxchg16b")
)]
#[inline]
unsafe fn atomic_store_cmpxchg16b(dst: *mut u128, val: u128) {
    // SAFETY: the caller must uphold the safety contract above.
    unsafe {
        atomic_swap_cmpxchg16b(dst, val, Ordering::SeqCst);
    }
}
403
// 128-bit compare-exchange entry point.
//
// When cmpxchg16b is statically enabled, call it directly; otherwise dispatch
// via `ifunc!` between the cmpxchg16b implementation and the seqcst fallback.
// Ordering arguments are ignored: all paths provide SeqCst semantics, which
// satisfies any requested success/failure orderings.
//
// # Safety
//
// `dst` must be valid for reads and writes and 16-byte aligned.
#[inline]
unsafe fn atomic_compare_exchange(
    dst: *mut u128,
    old: u128,
    new: u128,
    _success: Ordering,
    _failure: Ordering,
) -> Result<u128, u128> {
    #[cfg(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"))]
    // SAFETY: the caller must uphold the safety contract; cmpxchg16b is
    // statically enabled here.
    let (prev, ok) = unsafe { cmpxchg16b(dst, old, new) };
    #[cfg(not(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b")))]
    // SAFETY: the caller must uphold the safety contract; the selected
    // function matches the detected CPU features.
    let (prev, ok) = unsafe {
        ifunc!(unsafe fn(dst: *mut u128, old: u128, new: u128) -> (u128, bool) {
            if detect::detect().has_cmpxchg16b() {
                cmpxchg16b
            } else {
                fallback::atomic_compare_exchange_seqcst
            }
        })
    };
    if ok { Ok(prev) } else { Err(prev) }
}
432
// cmpxchg16b never fails spuriously, so weak CAS is the same as strong CAS.
use self::atomic_compare_exchange as atomic_compare_exchange_weak;
435
// 128-bit atomic swap: load the current value, then loop on
// `lock cmpxchg16b` until the exchange succeeds. The ordering argument is
// ignored (lock-prefixed instructions are full barriers on x86).
//
// # Safety
//
// - `dst` must be valid for reads and writes and 16-byte aligned.
// - The cmpxchg16b instruction must be available.
#[cfg_attr(
    not(portable_atomic_no_cmpxchg16b_target_feature),
    target_feature(enable = "cmpxchg16b")
)]
#[inline]
unsafe fn atomic_swap_cmpxchg16b(dst: *mut u128, val: u128, _order: Ordering) -> u128 {
    debug_assert!(dst as usize % 16 == 0);
    debug_assert_cmpxchg16b!();

    // SAFETY: the caller must uphold the safety contract above.
    unsafe {
        let val = U128 { whole: val };
        let (mut prev_lo, mut prev_hi);
        macro_rules! cmpxchg16b {
            ($rdi:tt) => {
                asm!(
                    // swap val.lo into rbx (rbx cannot be an asm operand),
                    // seed rdx:rax with the current memory value
                    "xchg {rbx_tmp}, rbx", concat!("mov rax, qword ptr [", $rdi, "]"),
                    concat!("mov rdx, qword ptr [", $rdi, " + 8]"),
                    // retry loop: on failure cmpxchg16b refreshes rdx:rax
                    "2:",
                    concat!("lock cmpxchg16b xmmword ptr [", $rdi, "]"),
                    "jne 2b",
                    "mov rbx, {rbx_tmp}", rbx_tmp = inout(reg) val.pair.lo => _,
                    in("rcx") val.pair.hi,
                    out("rax") prev_lo,
                    out("rdx") prev_hi,
                    in($rdi) dst,
                    // cmpxchg16b modifies ZF, so `preserves_flags` cannot be used
                    options(nostack),
                )
            };
        }
        #[cfg(target_pointer_width = "32")]
        cmpxchg16b!("edi");
        #[cfg(target_pointer_width = "64")]
        cmpxchg16b!("rdi");
        U128 { pair: Pair { lo: prev_lo, hi: prev_hi } }.whole
    }
}
496
// Generates a 128-bit read-modify-write operation taking a value argument
// (e.g. add, and, max) as a cmpxchg16b retry loop.
//
// Register convention inside the generated asm: rdx:rax holds the current
// value (refreshed by cmpxchg16b on failure), r8:rsi holds `val`, and the
// `$op` instruction sequence must leave the desired new value in rcx:rbx.
// Returns the previous value.
macro_rules! atomic_rmw_cas_3 {
    ($name:ident, $($op:tt)*) => {
        #[cfg_attr(
            not(portable_atomic_no_cmpxchg16b_target_feature),
            target_feature(enable = "cmpxchg16b")
        )]
        #[inline]
        unsafe fn $name(dst: *mut u128, val: u128, _order: Ordering) -> u128 {
            debug_assert!(dst as usize % 16 == 0);
            debug_assert_cmpxchg16b!();
            // SAFETY: the caller must guarantee that `dst` is valid for reads
            // and writes, 16-byte aligned, and that cmpxchg16b is available.
            unsafe {
                let val = U128 { whole: val };
                let (mut prev_lo, mut prev_hi);
                macro_rules! cmpxchg16b {
                    ($rdi:tt) => {
                        asm!(
                            // save rbx (not usable as an asm operand) and seed
                            // rdx:rax with the current memory value
                            "mov {rbx_tmp}, rbx", concat!("mov rax, qword ptr [", $rdi, "]"),
                            concat!("mov rdx, qword ptr [", $rdi, " + 8]"),
                            "2:",
                            // $op computes the new value into rcx:rbx
                            $($op)*
                            concat!("lock cmpxchg16b xmmword ptr [", $rdi, "]"),
                            "jne 2b",
                            "mov rbx, {rbx_tmp}", rbx_tmp = out(reg) _,
                            out("rcx") _,
                            out("rax") prev_lo,
                            out("rdx") prev_hi,
                            in($rdi) dst,
                            in("rsi") val.pair.lo,
                            in("r8") val.pair.hi,
                            // cmpxchg16b and $op modify flags
                            options(nostack),
                        )
                    };
                }
                #[cfg(target_pointer_width = "32")]
                cmpxchg16b!("edi");
                #[cfg(target_pointer_width = "64")]
                cmpxchg16b!("rdi");
                U128 { pair: Pair { lo: prev_lo, hi: prev_hi } }.whole
            }
        }
    };
}
// Generates a 128-bit read-modify-write operation taking no value argument
// (e.g. not, neg) as a cmpxchg16b retry loop.
//
// Register convention: rdx:rax holds the current value; the `$op` sequence
// must leave the desired new value in rcx:rbx. Returns the previous value.
macro_rules! atomic_rmw_cas_2 {
    ($name:ident, $($op:tt)*) => {
        #[cfg_attr(
            not(portable_atomic_no_cmpxchg16b_target_feature),
            target_feature(enable = "cmpxchg16b")
        )]
        #[inline]
        unsafe fn $name(dst: *mut u128, _order: Ordering) -> u128 {
            debug_assert!(dst as usize % 16 == 0);
            debug_assert_cmpxchg16b!();
            // SAFETY: the caller must guarantee that `dst` is valid for reads
            // and writes, 16-byte aligned, and that cmpxchg16b is available.
            unsafe {
                let (mut prev_lo, mut prev_hi);
                macro_rules! cmpxchg16b {
                    ($rdi:tt) => {
                        asm!(
                            // save rbx (not usable as an asm operand) and seed
                            // rdx:rax with the current memory value
                            "mov {rbx_tmp}, rbx", concat!("mov rax, qword ptr [", $rdi, "]"),
                            concat!("mov rdx, qword ptr [", $rdi, " + 8]"),
                            "2:",
                            // $op computes the new value into rcx:rbx
                            $($op)*
                            concat!("lock cmpxchg16b xmmword ptr [", $rdi, "]"),
                            "jne 2b",
                            "mov rbx, {rbx_tmp}", rbx_tmp = out(reg) _,
                            out("rcx") _,
                            out("rax") prev_lo,
                            out("rdx") prev_hi,
                            in($rdi) dst,
                            // cmpxchg16b and $op modify flags
                            options(nostack),
                        )
                    };
                }
                #[cfg(target_pointer_width = "32")]
                cmpxchg16b!("edi");
                #[cfg(target_pointer_width = "64")]
                cmpxchg16b!("rdi");
                U128 { pair: Pair { lo: prev_lo, hi: prev_hi } }.whole
            }
        }
    };
}
633
// Concrete RMW operations. In each instruction sequence, rdx:rax is the
// current value, r8:rsi is `val` (atomic_rmw_cas_3 only), and the new value
// must end up in rcx:rbx (see the register conventions on the macros above).

// 128-bit add: low halves with `add`, high halves with `adc` (carry chain).
atomic_rmw_cas_3! {
    atomic_add_cmpxchg16b,
    "mov rbx, rax",
    "add rbx, rsi",
    "mov rcx, rdx",
    "adc rcx, r8",
}
// 128-bit subtract: `sub` then `sbb` (borrow chain).
atomic_rmw_cas_3! {
    atomic_sub_cmpxchg16b,
    "mov rbx, rax",
    "sub rbx, rsi",
    "mov rcx, rdx",
    "sbb rcx, r8",
}
atomic_rmw_cas_3! {
    atomic_and_cmpxchg16b,
    "mov rbx, rax",
    "and rbx, rsi",
    "mov rcx, rdx",
    "and rcx, r8",
}
atomic_rmw_cas_3! {
    atomic_nand_cmpxchg16b,
    "mov rbx, rax",
    "and rbx, rsi",
    "not rbx",
    "mov rcx, rdx",
    "and rcx, r8",
    "not rcx",
}
atomic_rmw_cas_3! {
    atomic_or_cmpxchg16b,
    "mov rbx, rax",
    "or rbx, rsi",
    "mov rcx, rdx",
    "or rcx, r8",
}
atomic_rmw_cas_3! {
    atomic_xor_cmpxchg16b,
    "mov rbx, rax",
    "xor rbx, rsi",
    "mov rcx, rdx",
    "xor rcx, r8",
}

// 128-bit bitwise not of the current value.
atomic_rmw_cas_2! {
    atomic_not_cmpxchg16b,
    "mov rbx, rax",
    "not rbx",
    "mov rcx, rdx",
    "not rcx",
}
// 128-bit two's-complement negation: negate the low half, then compute
// 0 - hi - borrow for the high half.
atomic_rmw_cas_2! {
    atomic_neg_cmpxchg16b,
    "mov rbx, rax",
    "neg rbx",
    "mov rcx, 0",
    "sbb rcx, rdx",
}

// Min/max: the cmp + sbb pair performs a full 128-bit comparison of
// val (r8:rsi) against the current value (rdx:rax) — the sbb result is
// discarded, only the flags matter — then cmov selects either val or the
// current value into rcx:rbx. `cmovl`/`cmovge` give signed, `cmovb`/`cmovae`
// give unsigned semantics.
atomic_rmw_cas_3! {
    atomic_max_cmpxchg16b,
    "cmp rsi, rax",
    "mov rcx, r8",
    "sbb rcx, rdx",
    "mov rcx, r8",
    "cmovl rcx, rdx",
    "mov rbx, rsi",
    "cmovl rbx, rax",
}
atomic_rmw_cas_3! {
    atomic_umax_cmpxchg16b,
    "cmp rsi, rax",
    "mov rcx, r8",
    "sbb rcx, rdx",
    "mov rcx, r8",
    "cmovb rcx, rdx",
    "mov rbx, rsi",
    "cmovb rbx, rax",
}
atomic_rmw_cas_3! {
    atomic_min_cmpxchg16b,
    "cmp rsi, rax",
    "mov rcx, r8",
    "sbb rcx, rdx",
    "mov rcx, r8",
    "cmovge rcx, rdx",
    "mov rbx, rsi",
    "cmovge rbx, rax",
}
atomic_rmw_cas_3! {
    atomic_umin_cmpxchg16b,
    "cmp rsi, rax",
    "mov rcx, r8",
    "sbb rcx, rdx",
    "mov rcx, r8",
    "cmovae rcx, rdx",
    "mov rbx, rsi",
    "cmovae rbx, rax",
}
734
// Wires a public RMW entry point to its cmpxchg16b implementation.
//
// When cmpxchg16b is statically enabled, the entry point is a plain alias.
// Otherwise a wrapper is generated that dispatches at runtime via `ifunc!`
// between a SeqCst-specialized cmpxchg16b function (created with `fn_alias!`,
// since ifunc table entries take no ordering argument) and the fallback.
macro_rules! select_atomic_rmw {
    (
        unsafe fn $name:ident($($arg:tt)*) $(-> $ret_ty:ty)?;
        cmpxchg16b = $cmpxchg16b_fn:ident;
        fallback = $seqcst_fallback_fn:ident;
    ) => {
        #[cfg(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"))]
        use self::$cmpxchg16b_fn as $name;
        #[cfg(not(any(
            target_feature = "cmpxchg16b",
            portable_atomic_target_feature = "cmpxchg16b",
        )))]
        #[inline]
        unsafe fn $name($($arg)*, _order: Ordering) $(-> $ret_ty)? {
            fn_alias! {
                #[cfg_attr(
                    not(portable_atomic_no_cmpxchg16b_target_feature),
                    target_feature(enable = "cmpxchg16b")
                )]
                unsafe fn($($arg)*) $(-> $ret_ty)?;
                // SeqCst is the strongest ordering, so it satisfies any request
                cmpxchg16b_seqcst_fn = $cmpxchg16b_fn(Ordering::SeqCst);
            }
            // SAFETY: the caller must uphold the per-operation safety
            // contract; the selected function matches the detected features.
            unsafe {
                ifunc!(unsafe fn($($arg)*) $(-> $ret_ty)? {
                    if detect::detect().has_cmpxchg16b() {
                        cmpxchg16b_seqcst_fn
                    } else {
                        fallback::$seqcst_fallback_fn
                    }
                })
            }
        }
    };
}
776
// Public RMW entry points: each pairs a cmpxchg16b implementation from above
// with its seqcst fallback for CPUs without cmpxchg16b.
select_atomic_rmw! {
    unsafe fn atomic_swap(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_swap_cmpxchg16b;
    fallback = atomic_swap_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_add(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_add_cmpxchg16b;
    fallback = atomic_add_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_sub(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_sub_cmpxchg16b;
    fallback = atomic_sub_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_and(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_and_cmpxchg16b;
    fallback = atomic_and_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_nand(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_nand_cmpxchg16b;
    fallback = atomic_nand_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_or(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_or_cmpxchg16b;
    fallback = atomic_or_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_xor(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_xor_cmpxchg16b;
    fallback = atomic_xor_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_max(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_max_cmpxchg16b;
    fallback = atomic_max_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_umax(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_umax_cmpxchg16b;
    fallback = atomic_umax_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_min(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_min_cmpxchg16b;
    fallback = atomic_min_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_umin(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_umin_cmpxchg16b;
    fallback = atomic_umin_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_not(dst: *mut u128) -> u128;
    cmpxchg16b = atomic_not_cmpxchg16b;
    fallback = atomic_not_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_neg(dst: *mut u128) -> u128;
    cmpxchg16b = atomic_neg_cmpxchg16b;
    fallback = atomic_neg_seqcst;
}
842
// Whether 128-bit atomics are lock-free at runtime: statically true when
// cmpxchg16b is a compile-time target feature, otherwise determined by
// runtime CPU detection.
#[inline]
fn is_lock_free() -> bool {
    #[cfg(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"))]
    {
        true
    }
    #[cfg(not(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b")))]
    {
        detect::detect().has_cmpxchg16b()
    }
}
// Compile-time lock-freedom guarantee: only when cmpxchg16b is statically
// enabled (runtime detection cannot promise anything ahead of time).
const IS_ALWAYS_LOCK_FREE: bool =
    cfg!(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"));

// Instantiate the public atomic types over the primitives defined above.
atomic128!(AtomicI128, i128, atomic_max, atomic_min);
atomic128!(AtomicU128, u128, atomic_umax, atomic_umin);
860
// Shared test-suite macros exercise the AtomicI128/AtomicU128 API and run a
// concurrency stress test against the u128 type.
#[allow(clippy::undocumented_unsafe_blocks, clippy::wildcard_imports)]
#[cfg(test)]
mod tests {
    use super::*;

    test_atomic_int!(i128);
    test_atomic_int!(u128);

    stress_test!(u128);
}