@@ -68,9 +68,9 @@
 //! ```
 
 #![no_std]
-#![feature(asm_experimental_arch)]
-#![cfg_attr(not(target_arch = "msp430"), feature(core_intrinsics))]
+#![cfg_attr(target_arch = "msp430", feature(asm_experimental_arch))]
 
+#[cfg(target_arch = "msp430")]
 use core::arch::asm;
 use core::cell::UnsafeCell;
 use core::fmt;
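The `cfg_attr` form above enables the nightly-only `asm_experimental_arch` feature gate only when the crate is actually built for MSP430, so other targets compile without unstable features. A minimal sketch of that gating pattern, separate from this commit, with an illustrative `nop` wrapper rather than code taken from the crate:

#![cfg_attr(target_arch = "msp430", feature(asm_experimental_arch))]
#![no_std]

#[cfg(target_arch = "msp430")]
use core::arch::asm;

/// On MSP430 this emits a real `nop`; on every other architecture it falls
/// back to an empty body, so the code keeps building on stable there.
#[cfg(target_arch = "msp430")]
pub fn nop() {
    unsafe { asm!("nop") };
}

#[cfg(not(target_arch = "msp430"))]
pub fn nop() {}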
|
@@ -676,42 +676,50 @@ macro_rules! atomic_int {
|
         impl AtomicOperations for $int_type {
             #[inline(always)]
             unsafe fn atomic_store(dst: *mut Self, val: Self) {
-                ::core::intrinsics::atomic_store(dst, val);
+                (*(dst as *const ::core::sync::atomic::$atomic_type))
+                    .store(val, ::core::sync::atomic::Ordering::SeqCst);
             }
 
             #[inline(always)]
             unsafe fn atomic_load(dst: *const Self) -> Self {
-                ::core::intrinsics::atomic_load(dst)
+                (*(dst as *const ::core::sync::atomic::$atomic_type))
+                    .load(::core::sync::atomic::Ordering::SeqCst)
             }
 
             #[inline(always)]
             unsafe fn atomic_add(dst: *mut Self, val: Self) {
-                ::core::intrinsics::atomic_xadd(dst, val);
+                (*(dst as *const ::core::sync::atomic::$atomic_type))
+                    .fetch_add(val, ::core::sync::atomic::Ordering::SeqCst);
             }
 
             #[inline(always)]
             unsafe fn atomic_sub(dst: *mut Self, val: Self) {
-                ::core::intrinsics::atomic_xsub(dst, val);
+                (*(dst as *const ::core::sync::atomic::$atomic_type))
+                    .fetch_sub(val, ::core::sync::atomic::Ordering::SeqCst);
             }
 
             #[inline(always)]
             unsafe fn atomic_and(dst: *mut Self, val: Self) {
-                ::core::intrinsics::atomic_and(dst, val);
+                (*(dst as *const ::core::sync::atomic::$atomic_type))
+                    .fetch_and(val, ::core::sync::atomic::Ordering::SeqCst);
             }
 
             #[inline(always)]
             unsafe fn atomic_clear(dst: *mut Self, val: Self) {
-                ::core::intrinsics::atomic_and(dst, !val);
+                (*(dst as *const ::core::sync::atomic::$atomic_type))
+                    .fetch_and(!val, ::core::sync::atomic::Ordering::SeqCst);
             }
 
             #[inline(always)]
             unsafe fn atomic_or(dst: *mut Self, val: Self) {
-                ::core::intrinsics::atomic_or(dst, val);
+                (*(dst as *const ::core::sync::atomic::$atomic_type))
+                    .fetch_or(val, ::core::sync::atomic::Ordering::SeqCst);
             }
 
             #[inline(always)]
             unsafe fn atomic_xor(dst: *mut Self, val: Self) {
-                ::core::intrinsics::atomic_xor(dst, val);
+                (*(dst as *const ::core::sync::atomic::$atomic_type))
+                    .fetch_xor(val, ::core::sync::atomic::Ordering::SeqCst);
             }
         }
     }
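The new method bodies rely on the documented guarantee that the `core::sync::atomic` integer types have the same in-memory representation as the underlying integers, so a raw integer pointer can be reinterpreted as a pointer to the matching atomic type and accessed through its methods. A minimal standalone sketch of that pattern, with an illustrative helper name and `u16`/`AtomicU16` standing in for the macro parameters:

use core::sync::atomic::{AtomicU16, Ordering};

/// Illustrative helper mirroring the pattern above: cast the raw pointer to
/// the matching atomic type and route the access through it.
///
/// Safety: `dst` must be non-null, properly aligned, and every concurrent
/// access to the same location must also be atomic.
unsafe fn atomic_add_u16(dst: *mut u16, val: u16) {
    (*(dst as *const AtomicU16)).fetch_add(val, Ordering::SeqCst);
}

fn main() {
    let mut x: u16 = 40;
    unsafe { atomic_add_u16(&mut x, 2) };
    assert_eq!(x, 42);
}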
|
|