diff --git a/src/data_race.rs b/src/data_race.rs
index d249d28d03..b8656627e6 100644
--- a/src/data_race.rs
+++ b/src/data_race.rs
@@ -73,9 +73,9 @@ use rustc_middle::{mir, ty::layout::TyAndLayout};
 use rustc_target::abi::Size;
 
 use crate::{
-    AllocId, AllocRange, ImmTy, Immediate, InterpResult, MPlaceTy, MemPlaceMeta, MemoryKind,
-    MiriEvalContext, MiriEvalContextExt, MiriMemoryKind, OpTy, Pointer, RangeMap, Scalar,
-    ScalarMaybeUninit, Tag, ThreadId, VClock, VTimestamp, VectorIdx,
+    AllocId, AllocRange, HelpersEvalContextExt, ImmTy, Immediate, InterpResult, MPlaceTy,
+    MemoryKind, MiriEvalContext, MiriEvalContextExt, MiriMemoryKind, OpTy, Pointer, RangeMap,
+    Scalar, ScalarMaybeUninit, Tag, ThreadId, VClock, VTimestamp, VectorIdx,
 };
 
 pub type AllocExtra = VClockAlloc;
@@ -450,12 +450,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicReadOp,
     ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
         let this = self.eval_context_ref();
-        let op_place = this.deref_operand(op)?;
-        let offset = Size::from_bytes(offset);
-
-        // Ensure that the following read at an offset is within bounds.
-        assert!(op_place.layout.size >= offset + layout.size);
-        let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
+        let value_place = this.deref_operand_and_offset(op, offset, layout)?;
         this.read_scalar_atomic(&value_place, atomic)
     }
 
@@ -469,12 +464,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicWriteOp,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        let op_place = this.deref_operand(op)?;
-        let offset = Size::from_bytes(offset);
-
-        // Ensure that the following read at an offset is within bounds.
-        assert!(op_place.layout.size >= offset + layout.size);
-        let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
+        let value_place = this.deref_operand_and_offset(op, offset, layout)?;
         this.write_scalar_atomic(value.into(), &value_place, atomic)
     }
 
diff --git a/src/helpers.rs b/src/helpers.rs
index 5b820218a9..ba5ebd3026 100644
--- a/src/helpers.rs
+++ b/src/helpers.rs
@@ -597,18 +597,31 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         }
     }
 
-    fn read_scalar_at_offset(
+    /// Calculates the MPlaceTy of an access at the given offset, with the given layout, on the dereferenced operand.
+    fn deref_operand_and_offset(
         &self,
         op: &OpTy<'tcx, Tag>,
         offset: u64,
         layout: TyAndLayout<'tcx>,
-    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, Tag>> {
         let this = self.eval_context_ref();
         let op_place = this.deref_operand(op)?;
         let offset = Size::from_bytes(offset);
-        // Ensure that the following read at an offset is within bounds
+
+        // Ensure that the access is within bounds.
         assert!(op_place.layout.size >= offset + layout.size);
         let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
+        Ok(value_place)
+    }
+
+    fn read_scalar_at_offset(
+        &self,
+        op: &OpTy<'tcx, Tag>,
+        offset: u64,
+        layout: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
+        let this = self.eval_context_ref();
+        let value_place = this.deref_operand_and_offset(op, offset, layout)?;
         this.read_scalar(&value_place.into())
     }
 
@@ -620,11 +633,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         layout: TyAndLayout<'tcx>,
     ) -> InterpResult<'tcx, ()> {
         let this = self.eval_context_mut();
-        let op_place = this.deref_operand(op)?;
-        let offset = Size::from_bytes(offset);
-        // Ensure that the following read at an offset is within bounds
-        assert!(op_place.layout.size >= offset + layout.size);
-        let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
+        let value_place = this.deref_operand_and_offset(op, offset, layout)?;
         this.write_scalar(value, &value_place.into())
     }
 
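
The new `deref_operand_and_offset` helper factors out the deref/bounds-check/offset sequence that data_race.rs and the shims previously duplicated. Below is a minimal standalone sketch of the invariant it asserts, using plain slices in place of Miri's `MPlaceTy` and `TyAndLayout`; the `Place` and `view_at_offset` names are hypothetical stand-ins, not Miri API:

```rust
// Hypothetical stand-ins: `Place` models a dereferenced allocation,
// `view_at_offset` models the bounds check in `deref_operand_and_offset`.
struct Place<'a> {
    bytes: &'a [u8], // the allocation behind the dereferenced operand
}

fn view_at_offset<'a>(place: &Place<'a>, offset: usize, field_size: usize) -> &'a [u8] {
    // Same invariant as the helper's assert: the whole access must fit.
    assert!(place.bytes.len() >= offset + field_size);
    &place.bytes[offset..offset + field_size]
}

fn main() {
    let buf = [0u8; 8];
    let place = Place { bytes: &buf };
    // E.g. a u32 field at byte offset 4, as the pthread shims read below.
    assert_eq!(view_at_offset(&place, 4, 4).len(), 4);
}
```
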
diff --git a/src/shims/posix/sync.rs b/src/shims/posix/sync.rs
index ea940df1c6..56d4969847 100644
--- a/src/shims/posix/sync.rs
+++ b/src/shims/posix/sync.rs
@@ -112,16 +112,28 @@ fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
     ecx: &mut MiriEvalContext<'mir, 'tcx>,
     mutex_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, MutexId> {
-    let id = mutex_get_id(ecx, mutex_op)?.to_u32()?;
-    if id == 0 {
-        // 0 is a default value and also not a valid mutex id. Need to allocate
-        // a new mutex.
-        let id = ecx.mutex_create();
-        mutex_set_id(ecx, mutex_op, id.to_u32_scalar())?;
-        Ok(id)
-    } else {
-        Ok(MutexId::from_u32(id))
-    }
+    let value_place = ecx.deref_operand_and_offset(mutex_op, 4, ecx.machine.layouts.u32)?;
+
+    ecx.mutex_get_or_create(|ecx, next_id| {
+        let (old, success) = ecx
+            .atomic_compare_exchange_scalar(
+                &value_place,
+                &ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
+                next_id.to_u32_scalar().into(),
+                AtomicRwOp::Relaxed,
+                AtomicReadOp::Relaxed,
+                false,
+            )?
+            .to_scalar_pair()
+            .expect("compare_exchange returns a scalar pair");
+
+        Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
+            // The CAS succeeded, so the caller of the closure needs to allocate next_id.
+            None
+        } else {
+            Some(MutexId::from_u32(old.to_u32().expect("layout is u32")))
+        })
+    })
 }
 
 // pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
@@ -156,16 +168,28 @@ fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>(
     ecx: &mut MiriEvalContext<'mir, 'tcx>,
     rwlock_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, RwLockId> {
-    let id = rwlock_get_id(ecx, rwlock_op)?.to_u32()?;
-    if id == 0 {
-        // 0 is a default value and also not a valid rwlock id. Need to allocate
-        // a new read-write lock.
-        let id = ecx.rwlock_create();
-        rwlock_set_id(ecx, rwlock_op, id.to_u32_scalar())?;
-        Ok(id)
-    } else {
-        Ok(RwLockId::from_u32(id))
-    }
+    let value_place = ecx.deref_operand_and_offset(rwlock_op, 4, ecx.machine.layouts.u32)?;
+
+    ecx.rwlock_get_or_create(|ecx, next_id| {
+        let (old, success) = ecx
+            .atomic_compare_exchange_scalar(
+                &value_place,
+                &ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
+                next_id.to_u32_scalar().into(),
+                AtomicRwOp::Relaxed,
+                AtomicReadOp::Relaxed,
+                false,
+            )?
+            .to_scalar_pair()
+            .expect("compare_exchange returns a scalar pair");
+
+        Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
+            // The CAS succeeded, so the caller of the closure needs to allocate next_id.
+            None
+        } else {
+            Some(RwLockId::from_u32(old.to_u32().expect("layout is u32")))
+        })
+    })
 }
 
 // pthread_condattr_t
@@ -228,16 +252,28 @@ fn cond_get_or_create_id<'mir, 'tcx: 'mir>(
     ecx: &mut MiriEvalContext<'mir, 'tcx>,
     cond_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, CondvarId> {
-    let id = cond_get_id(ecx, cond_op)?.to_u32()?;
-    if id == 0 {
-        // 0 is a default value and also not a valid conditional variable id.
-        // Need to allocate a new id.
-        let id = ecx.condvar_create();
-        cond_set_id(ecx, cond_op, id.to_u32_scalar())?;
-        Ok(id)
-    } else {
-        Ok(CondvarId::from_u32(id))
-    }
+    let value_place = ecx.deref_operand_and_offset(cond_op, 4, ecx.machine.layouts.u32)?;
+
+    ecx.condvar_get_or_create(|ecx, next_id| {
+        let (old, success) = ecx
+            .atomic_compare_exchange_scalar(
+                &value_place,
+                &ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
+                next_id.to_u32_scalar().into(),
+                AtomicRwOp::Relaxed,
+                AtomicReadOp::Relaxed,
+                false,
+            )?
+            .to_scalar_pair()
+            .expect("compare_exchange returns a scalar pair");
+
+        Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
+            // The CAS succeeded, so the caller of the closure needs to allocate next_id.
+            None
+        } else {
+            Some(CondvarId::from_u32(old.to_u32().expect("layout is u32")))
+        })
+    })
 }
 
 fn cond_get_clock_id<'mir, 'tcx: 'mir>(
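
All three pthread shims now share one shape: rather than a plain read of the id field followed by a separate write (two racing initializers could both observe 0 and install different ids), the field is claimed with a single compare-exchange. A minimal sketch of that pattern follows, with a plain `AtomicU32` standing in for the u32 field inside the pthread object; ids start at 1 here because 0 is the "uninitialized" sentinel, as in the shims:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

// `slot` stands in for the id field inside pthread_mutex_t (and friends);
// `next_id` is the candidate id the interpreter hands the closure.
fn get_or_create_id(slot: &AtomicU32, next_id: u32) -> u32 {
    assert_ne!(next_id, 0); // 0 means "not yet initialized"
    match slot.compare_exchange(0, next_id, Ordering::Relaxed, Ordering::Relaxed) {
        // CAS succeeded: this call published next_id, so the caller must now
        // allocate it (the `None` branch of the closures above).
        Ok(_) => next_id,
        // CAS failed: an id was already stored; reuse it (the `Some(old)` branch).
        Err(old) => old,
    }
}

fn main() {
    let slot = AtomicU32::new(0);
    assert_eq!(get_or_create_id(&slot, 1), 1); // first call initializes
    assert_eq!(get_or_create_id(&slot, 2), 1); // later calls reuse the stored id
}
```

The final `false` argument in the shims appears to disable spurious failure, which corresponds to the strong `compare_exchange` used in this sketch rather than `compare_exchange_weak`.
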
diff --git a/src/shims/windows/sync.rs b/src/shims/windows/sync.rs
index 78458dc6c9..6a6b2269e6 100644
--- a/src/shims/windows/sync.rs
+++ b/src/shims/windows/sync.rs
@@ -7,16 +7,28 @@ fn srwlock_get_or_create_id<'mir, 'tcx: 'mir>(
     ecx: &mut MiriEvalContext<'mir, 'tcx>,
     lock_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, RwLockId> {
-    let id = ecx.read_scalar_at_offset(lock_op, 0, ecx.machine.layouts.u32)?.to_u32()?;
-    if id == 0 {
-        // 0 is a default value and also not a valid rwlock id. Need to allocate
-        // a new rwlock.
-        let id = ecx.rwlock_create();
-        ecx.write_scalar_at_offset(lock_op, 0, id.to_u32_scalar(), ecx.machine.layouts.u32)?;
-        Ok(id)
-    } else {
-        Ok(RwLockId::from_u32(id))
-    }
+    let value_place = ecx.deref_operand_and_offset(lock_op, 0, ecx.machine.layouts.u32)?;
+
+    ecx.rwlock_get_or_create(|ecx, next_id| {
+        let (old, success) = ecx
+            .atomic_compare_exchange_scalar(
+                &value_place,
+                &ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
+                next_id.to_u32_scalar().into(),
+                AtomicRwOp::Relaxed,
+                AtomicReadOp::Relaxed,
+                false,
+            )?
+            .to_scalar_pair()
+            .expect("compare_exchange returns a scalar pair");
+
+        Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
+            // The CAS succeeded, so the caller of the closure needs to allocate next_id.
+            None
+        } else {
+            Some(RwLockId::from_u32(old.to_u32().expect("layout is u32")))
+        })
+    })
 }
 
 impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
diff --git a/src/sync.rs b/src/sync.rs
index ac1687a22e..0eebe4f654 100644
--- a/src/sync.rs
+++ b/src/sync.rs
@@ -215,6 +215,24 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         this.machine.threads.sync.mutexes.push(Default::default())
     }
 
+    #[inline]
+    /// Provides the closure with the next MutexId. If the closure returns None,
+    /// creates that mutex; otherwise returns the id given by the closure.
+    fn mutex_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, MutexId>
+    where
+        F: FnOnce(&mut MiriEvalContext<'mir, 'tcx>, MutexId) -> InterpResult<'tcx, Option<MutexId>>,
+    {
+        let this = self.eval_context_mut();
+        let next_index = this.machine.threads.sync.mutexes.next_index();
+        if let Some(old) = existing(this, next_index)? {
+            Ok(old)
+        } else {
+            let new_index = this.machine.threads.sync.mutexes.push(Default::default());
+            assert_eq!(next_index, new_index);
+            Ok(new_index)
+        }
+    }
+
     #[inline]
     /// Get the id of the thread that currently owns this lock.
     fn mutex_get_owner(&mut self, id: MutexId) -> ThreadId {
@@ -297,6 +315,27 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         this.machine.threads.sync.rwlocks.push(Default::default())
     }
 
+    #[inline]
+    /// Provides the closure with the next RwLockId. If the closure returns None,
+    /// creates that rwlock; otherwise returns the id given by the closure.
+    fn rwlock_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, RwLockId>
+    where
+        F: FnOnce(
+            &mut MiriEvalContext<'mir, 'tcx>,
+            RwLockId,
+        ) -> InterpResult<'tcx, Option<RwLockId>>,
+    {
+        let this = self.eval_context_mut();
+        let next_index = this.machine.threads.sync.rwlocks.next_index();
+        if let Some(old) = existing(this, next_index)? {
+            Ok(old)
+        } else {
+            let new_index = this.machine.threads.sync.rwlocks.push(Default::default());
+            assert_eq!(next_index, new_index);
+            Ok(new_index)
+        }
+    }
+
     #[inline]
     /// Check if locked.
     fn rwlock_is_locked(&self, id: RwLockId) -> bool {
@@ -445,6 +484,27 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         this.machine.threads.sync.condvars.push(Default::default())
     }
 
+    #[inline]
+    /// Provides the closure with the next CondvarId. If the closure returns None,
+    /// creates that condvar; otherwise returns the id given by the closure.
+    fn condvar_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, CondvarId>
+    where
+        F: FnOnce(
+            &mut MiriEvalContext<'mir, 'tcx>,
+            CondvarId,
+        ) -> InterpResult<'tcx, Option<CondvarId>>,
+    {
+        let this = self.eval_context_mut();
+        let next_index = this.machine.threads.sync.condvars.next_index();
+        if let Some(old) = existing(this, next_index)? {
+            Ok(old)
+        } else {
+            let new_index = this.machine.threads.sync.condvars.push(Default::default());
+            assert_eq!(next_index, new_index);
+            Ok(new_index)
+        }
+    }
+
     #[inline]
     /// Is the conditional variable awaited?
     fn condvar_is_awaited(&mut self, id: CondvarId) -> bool {
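
The three `*_get_or_create` methods in sync.rs differ only in which table they touch. A minimal sketch of the shared closure contract, assuming a plain `Vec` with `usize` ids in place of Miri's id-indexed vectors (and dropping the `&mut MiriEvalContext` argument): `Ok(None)` from the closure means the candidate id was claimed and must now be allocated; `Ok(Some(id))` means an existing id was found and nothing is pushed.

```rust
// `store` stands in for e.g. this.machine.threads.sync.mutexes.
fn get_or_create<T: Default, E>(
    store: &mut Vec<T>,
    existing: impl FnOnce(usize) -> Result<Option<usize>, E>,
) -> Result<usize, E> {
    let next_index = store.len(); // candidate id; nothing allocated yet
    if let Some(old) = existing(next_index)? {
        Ok(old) // an id already existed; the store is untouched
    } else {
        store.push(T::default()); // the closure claimed next_index: allocate it
        Ok(next_index) // matches the assert_eq!(next_index, new_index) above
    }
}

fn main() {
    let mut mutexes: Vec<u32> = Vec::new();
    // Closure reports "no existing id", so the candidate index is allocated.
    let id = get_or_create(&mut mutexes, |_next| Ok::<_, ()>(None)).unwrap();
    assert_eq!(id, 0);
    // Closure reports an existing id, so nothing new is pushed.
    let id = get_or_create(&mut mutexes, |_next| Ok::<_, ()>(Some(0))).unwrap();
    assert_eq!((id, mutexes.len()), (0, 1));
}
```
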