diff --git a/src/tools/miri/README.md b/src/tools/miri/README.md
index 948f1ee6c6377..4254b9bb67dba 100644
--- a/src/tools/miri/README.md
+++ b/src/tools/miri/README.md
@@ -295,6 +295,16 @@ up the sysroot.  If you are using `miri` (the Miri driver) directly, see the
 Miri adds its own set of `-Z` flags, which are usually set via the `MIRIFLAGS`
 environment variable. We first document the most relevant and most commonly used flags:
 
+* `-Zmiri-address-reuse-rate=<rate>` changes the probability that a freed *non-stack* allocation
+  will be added to the pool for address reuse, and the probability that a new *non-stack* allocation
+  will be taken from the pool. Stack allocations never get added to or taken from the pool. The
+  default is `0.5`.
+* `-Zmiri-address-reuse-cross-thread-rate=<rate>` changes the probability that an allocation which
+  attempts to reuse a previously freed block of memory will also consider blocks freed by *other
+  threads*. The default is `0.1`, which means by default, in 90% of the cases where an address reuse
+  attempt is made, only addresses from the same thread will be considered. Reusing an address from
+  another thread induces synchronization between those threads, which can mask data races and weak
+  memory bugs.
 * `-Zmiri-compare-exchange-weak-failure-rate=<rate>` changes the failure rate of
   `compare_exchange_weak` operations. The default is `0.8` (so 4 out of 5 weak ops will fail).
   You can change it to any value between `0.0` and `1.0`, where `1.0` means it
diff --git a/src/tools/miri/rust-version b/src/tools/miri/rust-version
index 6ad8fba723c89..a60acf44a401f 100644
--- a/src/tools/miri/rust-version
+++ b/src/tools/miri/rust-version
@@ -1 +1 @@
-23d47dba319331d4418827cfbb8c1af283497d3c
+c8d19a92aa9022eb690899cf6d54fd23cb6877e5
diff --git a/src/tools/miri/src/alloc_addresses/mod.rs b/src/tools/miri/src/alloc_addresses/mod.rs
index b4983656adc10..2bbb34c9a4bf7 100644
--- a/src/tools/miri/src/alloc_addresses/mod.rs
+++ b/src/tools/miri/src/alloc_addresses/mod.rs
@@ -13,8 +13,9 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_span::Span;
 use rustc_target::abi::{Align, HasDataLayout, Size};
 
-use crate::*;
-use reuse_pool::ReusePool;
+use crate::{concurrency::VClock, *};
+
+use self::reuse_pool::ReusePool;
 
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
 pub enum ProvenanceMode {
@@ -77,7 +78,7 @@ impl GlobalStateInner {
         GlobalStateInner {
             int_to_ptr_map: Vec::default(),
             base_addr: FxHashMap::default(),
-            reuse: ReusePool::new(),
+            reuse: ReusePool::new(config),
             exposed: FxHashSet::default(),
             next_base_addr: stack_addr,
             provenance_mode: config.provenance_mode,
@@ -144,7 +145,7 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
     fn addr_from_alloc_id(
         &self,
         alloc_id: AllocId,
-        _kind: MemoryKind,
+        memory_kind: MemoryKind,
     ) -> InterpResult<'tcx, u64> {
         let ecx = self.eval_context_ref();
         let mut global_state = ecx.machine.alloc_addresses.borrow_mut();
@@ -163,9 +164,18 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
                 assert!(!matches!(kind, AllocKind::Dead));
 
                 // This allocation does not have a base address yet, pick or reuse one.
-                let base_addr = if let Some(reuse_addr) =
-                    global_state.reuse.take_addr(&mut *rng, size, align)
-                {
+                let base_addr = if let Some((reuse_addr, clock)) = global_state.reuse.take_addr(
+                    &mut *rng,
+                    size,
+                    align,
+                    memory_kind,
+                    ecx.get_active_thread(),
+                ) {
+                    if let Some(clock) = clock
+                        && let Some(data_race) = &ecx.machine.data_race
+                    {
+                        data_race.acquire_clock(&clock, ecx.get_active_thread());
+                    }
                     reuse_addr
                 } else {
                     // We have to pick a fresh address.
@@ -333,14 +343,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
     }
 }
 
-impl GlobalStateInner {
-    pub fn free_alloc_id(
-        &mut self,
-        rng: &mut impl Rng,
-        dead_id: AllocId,
-        size: Size,
-        align: Align,
-    ) {
+impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
+    pub fn free_alloc_id(&mut self, dead_id: AllocId, size: Size, align: Align, kind: MemoryKind) {
+        let global_state = self.alloc_addresses.get_mut();
+        let rng = self.rng.get_mut();
+
         // We can *not* remove this from `base_addr`, since the interpreter design requires that we
         // be able to retrieve an AllocId + offset for any memory access *before* we check if the
         // access is valid. Specifically, `ptr_get_alloc` is called on each attempt at a memory
@@ -353,15 +360,25 @@ impl GlobalStateInner {
         // returns a dead allocation.
         // To avoid a linear scan we first look up the address in `base_addr`, and then find it in
         // `int_to_ptr_map`.
-        let addr = *self.base_addr.get(&dead_id).unwrap();
-        let pos = self.int_to_ptr_map.binary_search_by_key(&addr, |(addr, _)| *addr).unwrap();
-        let removed = self.int_to_ptr_map.remove(pos);
+        let addr = *global_state.base_addr.get(&dead_id).unwrap();
+        let pos =
+            global_state.int_to_ptr_map.binary_search_by_key(&addr, |(addr, _)| *addr).unwrap();
+        let removed = global_state.int_to_ptr_map.remove(pos);
         assert_eq!(removed, (addr, dead_id)); // double-check that we removed the right thing
         // We can also remove it from `exposed`, since this allocation can anyway not be returned by
         // `alloc_id_from_addr` any more.
-        self.exposed.remove(&dead_id);
+        global_state.exposed.remove(&dead_id);
         // Also remember this address for future reuse.
-        self.reuse.add_addr(rng, addr, size, align)
+        let thread = self.threads.get_active_thread_id();
+        global_state.reuse.add_addr(rng, addr, size, align, kind, thread, || {
+            if let Some(data_race) = &self.data_race {
+                data_race
+                    .release_clock(thread, self.threads.active_thread_ref().current_span())
+                    .clone()
+            } else {
+                VClock::default()
+            }
+        })
     }
 }
 
diff --git a/src/tools/miri/src/alloc_addresses/reuse_pool.rs b/src/tools/miri/src/alloc_addresses/reuse_pool.rs
index 8374d0ec605a8..77fc9f53f9e37 100644
--- a/src/tools/miri/src/alloc_addresses/reuse_pool.rs
+++ b/src/tools/miri/src/alloc_addresses/reuse_pool.rs
@@ -4,11 +4,9 @@ use rand::Rng;
 
 use rustc_target::abi::{Align, Size};
 
-const MAX_POOL_SIZE: usize = 64;
+use crate::{concurrency::VClock, MemoryKind, MiriConfig, ThreadId};
 
-// Just use fair coins, until we have evidence that other numbers are better.
-const ADDR_REMEMBER_CHANCE: f64 = 0.5;
-const ADDR_TAKE_CHANCE: f64 = 0.5;
+const MAX_POOL_SIZE: usize = 64;
 
 /// The pool strikes a balance between exploring more possible executions and making it more likely
 /// to find bugs. The hypothesis is that bugs are more likely to occur when reuse happens for
@@ -16,20 +14,29 @@ const ADDR_TAKE_CHANCE: f64 = 0.5;
 /// structure. Therefore we only reuse allocations when size and alignment match exactly.
 #[derive(Debug)]
 pub struct ReusePool {
+    address_reuse_rate: f64,
+    address_reuse_cross_thread_rate: f64,
     /// The i-th element in `pool` stores allocations of alignment `2^i`. We store these reusable
-    /// allocations as address-size pairs, the list must be sorted by the size.
+    /// allocations as address-size pairs, the list must be sorted by the size and then the thread ID.
     ///
     /// Each of these maps has at most MAX_POOL_SIZE elements, and since alignment is limited to
     /// less than 64 different possible value, that bounds the overall size of the pool.
-    pool: Vec<Vec<(u64, Size)>>,
+    ///
+    /// We also store the ID and the data-race clock of the thread that donated this pool element,
+    /// to ensure synchronization with the thread that picks up this address.
+    pool: Vec<Vec<(u64, Size, ThreadId, VClock)>>,
 }
 
 impl ReusePool {
-    pub fn new() -> Self {
-        ReusePool { pool: vec![] }
+    pub fn new(config: &MiriConfig) -> Self {
+        ReusePool {
+            address_reuse_rate: config.address_reuse_rate,
+            address_reuse_cross_thread_rate: config.address_reuse_cross_thread_rate,
+            pool: vec![],
+        }
     }
 
-    fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size)> {
+    fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size, ThreadId, VClock)> {
         let pool_idx: usize = align.bytes().trailing_zeros().try_into().unwrap();
         if self.pool.len() <= pool_idx {
             self.pool.resize(pool_idx + 1, Vec::new());
@@ -37,40 +44,73 @@ impl ReusePool {
         &mut self.pool[pool_idx]
     }
 
-    pub fn add_addr(&mut self, rng: &mut impl Rng, addr: u64, size: Size, align: Align) {
+    pub fn add_addr(
+        &mut self,
+        rng: &mut impl Rng,
+        addr: u64,
+        size: Size,
+        align: Align,
+        kind: MemoryKind,
+        thread: ThreadId,
+        clock: impl FnOnce() -> VClock,
+    ) {
         // Let's see if we even want to remember this address.
-        if !rng.gen_bool(ADDR_REMEMBER_CHANCE) {
+        // We don't remember stack addresses: there's a lot of them (so the perf impact is big),
+        // and we only want to reuse stack slots within the same thread or else we'll add a lot of
+        // undesired synchronization.
+        if kind == MemoryKind::Stack || !rng.gen_bool(self.address_reuse_rate) {
             return;
         }
+        let clock = clock();
         // Determine the pool to add this to, and where in the pool to put it.
         let subpool = self.subpool(align);
-        let pos = subpool.partition_point(|(_addr, other_size)| *other_size < size);
+        let pos = subpool.partition_point(|(_addr, other_size, other_thread, _)| {
+            (*other_size, *other_thread) < (size, thread)
+        });
         // Make sure the pool does not grow too big.
         if subpool.len() >= MAX_POOL_SIZE {
             // Pool full. Replace existing element, or last one if this would be even bigger.
             let clamped_pos = pos.min(subpool.len() - 1);
-            subpool[clamped_pos] = (addr, size);
+            subpool[clamped_pos] = (addr, size, thread, clock);
             return;
         }
         // Add address to pool, at the right position.
-        subpool.insert(pos, (addr, size));
+        subpool.insert(pos, (addr, size, thread, clock));
     }
 
-    pub fn take_addr(&mut self, rng: &mut impl Rng, size: Size, align: Align) -> Option<u64> {
-        // Determine whether we'll even attempt a reuse.
-        if !rng.gen_bool(ADDR_TAKE_CHANCE) {
+    /// Returns the address to use and optionally a clock we have to synchronize with.
+    pub fn take_addr(
+        &mut self,
+        rng: &mut impl Rng,
+        size: Size,
+        align: Align,
+        kind: MemoryKind,
+        thread: ThreadId,
+    ) -> Option<(u64, Option<VClock>)> {
+        // Determine whether we'll even attempt a reuse. As above, we don't do reuse for stack addresses.
+        if kind == MemoryKind::Stack || !rng.gen_bool(self.address_reuse_rate) {
             return None;
         }
+        let cross_thread_reuse = rng.gen_bool(self.address_reuse_cross_thread_rate);
         // Determine the pool to take this from.
         let subpool = self.subpool(align);
         // Let's see if we can find something of the right size. We want to find the full range of
-        // such items, beginning with the first, so we can't use `binary_search_by_key`.
-        let begin = subpool.partition_point(|(_addr, other_size)| *other_size < size);
+        // such items, beginning with the first, so we can't use `binary_search_by_key`. If we do
+        // *not* want to consider other thread's allocations, we effectively use the lexicographic
+        // order on `(size, thread)`.
+        let begin = subpool.partition_point(|(_addr, other_size, other_thread, _)| {
+            *other_size < size
+                || (*other_size == size && !cross_thread_reuse && *other_thread < thread)
+        });
         let mut end = begin;
-        while let Some((_addr, other_size)) = subpool.get(end) {
+        while let Some((_addr, other_size, other_thread, _)) = subpool.get(end) {
             if *other_size != size {
                 break;
             }
+            if !cross_thread_reuse && *other_thread != thread {
+                // We entered the allocations of another thread.
+                break;
+            }
             end += 1;
         }
         if end == begin {
@@ -80,8 +120,10 @@ impl ReusePool {
         // Pick a random element with the desired size.
         let idx = rng.gen_range(begin..end);
         // Remove it from the pool and return.
-        let (chosen_addr, chosen_size) = subpool.remove(idx);
+        let (chosen_addr, chosen_size, chosen_thread, clock) = subpool.remove(idx);
         debug_assert!(chosen_size >= size && chosen_addr % align.bytes() == 0);
-        Some(chosen_addr)
+        debug_assert!(cross_thread_reuse || chosen_thread == thread);
+        // No synchronization needed if we reused from the current thread.
+        Some((chosen_addr, if chosen_thread == thread { None } else { Some(clock) }))
     }
 }
diff --git a/src/tools/miri/src/bin/miri.rs b/src/tools/miri/src/bin/miri.rs
index 3f7a965e9df7d..db2cd01ce0bf6 100644
--- a/src/tools/miri/src/bin/miri.rs
+++ b/src/tools/miri/src/bin/miri.rs
@@ -307,6 +307,15 @@ fn parse_comma_list<T: FromStr>(input: &str) -> Result<Vec<T>, T::Err> {
     input.split(',').map(str::parse::<T>).collect()
 }
 
+/// Parses the input as a float in the range from 0.0 to 1.0 (inclusive).
+fn parse_rate(input: &str) -> Result<f64, &'static str> {
+    match input.parse::<f64>() {
+        Ok(rate) if rate >= 0.0 && rate <= 1.0 => Ok(rate),
+        Ok(_) => Err("must be between `0.0` and `1.0`"),
+        Err(_) => Err("requires a `f64` between `0.0` and `1.0`"),
+    }
+}
+
 #[cfg(any(target_os = "linux", target_os = "macos"))]
 fn jemalloc_magic() {
     // These magic runes are copied from
@@ -499,14 +508,9 @@ fn main() {
         } else if let Some(param) = arg.strip_prefix("-Zmiri-env-forward=") {
             miri_config.forwarded_env_vars.push(param.to_owned());
         } else if let Some(param) = arg.strip_prefix("-Zmiri-track-pointer-tag=") {
-            let ids: Vec<u64> = match parse_comma_list(param) {
-                Ok(ids) => ids,
-                Err(err) =>
-                    show_error!(
-                        "-Zmiri-track-pointer-tag requires a comma separated list of valid `u64` arguments: {}",
-                        err
-                    ),
-            };
+            let ids: Vec<u64> = parse_comma_list(param).unwrap_or_else(|err| {
+                show_error!("-Zmiri-track-pointer-tag requires a comma separated list of valid `u64` arguments: {err}")
+            });
             for id in ids.into_iter().map(miri::BorTag::new) {
                 if let Some(id) = id {
                     miri_config.tracked_pointer_tags.insert(id);
@@ -515,14 +519,9 @@ fn main() {
                 }
             }
         } else if let Some(param) = arg.strip_prefix("-Zmiri-track-call-id=") {
-            let ids: Vec<u64> = match parse_comma_list(param) {
-                Ok(ids) => ids,
-                Err(err) =>
-                    show_error!(
-                        "-Zmiri-track-call-id requires a comma separated list of valid `u64` arguments: {}",
-                        err
-                    ),
-            };
+            let ids: Vec<u64> = parse_comma_list(param).unwrap_or_else(|err| {
+                show_error!("-Zmiri-track-call-id requires a comma separated list of valid `u64` arguments: {err}")
+            });
             for id in ids.into_iter().map(miri::CallId::new) {
                 if let Some(id) = id {
                     miri_config.tracked_call_ids.insert(id);
@@ -531,56 +530,37 @@ fn main() {
                 }
             }
         } else if let Some(param) = arg.strip_prefix("-Zmiri-track-alloc-id=") {
-            let ids: Vec<miri::AllocId> = match parse_comma_list::<NonZero<u64>>(param) {
-                Ok(ids) => ids.into_iter().map(miri::AllocId).collect(),
-                Err(err) =>
-                    show_error!(
-                        "-Zmiri-track-alloc-id requires a comma separated list of valid non-zero `u64` arguments: {}",
-                        err
-                    ),
-            };
-            miri_config.tracked_alloc_ids.extend(ids);
+            let ids = parse_comma_list::<NonZero<u64>>(param).unwrap_or_else(|err| {
+                show_error!("-Zmiri-track-alloc-id requires a comma separated list of valid non-zero `u64` arguments: {err}")
+            });
+            miri_config.tracked_alloc_ids.extend(ids.into_iter().map(miri::AllocId));
         } else if arg == "-Zmiri-track-alloc-accesses" {
             miri_config.track_alloc_accesses = true;
+        } else if let Some(param) = arg.strip_prefix("-Zmiri-address-reuse-rate=") {
+            miri_config.address_reuse_rate = parse_rate(param)
+                .unwrap_or_else(|err| show_error!("-Zmiri-address-reuse-rate {err}"));
+        } else if let Some(param) = arg.strip_prefix("-Zmiri-address-reuse-cross-thread-rate=") {
+            miri_config.address_reuse_cross_thread_rate = parse_rate(param)
+                .unwrap_or_else(|err| show_error!("-Zmiri-address-reuse-cross-thread-rate {err}"));
         } else if let Some(param) = arg.strip_prefix("-Zmiri-compare-exchange-weak-failure-rate=") {
-            let rate = match param.parse::<f64>() {
-                Ok(rate) if rate >= 0.0 && rate <= 1.0 => rate,
-                Ok(_) =>
-                    show_error!(
-                        "-Zmiri-compare-exchange-weak-failure-rate must be between `0.0` and `1.0`"
-                    ),
-                Err(err) =>
-                    show_error!(
-                        "-Zmiri-compare-exchange-weak-failure-rate requires a `f64` between `0.0` and `1.0`: {}",
-                        err
-                    ),
-            };
-            miri_config.cmpxchg_weak_failure_rate = rate;
+            miri_config.cmpxchg_weak_failure_rate = parse_rate(param).unwrap_or_else(|err| {
+                show_error!("-Zmiri-compare-exchange-weak-failure-rate {err}")
+            });
         } else if let Some(param) = arg.strip_prefix("-Zmiri-preemption-rate=") {
-            let rate = match param.parse::<f64>() {
-                Ok(rate) if rate >= 0.0 && rate <= 1.0 => rate,
-                Ok(_) => show_error!("-Zmiri-preemption-rate must be between `0.0` and `1.0`"),
-                Err(err) =>
-                    show_error!(
-                        "-Zmiri-preemption-rate requires a `f64` between `0.0` and `1.0`: {}",
-                        err
-                    ),
-            };
-            miri_config.preemption_rate = rate;
+            miri_config.preemption_rate =
+                parse_rate(param).unwrap_or_else(|err| show_error!("-Zmiri-preemption-rate {err}"));
         } else if arg == "-Zmiri-report-progress" {
             // This makes it take a few seconds between progress reports on my laptop.
             miri_config.report_progress = Some(1_000_000);
         } else if let Some(param) = arg.strip_prefix("-Zmiri-report-progress=") {
-            let interval = match param.parse::<u32>() {
-                Ok(i) => i,
-                Err(err) => show_error!("-Zmiri-report-progress requires a `u32`: {}", err),
-            };
+            let interval = param.parse::<u32>().unwrap_or_else(|err| {
+                show_error!("-Zmiri-report-progress requires a `u32`: {}", err)
+            });
             miri_config.report_progress = Some(interval);
         } else if let Some(param) = arg.strip_prefix("-Zmiri-provenance-gc=") {
-            let interval = match param.parse::<u32>() {
-                Ok(i) => i,
-                Err(err) => show_error!("-Zmiri-provenance-gc requires a `u32`: {}", err),
-            };
+            let interval = param.parse::<u32>().unwrap_or_else(|err| {
+                show_error!("-Zmiri-provenance-gc requires a `u32`: {}", err)
+            });
             miri_config.gc_interval = interval;
         } else if let Some(param) = arg.strip_prefix("-Zmiri-measureme=") {
             miri_config.measureme_out = Some(param.to_string());
@@ -605,23 +585,20 @@ fn main() {
                 show_error!("-Zmiri-extern-so-file `{}` does not exist", filename);
             }
         } else if let Some(param) = arg.strip_prefix("-Zmiri-num-cpus=") {
-            let num_cpus = match param.parse::<u32>() {
-                Ok(i) => i,
-                Err(err) => show_error!("-Zmiri-num-cpus requires a `u32`: {}", err),
-            };
-
+            let num_cpus = param
+                .parse::<u32>()
+                .unwrap_or_else(|err| show_error!("-Zmiri-num-cpus requires a `u32`: {}", err));
             miri_config.num_cpus = num_cpus;
         } else if let Some(param) = arg.strip_prefix("-Zmiri-force-page-size=") {
-            let page_size = match param.parse::<u64>() {
-                Ok(i) =>
-                    if i.is_power_of_two() {
-                        i * 1024
-                    } else {
-                        show_error!("-Zmiri-force-page-size requires a power of 2: {}", i)
-                    },
-                Err(err) => show_error!("-Zmiri-force-page-size requires a `u64`: {}", err),
+            let page_size = param.parse::<u64>().unwrap_or_else(|err| {
+                show_error!("-Zmiri-force-page-size requires a `u64`: {}", err)
+            });
+            // Convert from kilobytes to bytes.
+            let page_size = if page_size.is_power_of_two() {
+                page_size * 1024
+            } else {
+                show_error!("-Zmiri-force-page-size requires a power of 2: {page_size}");
             };
-
             miri_config.page_size = Some(page_size);
         } else {
             // Forward to rustc.
diff --git a/src/tools/miri/src/borrow_tracker/stacked_borrows/diagnostics.rs b/src/tools/miri/src/borrow_tracker/stacked_borrows/diagnostics.rs
index 6d4a5bd41b1eb..cb677b865311f 100644
--- a/src/tools/miri/src/borrow_tracker/stacked_borrows/diagnostics.rs
+++ b/src/tools/miri/src/borrow_tracker/stacked_borrows/diagnostics.rs
@@ -438,7 +438,7 @@ impl<'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
             .machine
             .threads
             .all_stacks()
-            .flatten()
+            .flat_map(|(_id, stack)| stack)
             .map(|frame| {
                 frame.extra.borrow_tracker.as_ref().expect("we should have borrow tracking data")
             })
diff --git a/src/tools/miri/src/borrow_tracker/tree_borrows/exhaustive.rs b/src/tools/miri/src/borrow_tracker/tree_borrows/exhaustive.rs
index daf3590358fd9..d50a22a91047d 100644
--- a/src/tools/miri/src/borrow_tracker/tree_borrows/exhaustive.rs
+++ b/src/tools/miri/src/borrow_tracker/tree_borrows/exhaustive.rs
@@ -2,7 +2,6 @@
 //! (These are used in Tree Borrows `#[test]`s for thorough verification
 //! of the behavior of the state machine of permissions,
 //! but the contents of this file are extremely generic)
-#![cfg(test)]
 
 pub trait Exhaustive: Sized {
     fn exhaustive() -> Box<dyn Iterator<Item = Self>>;
diff --git a/src/tools/miri/src/concurrency/data_race.rs b/src/tools/miri/src/concurrency/data_race.rs
index 95049b91cbad1..2281609a049d4 100644
--- a/src/tools/miri/src/concurrency/data_race.rs
+++ b/src/tools/miri/src/concurrency/data_race.rs
@@ -547,9 +547,9 @@ impl MemoryCellClocks {
     ) -> Result<(), DataRace> {
         trace!("Unsynchronized read with vectors: {:#?} :: {:#?}", self, thread_clocks);
         if !current_span.is_dummy() {
-            thread_clocks.clock[index].span = current_span;
+            thread_clocks.clock.index_mut(index).span = current_span;
         }
-        thread_clocks.clock[index].set_read_type(read_type);
+        thread_clocks.clock.index_mut(index).set_read_type(read_type);
         if self.write_was_before(&thread_clocks.clock) {
             let race_free = if let Some(atomic) = self.atomic() {
                 // We must be ordered-after all atomic accesses, reads and writes.
@@ -577,7 +577,7 @@ impl MemoryCellClocks {
     ) -> Result<(), DataRace> {
         trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, thread_clocks);
         if !current_span.is_dummy() {
-            thread_clocks.clock[index].span = current_span;
+            thread_clocks.clock.index_mut(index).span = current_span;
         }
         if self.write_was_before(&thread_clocks.clock) && self.read <= thread_clocks.clock {
             let race_free = if let Some(atomic) = self.atomic() {
@@ -1701,49 +1701,34 @@ impl GlobalState {
         format!("thread `{thread_name}`")
     }
 
-    /// Acquire a lock, express that the previous call of
-    /// `validate_lock_release` must happen before this.
+    /// Acquire the given clock into the given thread, establishing synchronization with
+    /// the moment when that clock snapshot was taken via `release_clock`.
     /// As this is an acquire operation, the thread timestamp is not
     /// incremented.
-    pub fn validate_lock_acquire(&self, lock: &VClock, thread: ThreadId) {
-        let (_, mut clocks) = self.load_thread_state_mut(thread);
+    pub fn acquire_clock(&self, lock: &VClock, thread: ThreadId) {
+        let (_, mut clocks) = self.thread_state_mut(thread);
         clocks.clock.join(lock);
     }
 
-    /// Release a lock handle, express that this happens-before
-    /// any subsequent calls to `validate_lock_acquire`.
-    /// For normal locks this should be equivalent to `validate_lock_release_shared`
-    /// since an acquire operation should have occurred before, however
-    /// for futex & condvar operations this is not the case and this
-    /// operation must be used.
-    pub fn validate_lock_release(&self, lock: &mut VClock, thread: ThreadId, current_span: Span) {
-        let (index, mut clocks) = self.load_thread_state_mut(thread);
-        lock.clone_from(&clocks.clock);
-        clocks.increment_clock(index, current_span);
-    }
-
-    /// Release a lock handle, express that this happens-before
-    /// any subsequent calls to `validate_lock_acquire` as well
-    /// as any previous calls to this function after any
-    /// `validate_lock_release` calls.
-    /// For normal locks this should be equivalent to `validate_lock_release`.
-    /// This function only exists for joining over the set of concurrent readers
-    /// in a read-write lock and should not be used for anything else.
-    pub fn validate_lock_release_shared(
-        &self,
-        lock: &mut VClock,
-        thread: ThreadId,
-        current_span: Span,
-    ) {
-        let (index, mut clocks) = self.load_thread_state_mut(thread);
-        lock.join(&clocks.clock);
+    /// Returns the `release` clock of the given thread.
+    /// Other threads can acquire this clock in the future to establish synchronization
+    /// with this program point.
+    pub fn release_clock(&self, thread: ThreadId, current_span: Span) -> Ref<'_, VClock> {
+        // We increment the clock each time this happens, to ensure no two releases
+        // can be confused with each other.
+        let (index, mut clocks) = self.thread_state_mut(thread);
         clocks.increment_clock(index, current_span);
+        drop(clocks);
+        // To return a read-only view, we need to release the RefCell
+        // and borrow it again.
+        let (_index, clocks) = self.thread_state(thread);
+        Ref::map(clocks, |c| &c.clock)
     }
 
     /// Load the vector index used by the given thread as well as the set of vector clocks
     /// used by the thread.
     #[inline]
-    fn load_thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
+    fn thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
         let index = self.thread_info.borrow()[thread]
             .vector_index
             .expect("Loading thread state for thread with no assigned vector");
@@ -1752,6 +1737,18 @@ impl GlobalState {
         (index, clocks)
     }
 
+    /// Load the vector index used by the given thread as well as the set of vector clocks
+    /// used by the thread.
+    #[inline]
+    fn thread_state(&self, thread: ThreadId) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
+        let index = self.thread_info.borrow()[thread]
+            .vector_index
+            .expect("Loading thread state for thread with no assigned vector");
+        let ref_vector = self.vector_clocks.borrow();
+        let clocks = Ref::map(ref_vector, |vec| &vec[index]);
+        (index, clocks)
+    }
+
     /// Load the current vector clock in use and the current set of thread clocks
     /// in use for the vector.
     #[inline]
@@ -1759,10 +1756,7 @@ impl GlobalState {
         &self,
         thread_mgr: &ThreadManager<'_, '_>,
     ) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
-        let index = self.current_index(thread_mgr);
-        let ref_vector = self.vector_clocks.borrow();
-        let clocks = Ref::map(ref_vector, |vec| &vec[index]);
-        (index, clocks)
+        self.thread_state(thread_mgr.get_active_thread_id())
     }
 
     /// Load the current vector clock in use and the current set of thread clocks
@@ -1772,10 +1766,7 @@ impl GlobalState {
         &self,
         thread_mgr: &ThreadManager<'_, '_>,
     ) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
-        let index = self.current_index(thread_mgr);
-        let ref_vector = self.vector_clocks.borrow_mut();
-        let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
-        (index, clocks)
+        self.thread_state_mut(thread_mgr.get_active_thread_id())
     }
 
     /// Return the current thread, should be the same
diff --git a/src/tools/miri/src/concurrency/init_once.rs b/src/tools/miri/src/concurrency/init_once.rs
index 35dcfecbbe334..a01b59c9165b7 100644
--- a/src/tools/miri/src/concurrency/init_once.rs
+++ b/src/tools/miri/src/concurrency/init_once.rs
@@ -41,7 +41,7 @@ pub enum InitOnceStatus {
 pub(super) struct InitOnce<'mir, 'tcx> {
     status: InitOnceStatus,
     waiters: VecDeque<InitOnceWaiter<'mir, 'tcx>>,
-    data_race: VClock,
+    clock: VClock,
 }
 
 impl<'mir, 'tcx> VisitProvenance for InitOnce<'mir, 'tcx> {
@@ -61,10 +61,8 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         let current_thread = this.get_active_thread();
 
         if let Some(data_race) = &this.machine.data_race {
-            data_race.validate_lock_acquire(
-                &this.machine.threads.sync.init_onces[id].data_race,
-                current_thread,
-            );
+            data_race
+                .acquire_clock(&this.machine.threads.sync.init_onces[id].clock, current_thread);
         }
     }
 
@@ -77,7 +75,7 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         let this = self.eval_context_mut();
         let current_thread = this.get_active_thread();
 
-        this.unblock_thread(waiter.thread);
+        this.unblock_thread(waiter.thread, BlockReason::InitOnce(id));
 
         // Call callback, with the woken-up thread as `current`.
         this.set_active_thread(waiter.thread);
@@ -142,7 +140,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         let init_once = &mut this.machine.threads.sync.init_onces[id];
         assert_ne!(init_once.status, InitOnceStatus::Complete, "queueing on complete init once");
         init_once.waiters.push_back(InitOnceWaiter { thread, callback });
-        this.block_thread(thread);
+        this.block_thread(thread, BlockReason::InitOnce(id));
     }
 
     /// Begin initializing this InitOnce. Must only be called after checking that it is currently
@@ -176,7 +174,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
 
         // Each complete happens-before the end of the wait
         if let Some(data_race) = &this.machine.data_race {
-            data_race.validate_lock_release(&mut init_once.data_race, current_thread, current_span);
+            init_once.clock.clone_from(&data_race.release_clock(current_thread, current_span));
         }
 
         // Wake up everyone.
@@ -202,7 +200,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
 
         // Each complete happens-before the end of the wait
         if let Some(data_race) = &this.machine.data_race {
-            data_race.validate_lock_release(&mut init_once.data_race, current_thread, current_span);
+            init_once.clock.clone_from(&data_race.release_clock(current_thread, current_span));
         }
 
         // Wake up one waiting thread, so they can go ahead and try to init this.
diff --git a/src/tools/miri/src/concurrency/mod.rs b/src/tools/miri/src/concurrency/mod.rs
index 45903107f1710..15e1a94d6db0e 100644
--- a/src/tools/miri/src/concurrency/mod.rs
+++ b/src/tools/miri/src/concurrency/mod.rs
@@ -6,3 +6,5 @@ pub mod init_once;
 pub mod thread;
 mod vector_clock;
 pub mod weak_memory;
+
+pub use vector_clock::VClock;
diff --git a/src/tools/miri/src/concurrency/sync.rs b/src/tools/miri/src/concurrency/sync.rs
index 956a02ded0f13..d3cef8bf5f32b 100644
--- a/src/tools/miri/src/concurrency/sync.rs
+++ b/src/tools/miri/src/concurrency/sync.rs
@@ -69,12 +69,8 @@ struct Mutex {
     lock_count: usize,
     /// The queue of threads waiting for this mutex.
     queue: VecDeque<ThreadId>,
-    /// Data race handle. This tracks the happens-before
-    /// relationship between each mutex access. It is
-    /// released to during unlock and acquired from during
-    /// locking, and therefore stores the clock of the last
-    /// thread to release this mutex.
-    data_race: VClock,
+    /// Mutex clock. This tracks the moment of the last unlock.
+    clock: VClock,
 }
 
 declare_id!(RwLockId);
@@ -91,7 +87,7 @@ struct RwLock {
     writer_queue: VecDeque<ThreadId>,
     /// The queue of reader threads waiting for this lock.
     reader_queue: VecDeque<ThreadId>,
-    /// Data race handle for writers. Tracks the happens-before
+    /// Data race clock for writers. Tracks the happens-before
     /// ordering between each write access to a rwlock and is updated
     /// after a sequence of concurrent readers to track the happens-
     /// before ordering between the set of previous readers and
@@ -99,8 +95,8 @@ struct RwLock {
     /// Contains the clock of the last thread to release a writer
     /// lock or the joined clock of the set of last threads to release
     /// shared reader locks.
-    data_race: VClock,
-    /// Data race handle for readers. This is temporary storage
+    clock_unlocked: VClock,
+    /// Data race clock for readers. This is temporary storage
     /// for the combined happens-before ordering for between all
     /// concurrent readers and the next writer, and the value
     /// is stored to the main data_race variable once all
@@ -110,30 +106,18 @@ struct RwLock {
     /// add happens-before orderings between shared reader
     /// locks.
     /// This is only relevant when there is an active reader.
-    data_race_reader: VClock,
+    clock_current_readers: VClock,
 }
 
 declare_id!(CondvarId);
 
-#[derive(Debug, Copy, Clone)]
-pub enum RwLockMode {
-    Read,
-    Write,
-}
-
-#[derive(Debug)]
-pub enum CondvarLock {
-    Mutex(MutexId),
-    RwLock { id: RwLockId, mode: RwLockMode },
-}
-
 /// A thread waiting on a conditional variable.
 #[derive(Debug)]
 struct CondvarWaiter {
     /// The thread that is waiting on this variable.
     thread: ThreadId,
-    /// The mutex or rwlock on which the thread is waiting.
-    lock: CondvarLock,
+    /// The mutex on which the thread is waiting.
+    lock: MutexId,
 }
 
 /// The conditional variable state.
@@ -144,8 +128,8 @@ struct Condvar {
     /// between a cond-var signal and a cond-var
     /// wait during a non-spurious signal event.
     /// Contains the clock of the last thread to
-    /// perform a futex-signal.
-    data_race: VClock,
+    /// perform a condvar-signal.
+    clock: VClock,
 }
 
 /// The futex state.
@@ -157,7 +141,7 @@ struct Futex {
     /// during a non-spurious wake event.
     /// Contains the clock of the last thread to
     /// perform a futex-wake.
-    data_race: VClock,
+    clock: VClock,
 }
 
 /// A thread waiting on a futex.
@@ -232,7 +216,7 @@ pub(super) trait EvalContextExtPriv<'mir, 'tcx: 'mir>:
     fn rwlock_dequeue_and_lock_reader(&mut self, id: RwLockId) -> bool {
         let this = self.eval_context_mut();
         if let Some(reader) = this.machine.threads.sync.rwlocks[id].reader_queue.pop_front() {
-            this.unblock_thread(reader);
+            this.unblock_thread(reader, BlockReason::RwLock(id));
             this.rwlock_reader_lock(id, reader);
             true
         } else {
@@ -246,7 +230,7 @@ pub(super) trait EvalContextExtPriv<'mir, 'tcx: 'mir>:
     fn rwlock_dequeue_and_lock_writer(&mut self, id: RwLockId) -> bool {
         let this = self.eval_context_mut();
         if let Some(writer) = this.machine.threads.sync.rwlocks[id].writer_queue.pop_front() {
-            this.unblock_thread(writer);
+            this.unblock_thread(writer, BlockReason::RwLock(id));
             this.rwlock_writer_lock(id, writer);
             true
         } else {
@@ -260,7 +244,7 @@ pub(super) trait EvalContextExtPriv<'mir, 'tcx: 'mir>:
     fn mutex_dequeue_and_lock(&mut self, id: MutexId) -> bool {
         let this = self.eval_context_mut();
         if let Some(thread) = this.machine.threads.sync.mutexes[id].queue.pop_front() {
-            this.unblock_thread(thread);
+            this.unblock_thread(thread, BlockReason::Mutex(id));
             this.mutex_lock(id, thread);
             true
         } else {
@@ -358,7 +342,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         }
         mutex.lock_count = mutex.lock_count.checked_add(1).unwrap();
         if let Some(data_race) = &this.machine.data_race {
-            data_race.validate_lock_acquire(&mutex.data_race, thread);
+            data_race.acquire_clock(&mutex.clock, thread);
         }
     }
 
@@ -385,11 +369,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
                 // The mutex is completely unlocked. Try transferring ownership
                 // to another thread.
                 if let Some(data_race) = &this.machine.data_race {
-                    data_race.validate_lock_release(
-                        &mut mutex.data_race,
-                        current_owner,
-                        current_span,
-                    );
+                    mutex.clock.clone_from(&data_race.release_clock(current_owner, current_span));
                 }
                 this.mutex_dequeue_and_lock(id);
             }
@@ -406,7 +386,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         let this = self.eval_context_mut();
         assert!(this.mutex_is_locked(id), "queing on unlocked mutex");
         this.machine.threads.sync.mutexes[id].queue.push_back(thread);
-        this.block_thread(thread);
+        this.block_thread(thread, BlockReason::Mutex(id));
     }
 
     /// Provides the closure with the next RwLockId. Creates that RwLock if the closure returns None,
@@ -460,7 +440,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         let count = rwlock.readers.entry(reader).or_insert(0);
         *count = count.checked_add(1).expect("the reader counter overflowed");
         if let Some(data_race) = &this.machine.data_race {
-            data_race.validate_lock_acquire(&rwlock.data_race, reader);
+            data_race.acquire_clock(&rwlock.clock_unlocked, reader);
         }
     }
 
@@ -486,20 +466,16 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         }
         if let Some(data_race) = &this.machine.data_race {
             // Add this to the shared-release clock of all concurrent readers.
-            data_race.validate_lock_release_shared(
-                &mut rwlock.data_race_reader,
-                reader,
-                current_span,
-            );
+            rwlock.clock_current_readers.join(&data_race.release_clock(reader, current_span));
         }
 
         // The thread was a reader. If the lock is not held any more, give it to a writer.
         if this.rwlock_is_locked(id).not() {
             // All the readers are finished, so set the writer data-race handle to the value
-            //  of the union of all reader data race handles, since the set of readers
-            //  happen-before the writers
+            // of the union of all reader data race handles, since the set of readers
+            // happen-before the writers
             let rwlock = &mut this.machine.threads.sync.rwlocks[id];
-            rwlock.data_race.clone_from(&rwlock.data_race_reader);
+            rwlock.clock_unlocked.clone_from(&rwlock.clock_current_readers);
             this.rwlock_dequeue_and_lock_writer(id);
         }
         true
@@ -511,7 +487,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         let this = self.eval_context_mut();
         assert!(this.rwlock_is_write_locked(id), "read-queueing on not write locked rwlock");
         this.machine.threads.sync.rwlocks[id].reader_queue.push_back(reader);
-        this.block_thread(reader);
+        this.block_thread(reader, BlockReason::RwLock(id));
     }
 
     /// Lock by setting the writer that owns the lock.
@@ -523,7 +499,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         let rwlock = &mut this.machine.threads.sync.rwlocks[id];
         rwlock.writer = Some(writer);
         if let Some(data_race) = &this.machine.data_race {
-            data_race.validate_lock_acquire(&rwlock.data_race, writer);
+            data_race.acquire_clock(&rwlock.clock_unlocked, writer);
         }
     }
 
@@ -542,11 +518,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
             trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, expected_writer);
             // Release memory to next lock holder.
             if let Some(data_race) = &this.machine.data_race {
-                data_race.validate_lock_release(
-                    &mut rwlock.data_race,
-                    current_writer,
-                    current_span,
-                );
+                rwlock
+                    .clock_unlocked
+                    .clone_from(&*data_race.release_clock(current_writer, current_span));
             }
             // The thread was a writer.
             //
@@ -573,7 +547,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         let this = self.eval_context_mut();
         assert!(this.rwlock_is_locked(id), "write-queueing on unlocked rwlock");
         this.machine.threads.sync.rwlocks[id].writer_queue.push_back(writer);
-        this.block_thread(writer);
+        this.block_thread(writer, BlockReason::RwLock(id));
     }
 
     /// Provides the closure with the next CondvarId. Creates that Condvar if the closure returns None,
@@ -605,7 +579,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
     }
 
     /// Mark that the thread is waiting on the conditional variable.
-    fn condvar_wait(&mut self, id: CondvarId, thread: ThreadId, lock: CondvarLock) {
+    fn condvar_wait(&mut self, id: CondvarId, thread: ThreadId, lock: MutexId) {
         let this = self.eval_context_mut();
         let waiters = &mut this.machine.threads.sync.condvars[id].waiters;
         assert!(waiters.iter().all(|waiter| waiter.thread != thread), "thread is already waiting");
@@ -614,7 +588,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
 
     /// Wake up some thread (if there is any) sleeping on the conditional
     /// variable.
-    fn condvar_signal(&mut self, id: CondvarId) -> Option<(ThreadId, CondvarLock)> {
+    fn condvar_signal(&mut self, id: CondvarId) -> Option<(ThreadId, MutexId)> {
         let this = self.eval_context_mut();
         let current_thread = this.get_active_thread();
         let current_span = this.machine.current_span();
@@ -623,11 +597,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
 
         // Each condvar signal happens-before the end of the condvar wake
         if let Some(data_race) = data_race {
-            data_race.validate_lock_release(&mut condvar.data_race, current_thread, current_span);
+            condvar.clock.clone_from(&*data_race.release_clock(current_thread, current_span));
         }
         condvar.waiters.pop_front().map(|waiter| {
             if let Some(data_race) = data_race {
-                data_race.validate_lock_acquire(&condvar.data_race, waiter.thread);
+                data_race.acquire_clock(&condvar.clock, waiter.thread);
             }
             (waiter.thread, waiter.lock)
         })
@@ -657,14 +631,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
 
         // Each futex-wake happens-before the end of the futex wait
         if let Some(data_race) = data_race {
-            data_race.validate_lock_release(&mut futex.data_race, current_thread, current_span);
+            futex.clock.clone_from(&*data_race.release_clock(current_thread, current_span));
         }
 
         // Wake up the first thread in the queue that matches any of the bits in the bitset.
         futex.waiters.iter().position(|w| w.bitset & bitset != 0).map(|i| {
             let waiter = futex.waiters.remove(i).unwrap();
             if let Some(data_race) = data_race {
-                data_race.validate_lock_acquire(&futex.data_race, waiter.thread);
+                data_race.acquire_clock(&futex.clock, waiter.thread);
             }
             waiter.thread
         })
diff --git a/src/tools/miri/src/concurrency/thread.rs b/src/tools/miri/src/concurrency/thread.rs
index d1136272f0108..2fabd39a74455 100644
--- a/src/tools/miri/src/concurrency/thread.rs
+++ b/src/tools/miri/src/concurrency/thread.rs
@@ -88,18 +88,33 @@ impl From<ThreadId> for u64 {
     }
 }
 
+/// Keeps track of what the thread is blocked on.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub enum BlockReason {
+    /// The thread tried to join the specified thread and is blocked until that
+    /// thread terminates.
+    Join(ThreadId),
+    /// Waiting for time to pass.
+    Sleep,
+    /// Blocked on a mutex.
+    Mutex(MutexId),
+    /// Blocked on a condition variable.
+    Condvar(CondvarId),
+    /// Blocked on a reader-writer lock.
+    RwLock(RwLockId),
+    /// Blocked on a Futex variable.
+    Futex { addr: u64 },
+    /// Blocked on an InitOnce.
+    InitOnce(InitOnceId),
+}
+
 /// The state of a thread.
 #[derive(Debug, Copy, Clone, PartialEq, Eq)]
 pub enum ThreadState {
     /// The thread is enabled and can be executed.
     Enabled,
-    /// The thread tried to join the specified thread and is blocked until that
-    /// thread terminates.
-    BlockedOnJoin(ThreadId),
-    /// The thread is blocked on some synchronization primitive. It is the
-    /// responsibility of the synchronization primitives to track threads that
-    /// are blocked by them.
-    BlockedOnSync,
+    /// The thread is blocked on something.
+    Blocked(BlockReason),
     /// The thread has terminated its execution. We do not delete terminated
     /// threads (FIXME: why?).
     Terminated,
@@ -208,6 +223,12 @@ impl<'mir, 'tcx> Thread<'mir, 'tcx> {
         // empty stacks.
         self.top_user_relevant_frame.or_else(|| self.stack.len().checked_sub(1))
     }
+
+    pub fn current_span(&self) -> Span {
+        self.top_user_relevant_frame()
+            .map(|frame_idx| self.stack[frame_idx].current_span())
+            .unwrap_or(rustc_span::DUMMY_SP)
+    }
 }
 
 impl<'mir, 'tcx> std::fmt::Debug for Thread<'mir, 'tcx> {
@@ -296,17 +317,17 @@ impl VisitProvenance for Frame<'_, '_, Provenance, FrameExtra<'_>> {
 
 /// A specific moment in time.
 #[derive(Debug)]
-pub enum Time {
+pub enum CallbackTime {
     Monotonic(Instant),
     RealTime(SystemTime),
 }
 
-impl Time {
+impl CallbackTime {
     /// How long do we have to wait from now until the specified time?
     fn get_wait_time(&self, clock: &Clock) -> Duration {
         match self {
-            Time::Monotonic(instant) => instant.duration_since(clock.now()),
-            Time::RealTime(time) =>
+            CallbackTime::Monotonic(instant) => instant.duration_since(clock.now()),
+            CallbackTime::RealTime(time) =>
                 time.duration_since(SystemTime::now()).unwrap_or(Duration::new(0, 0)),
         }
     }
@@ -318,7 +339,7 @@ impl Time {
 /// conditional variable, the signal handler deletes the callback.
 struct TimeoutCallbackInfo<'mir, 'tcx> {
     /// The callback should be called no earlier than this time.
-    call_time: Time,
+    call_time: CallbackTime,
     /// The called function.
     callback: TimeoutCallback<'mir, 'tcx>,
 }
@@ -430,11 +451,10 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
     ) -> &mut Vec<Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>> {
         &mut self.threads[self.active_thread].stack
     }
-
     pub fn all_stacks(
         &self,
-    ) -> impl Iterator<Item = &[Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>]> {
-        self.threads.iter().map(|t| &t.stack[..])
+    ) -> impl Iterator<Item = (ThreadId, &[Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>])> {
+        self.threads.iter_enumerated().map(|(id, t)| (id, &t.stack[..]))
     }
 
     /// Create a new thread and returns its id.
@@ -539,7 +559,8 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
         self.threads[joined_thread_id].join_status = ThreadJoinStatus::Joined;
         if self.threads[joined_thread_id].state != ThreadState::Terminated {
             // The joined thread is still running, we need to wait for it.
-            self.active_thread_mut().state = ThreadState::BlockedOnJoin(joined_thread_id);
+            self.active_thread_mut().state =
+                ThreadState::Blocked(BlockReason::Join(joined_thread_id));
             trace!(
                 "{:?} blocked on {:?} when trying to join",
                 self.active_thread,
@@ -569,10 +590,11 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
             throw_ub_format!("trying to join itself");
         }
 
+        // Sanity check `join_status`.
         assert!(
-            self.threads
-                .iter()
-                .all(|thread| thread.state != ThreadState::BlockedOnJoin(joined_thread_id)),
+            self.threads.iter().all(|thread| {
+                thread.state != ThreadState::Blocked(BlockReason::Join(joined_thread_id))
+            }),
             "this thread already has threads waiting for its termination"
         );
 
@@ -594,16 +616,17 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
     }
 
     /// Put the thread into the blocked state.
-    fn block_thread(&mut self, thread: ThreadId) {
+    fn block_thread(&mut self, thread: ThreadId, reason: BlockReason) {
         let state = &mut self.threads[thread].state;
         assert_eq!(*state, ThreadState::Enabled);
-        *state = ThreadState::BlockedOnSync;
+        *state = ThreadState::Blocked(reason);
     }
 
     /// Put the blocked thread into the enabled state.
-    fn unblock_thread(&mut self, thread: ThreadId) {
+    /// Sanity-checks that the thread previously was blocked for the right reason.
+    fn unblock_thread(&mut self, thread: ThreadId, reason: BlockReason) {
         let state = &mut self.threads[thread].state;
-        assert_eq!(*state, ThreadState::BlockedOnSync);
+        assert_eq!(*state, ThreadState::Blocked(reason));
         *state = ThreadState::Enabled;
     }
 
@@ -622,7 +645,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
     fn register_timeout_callback(
         &mut self,
         thread: ThreadId,
-        call_time: Time,
+        call_time: CallbackTime,
         callback: TimeoutCallback<'mir, 'tcx>,
     ) {
         self.timeout_callbacks
@@ -683,7 +706,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
         // Check if we need to unblock any threads.
         let mut joined_threads = vec![]; // store which threads joined, we'll need it
         for (i, thread) in self.threads.iter_enumerated_mut() {
-            if thread.state == ThreadState::BlockedOnJoin(self.active_thread) {
+            if thread.state == ThreadState::Blocked(BlockReason::Join(self.active_thread)) {
                 // The thread has terminated, mark happens-before edge to joining thread
                 if data_race.is_some() {
                     joined_threads.push(i);
@@ -999,13 +1022,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
     }
 
     #[inline]
-    fn block_thread(&mut self, thread: ThreadId) {
-        self.eval_context_mut().machine.threads.block_thread(thread);
+    fn block_thread(&mut self, thread: ThreadId, reason: BlockReason) {
+        self.eval_context_mut().machine.threads.block_thread(thread, reason);
     }
 
     #[inline]
-    fn unblock_thread(&mut self, thread: ThreadId) {
-        self.eval_context_mut().machine.threads.unblock_thread(thread);
+    fn unblock_thread(&mut self, thread: ThreadId, reason: BlockReason) {
+        self.eval_context_mut().machine.threads.unblock_thread(thread, reason);
     }
 
     #[inline]
@@ -1027,11 +1050,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
     fn register_timeout_callback(
         &mut self,
         thread: ThreadId,
-        call_time: Time,
+        call_time: CallbackTime,
         callback: TimeoutCallback<'mir, 'tcx>,
     ) {
         let this = self.eval_context_mut();
-        if !this.machine.communicate() && matches!(call_time, Time::RealTime(..)) {
+        if !this.machine.communicate() && matches!(call_time, CallbackTime::RealTime(..)) {
             panic!("cannot have `RealTime` callback with isolation enabled!")
         }
         this.machine.threads.register_timeout_callback(thread, call_time, callback);
diff --git a/src/tools/miri/src/concurrency/vector_clock.rs b/src/tools/miri/src/concurrency/vector_clock.rs
index fe719943dcb65..2cd3d031b1edd 100644
--- a/src/tools/miri/src/concurrency/vector_clock.rs
+++ b/src/tools/miri/src/concurrency/vector_clock.rs
@@ -4,7 +4,7 @@ use smallvec::SmallVec;
 use std::{
     cmp::Ordering,
     fmt::Debug,
-    ops::{Index, IndexMut, Shr},
+    ops::{Index, Shr},
 };
 
 use super::data_race::NaReadType;
@@ -92,7 +92,7 @@ impl VTimestamp {
     }
 
     #[inline]
-    pub fn set_read_type(&mut self, read_type: NaReadType) {
+    pub(super) fn set_read_type(&mut self, read_type: NaReadType) {
         self.time_and_read_type = Self::encode_time_and_read_type(self.time(), read_type);
     }
 
@@ -138,7 +138,7 @@ pub struct VClock(SmallVec<[VTimestamp; SMALL_VECTOR]>);
 impl VClock {
     /// Create a new vector-clock containing all zeros except
     /// for a value at the given index
-    pub fn new_with_index(index: VectorIdx, timestamp: VTimestamp) -> VClock {
+    pub(super) fn new_with_index(index: VectorIdx, timestamp: VTimestamp) -> VClock {
         let len = index.index() + 1;
         let mut vec = smallvec::smallvec![VTimestamp::ZERO; len];
         vec[index.index()] = timestamp;
@@ -147,12 +147,18 @@ impl VClock {
 
     /// Load the internal timestamp slice in the vector clock
     #[inline]
-    pub fn as_slice(&self) -> &[VTimestamp] {
+    pub(super) fn as_slice(&self) -> &[VTimestamp] {
+        debug_assert!(!self.0.last().is_some_and(|t| t.time() == 0));
         self.0.as_slice()
     }
 
+    #[inline]
+    pub(super) fn index_mut(&mut self, index: VectorIdx) -> &mut VTimestamp {
+        self.0.as_mut_slice().get_mut(index.to_u32() as usize).unwrap()
+    }
+
     /// Get a mutable slice to the internal vector with minimum `min_len`
-    /// elements, to preserve invariants this vector must modify
+    /// elements. To preserve invariants, the caller must modify
     /// the `min_len`-1 nth element to a non-zero value
     #[inline]
     fn get_mut_with_min_len(&mut self, min_len: usize) -> &mut [VTimestamp] {
@@ -166,7 +172,7 @@ impl VClock {
     /// Increment the vector clock at a known index
     /// this will panic if the vector index overflows
     #[inline]
-    pub fn increment_index(&mut self, idx: VectorIdx, current_span: Span) {
+    pub(super) fn increment_index(&mut self, idx: VectorIdx, current_span: Span) {
         let idx = idx.index();
         let mut_slice = self.get_mut_with_min_len(idx + 1);
         let idx_ref = &mut mut_slice[idx];
@@ -190,28 +196,36 @@ impl VClock {
         }
     }
 
-    /// Set the element at the current index of the vector
-    pub fn set_at_index(&mut self, other: &Self, idx: VectorIdx) {
+    /// Set the element at the current index of the vector. May only increase elements.
+    pub(super) fn set_at_index(&mut self, other: &Self, idx: VectorIdx) {
+        let new_timestamp = other[idx];
+        // Setting to 0 is different, since the last element cannot be 0.
+        if new_timestamp.time() == 0 {
+            if idx.index() >= self.0.len() {
+                // This index does not even exist yet in our clock. Just do nothing.
+                return;
+            }
+            // This changes an existing element. Since it can only increase, that
+            // can never make the last element 0.
+        }
+
         let mut_slice = self.get_mut_with_min_len(idx.index() + 1);
+        let mut_timestamp = &mut mut_slice[idx.index()];
 
-        let prev_span = mut_slice[idx.index()].span;
+        let prev_span = mut_timestamp.span;
 
-        mut_slice[idx.index()] = other[idx];
+        assert!(*mut_timestamp <= new_timestamp, "set_at_index: may only increase the timestamp");
+        *mut_timestamp = new_timestamp;
 
-        let span = &mut mut_slice[idx.index()].span;
+        let span = &mut mut_timestamp.span;
         *span = span.substitute_dummy(prev_span);
     }
 
     /// Set the vector to the all-zero vector
     #[inline]
-    pub fn set_zero_vector(&mut self) {
+    pub(super) fn set_zero_vector(&mut self) {
         self.0.clear();
     }
-
-    /// Return if this vector is the all-zero vector
-    pub fn is_zero_vector(&self) -> bool {
-        self.0.is_empty()
-    }
 }
 
 impl Clone for VClock {
@@ -407,13 +421,6 @@ impl Index<VectorIdx> for VClock {
     }
 }
 
-impl IndexMut<VectorIdx> for VClock {
-    #[inline]
-    fn index_mut(&mut self, index: VectorIdx) -> &mut VTimestamp {
-        self.0.as_mut_slice().get_mut(index.to_u32() as usize).unwrap()
-    }
-}
-
 /// Test vector clock ordering operations
 ///  data-race detection is tested in the external
 ///  test suite
@@ -553,4 +560,15 @@ mod tests {
             "Invalid alt (>=):\n l: {l:?}\n r: {r:?}"
         );
     }
+
+    #[test]
+    fn set_index_to_0() {
+        let mut clock1 = from_slice(&[0, 1, 2, 3]);
+        let clock2 = from_slice(&[0, 2, 3, 4, 0, 5]);
+        // Naively, this would extend clock1 with a new index and set it to 0, making
+        // the last index 0. Make sure that does not happen.
+        clock1.set_at_index(&clock2, VectorIdx(4));
+        // This must not have made the last element 0.
+        assert!(clock1.0.last().unwrap().time() != 0);
+    }
 }
diff --git a/src/tools/miri/src/diagnostics.rs b/src/tools/miri/src/diagnostics.rs
index a2b817ea0d571..0c0ac4c6036d9 100644
--- a/src/tools/miri/src/diagnostics.rs
+++ b/src/tools/miri/src/diagnostics.rs
@@ -291,7 +291,7 @@ pub fn report_error<'tcx, 'mir>(
                     ValidationErrorKind::PointerAsInt { .. } | ValidationErrorKind::PartialPointer
                 ) =>
             {
-                ecx.handle_ice(); // print interpreter backtrace
+                ecx.handle_ice(); // print interpreter backtrace (this is outside the eval `catch_unwind`)
                 bug!(
                     "This validation error should be impossible in Miri: {}",
                     format_interp_error(ecx.tcx.dcx(), e)
@@ -308,7 +308,7 @@ pub fn report_error<'tcx, 'mir>(
                 InvalidProgramInfo::AlreadyReported(_) | InvalidProgramInfo::Layout(..),
             ) => "post-monomorphization error",
             _ => {
-                ecx.handle_ice(); // print interpreter backtrace
+                ecx.handle_ice(); // print interpreter backtrace (this is outside the eval `catch_unwind`)
                 bug!(
                     "This error should be impossible in Miri: {}",
                     format_interp_error(ecx.tcx.dcx(), e)
@@ -361,9 +361,12 @@ pub fn report_error<'tcx, 'mir>(
     };
 
     let stacktrace = ecx.generate_stacktrace();
-    let (stacktrace, was_pruned) = prune_stacktrace(stacktrace, &ecx.machine);
+    let (stacktrace, mut any_pruned) = prune_stacktrace(stacktrace, &ecx.machine);
 
-    // We want to dump the allocation if this is `InvalidUninitBytes`. Since `format_error` consumes `e`, we compute the outut early.
+    let mut show_all_threads = false;
+
+    // We want to dump the allocation if this is `InvalidUninitBytes`.
+    // Since `format_interp_error` consumes `e`, we compute the output early.
     let mut extra = String::new();
     match e.kind() {
         UndefinedBehavior(InvalidUninitBytes(Some((alloc_id, access)))) => {
@@ -375,6 +378,15 @@ pub fn report_error<'tcx, 'mir>(
             .unwrap();
             writeln!(extra, "{:?}", ecx.dump_alloc(*alloc_id)).unwrap();
         }
+        MachineStop(info) => {
+            let info = info.downcast_ref::<TerminationInfo>().expect("invalid MachineStop payload");
+            match info {
+                TerminationInfo::Deadlock => {
+                    show_all_threads = true;
+                }
+                _ => {}
+            }
+        }
         _ => {}
     }
 
@@ -387,18 +399,39 @@ pub fn report_error<'tcx, 'mir>(
         vec![],
         helps,
         &stacktrace,
+        Some(ecx.get_active_thread()),
         &ecx.machine,
     );
 
+    eprint!("{extra}"); // newlines are already in the string
+
+    if show_all_threads {
+        for (thread, stack) in ecx.machine.threads.all_stacks() {
+            if thread != ecx.get_active_thread() {
+                let stacktrace = Frame::generate_stacktrace_from_stack(stack);
+                let (stacktrace, was_pruned) = prune_stacktrace(stacktrace, &ecx.machine);
+                any_pruned |= was_pruned;
+                report_msg(
+                    DiagLevel::Error,
+                    format!("deadlock: the evaluated program deadlocked"),
+                    vec![format!("the evaluated program deadlocked")],
+                    vec![],
+                    vec![],
+                    &stacktrace,
+                    Some(thread),
+                    &ecx.machine,
+                )
+            }
+        }
+    }
+
     // Include a note like `std` does when we omit frames from a backtrace
-    if was_pruned {
+    if any_pruned {
         ecx.tcx.dcx().note(
             "some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace",
         );
     }
 
-    eprint!("{extra}"); // newlines are already in the string
-
     // Debug-dump all locals.
     for (i, frame) in ecx.active_thread_stack().iter().enumerate() {
         trace!("-------------------");
@@ -435,6 +468,7 @@ pub fn report_leaks<'mir, 'tcx>(
             vec![],
             vec![],
             &backtrace,
+            None, // we don't know the thread this is from
             &ecx.machine,
         );
     }
@@ -457,6 +491,7 @@ pub fn report_msg<'tcx>(
     notes: Vec<(Option<SpanData>, String)>,
     helps: Vec<(Option<SpanData>, String)>,
     stacktrace: &[FrameInfo<'tcx>],
+    thread: Option<ThreadId>,
     machine: &MiriMachine<'_, 'tcx>,
 ) {
     let span = stacktrace.first().map_or(DUMMY_SP, |fi| fi.span);
@@ -506,12 +541,13 @@ pub fn report_msg<'tcx>(
     if extra_span {
         write!(backtrace_title, " (of the first span)").unwrap();
     }
-    let thread_name =
-        machine.threads.get_thread_display_name(machine.threads.get_active_thread_id());
-    if thread_name != "main" {
-        // Only print thread name if it is not `main`.
-        write!(backtrace_title, " on thread `{thread_name}`").unwrap();
-    };
+    if let Some(thread) = thread {
+        let thread_name = machine.threads.get_thread_display_name(thread);
+        if thread_name != "main" {
+            // Only print thread name if it is not `main`.
+            write!(backtrace_title, " on thread `{thread_name}`").unwrap();
+        };
+    }
     write!(backtrace_title, ":").unwrap();
     err.note(backtrace_title);
     for (idx, frame_info) in stacktrace.iter().enumerate() {
@@ -628,7 +664,16 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
             _ => vec![],
         };
 
-        report_msg(diag_level, title, vec![msg], notes, helps, &stacktrace, self);
+        report_msg(
+            diag_level,
+            title,
+            vec![msg],
+            notes,
+            helps,
+            &stacktrace,
+            Some(self.threads.get_active_thread_id()),
+            self,
+        );
     }
 }
 
@@ -654,6 +699,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
             vec![],
             vec![],
             &stacktrace,
+            Some(this.get_active_thread()),
             &this.machine,
         );
     }
diff --git a/src/tools/miri/src/eval.rs b/src/tools/miri/src/eval.rs
index df0ede1e1b6a9..45dadb50f4b26 100644
--- a/src/tools/miri/src/eval.rs
+++ b/src/tools/miri/src/eval.rs
@@ -150,6 +150,10 @@ pub struct MiriConfig {
     pub page_size: Option<u64>,
     /// Whether to collect a backtrace when each allocation is created, just in case it leaks.
     pub collect_leak_backtraces: bool,
+    /// Probability for address reuse.
+    pub address_reuse_rate: f64,
+    /// Probability for address reuse across threads.
+    pub address_reuse_cross_thread_rate: f64,
 }
 
 impl Default for MiriConfig {
@@ -186,6 +190,8 @@ impl Default for MiriConfig {
             num_cpus: 1,
             page_size: None,
             collect_leak_backtraces: true,
+            address_reuse_rate: 0.5,
+            address_reuse_cross_thread_rate: 0.1,
         }
     }
 }
diff --git a/src/tools/miri/src/helpers.rs b/src/tools/miri/src/helpers.rs
index e2c6769ccb553..92bdaf301704b 100644
--- a/src/tools/miri/src/helpers.rs
+++ b/src/tools/miri/src/helpers.rs
@@ -912,10 +912,25 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         })
     }
 
+    /// Read bytes from a byte slice.
+    fn read_byte_slice<'a>(
+        &'a self,
+        slice: &ImmTy<'tcx, Provenance>,
+    ) -> InterpResult<'tcx, &'a [u8]>
+    where
+        'mir: 'a,
+    {
+        let this = self.eval_context_ref();
+        let (ptr, len) = slice.to_scalar_pair();
+        let ptr = ptr.to_pointer(this)?;
+        let len = len.to_target_usize(this)?;
+        let bytes = this.read_bytes_ptr_strip_provenance(ptr, Size::from_bytes(len))?;
+        Ok(bytes)
+    }
+
     /// Read a sequence of bytes until the first null terminator.
     fn read_c_str<'a>(&'a self, ptr: Pointer<Option<Provenance>>) -> InterpResult<'tcx, &'a [u8]>
     where
-        'tcx: 'a,
         'mir: 'a,
     {
         let this = self.eval_context_ref();
@@ -1265,9 +1280,7 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
     /// This function is backed by a cache, and can be assumed to be very fast.
     /// It will work even when the stack is empty.
     pub fn current_span(&self) -> Span {
-        self.top_user_relevant_frame()
-            .map(|frame_idx| self.stack()[frame_idx].current_span())
-            .unwrap_or(rustc_span::DUMMY_SP)
+        self.threads.active_thread_ref().current_span()
     }
 
     /// Returns the span of the *caller* of the current operation, again
@@ -1279,7 +1292,7 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
         // We need to go down at least to the caller (len - 2), or however
         // far we have to go to find a frame in a local crate which is also not #[track_caller].
         let frame_idx = self.top_user_relevant_frame().unwrap();
-        let frame_idx = cmp::min(frame_idx, self.stack().len().checked_sub(2).unwrap());
+        let frame_idx = cmp::min(frame_idx, self.stack().len().saturating_sub(2));
         self.stack()[frame_idx].current_span()
     }
 
diff --git a/src/tools/miri/src/lib.rs b/src/tools/miri/src/lib.rs
index 390340d711374..2e19c9ff71356 100644
--- a/src/tools/miri/src/lib.rs
+++ b/src/tools/miri/src/lib.rs
@@ -116,7 +116,9 @@ pub use crate::concurrency::{
     data_race::{AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, EvalContextExt as _},
     init_once::{EvalContextExt as _, InitOnceId},
     sync::{CondvarId, EvalContextExt as _, MutexId, RwLockId, SyncId},
-    thread::{EvalContextExt as _, StackEmptyCallback, ThreadId, ThreadManager, Time},
+    thread::{
+        BlockReason, CallbackTime, EvalContextExt as _, StackEmptyCallback, ThreadId, ThreadManager,
+    },
 };
 pub use crate::diagnostics::{
     report_error, EvalContextExt as _, NonHaltingDiagnostic, TerminationInfo,
diff --git a/src/tools/miri/src/machine.rs b/src/tools/miri/src/machine.rs
index 26f8c0a3babba..cbe70cbffee55 100644
--- a/src/tools/miri/src/machine.rs
+++ b/src/tools/miri/src/machine.rs
@@ -1282,7 +1282,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
         (alloc_id, prove_extra): (AllocId, Self::ProvenanceExtra),
         size: Size,
         align: Align,
-        _kind: MemoryKind,
+        kind: MemoryKind,
     ) -> InterpResult<'tcx> {
         if machine.tracked_alloc_ids.contains(&alloc_id) {
             machine.emit_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id));
@@ -1303,12 +1303,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
         {
             *deallocated_at = Some(machine.current_span());
         }
-        machine.alloc_addresses.get_mut().free_alloc_id(
-            machine.rng.get_mut(),
-            alloc_id,
-            size,
-            align,
-        );
+        machine.free_alloc_id(alloc_id, size, align, kind);
         Ok(())
     }
 
diff --git a/src/tools/miri/src/shims/alloc.rs b/src/tools/miri/src/shims/alloc.rs
new file mode 100644
index 0000000000000..b5ae06c2a492a
--- /dev/null
+++ b/src/tools/miri/src/shims/alloc.rs
@@ -0,0 +1,152 @@
+use std::iter;
+
+use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_target::abi::{Align, Size};
+
+use crate::*;
+use shims::foreign_items::EmulateForeignItemResult;
+
+/// Check some basic requirements for this allocation request:
+/// non-zero size, power-of-two alignment.
+pub(super) fn check_alloc_request<'tcx>(size: u64, align: u64) -> InterpResult<'tcx> {
+    if size == 0 {
+        throw_ub_format!("creating allocation with size 0");
+    }
+    if !align.is_power_of_two() {
+        throw_ub_format!("creating allocation with non-power-of-two alignment {}", align);
+    }
+    Ok(())
+}
+
+impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
+pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
+    /// Returns the minimum alignment for the target architecture for allocations of the given size.
+    fn min_align(&self, size: u64, kind: MiriMemoryKind) -> Align {
+        let this = self.eval_context_ref();
+        // List taken from `library/std/src/sys/pal/common/alloc.rs`.
+        // This list should be kept in sync with the one from libstd.
+        let min_align = match this.tcx.sess.target.arch.as_ref() {
+            "x86" | "arm" | "mips" | "mips32r6" | "powerpc" | "powerpc64" | "wasm32" => 8,
+            "x86_64" | "aarch64" | "mips64" | "mips64r6" | "s390x" | "sparc64" | "loongarch64" =>
+                16,
+            arch => bug!("unsupported target architecture for malloc: `{}`", arch),
+        };
+        // Windows always aligns, even small allocations.
+        // Source: <https://support.microsoft.com/en-us/help/286470/how-to-use-pageheap-exe-in-windows-xp-windows-2000-and-windows-server>
+        // But jemalloc does not, so for the C heap we only align if the allocation is sufficiently big.
+        if kind == MiriMemoryKind::WinHeap || size >= min_align {
+            return Align::from_bytes(min_align).unwrap();
+        }
+        // We have `size < min_align`. Round `size` *down* to the next power of two and use that.
+        fn prev_power_of_two(x: u64) -> u64 {
+            let next_pow2 = x.next_power_of_two();
+            if next_pow2 == x {
+                // x *is* a power of two, just use that.
+                x
+            } else {
+                // x is between two powers, so next = 2*prev.
+                next_pow2 / 2
+            }
+        }
+        Align::from_bytes(prev_power_of_two(size)).unwrap()
+    }
+
+    /// Emulates calling the internal __rust_* allocator functions
+    fn emulate_allocator(
+        &mut self,
+        default: impl FnOnce(&mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx>,
+    ) -> InterpResult<'tcx, EmulateForeignItemResult> {
+        let this = self.eval_context_mut();
+
+        let Some(allocator_kind) = this.tcx.allocator_kind(()) else {
+            // in real code, this symbol does not exist without an allocator
+            return Ok(EmulateForeignItemResult::NotSupported);
+        };
+
+        match allocator_kind {
+            AllocatorKind::Global => {
+                // When `#[global_allocator]` is used, `__rust_*` is defined by the macro expansion
+                // of this attribute. As such we have to call an exported Rust function,
+                // and not execute any Miri shim. Somewhat unintuitively doing so is done
+                // by returning `NotSupported`, which triggers the `lookup_exported_symbol`
+                // fallback case in `emulate_foreign_item`.
+                return Ok(EmulateForeignItemResult::NotSupported);
+            }
+            AllocatorKind::Default => {
+                default(this)?;
+                Ok(EmulateForeignItemResult::NeedsJumping)
+            }
+        }
+    }
+
+    fn malloc(
+        &mut self,
+        size: u64,
+        zero_init: bool,
+        kind: MiriMemoryKind,
+    ) -> InterpResult<'tcx, Pointer<Option<Provenance>>> {
+        let this = self.eval_context_mut();
+        if size == 0 {
+            Ok(Pointer::null())
+        } else {
+            let align = this.min_align(size, kind);
+            let ptr = this.allocate_ptr(Size::from_bytes(size), align, kind.into())?;
+            if zero_init {
+                // We just allocated this, the access is definitely in-bounds and fits into our address space.
+                this.write_bytes_ptr(
+                    ptr.into(),
+                    iter::repeat(0u8).take(usize::try_from(size).unwrap()),
+                )
+                .unwrap();
+            }
+            Ok(ptr.into())
+        }
+    }
+
+    fn free(
+        &mut self,
+        ptr: Pointer<Option<Provenance>>,
+        kind: MiriMemoryKind,
+    ) -> InterpResult<'tcx> {
+        let this = self.eval_context_mut();
+        if !this.ptr_is_null(ptr)? {
+            this.deallocate_ptr(ptr, None, kind.into())?;
+        }
+        Ok(())
+    }
+
+    fn realloc(
+        &mut self,
+        old_ptr: Pointer<Option<Provenance>>,
+        new_size: u64,
+        kind: MiriMemoryKind,
+    ) -> InterpResult<'tcx, Pointer<Option<Provenance>>> {
+        let this = self.eval_context_mut();
+        let new_align = this.min_align(new_size, kind);
+        if this.ptr_is_null(old_ptr)? {
+            // Here we must behave like `malloc`.
+            if new_size == 0 {
+                Ok(Pointer::null())
+            } else {
+                let new_ptr =
+                    this.allocate_ptr(Size::from_bytes(new_size), new_align, kind.into())?;
+                Ok(new_ptr.into())
+            }
+        } else {
+            if new_size == 0 {
+                // C, in their infinite wisdom, made this UB.
+                // <https://www.open-std.org/jtc1/sc22/wg14/www/docs/n2464.pdf>
+                throw_ub_format!("`realloc` with a size of zero");
+            } else {
+                let new_ptr = this.reallocate_ptr(
+                    old_ptr,
+                    None,
+                    Size::from_bytes(new_size),
+                    new_align,
+                    kind.into(),
+                )?;
+                Ok(new_ptr.into())
+            }
+        }
+    }
+}
diff --git a/src/tools/miri/src/shims/extern_static.rs b/src/tools/miri/src/shims/extern_static.rs
index 0284e5b606ced..7c4a54fb461a8 100644
--- a/src/tools/miri/src/shims/extern_static.rs
+++ b/src/tools/miri/src/shims/extern_static.rs
@@ -32,9 +32,14 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
     /// Sets up the "extern statics" for this machine.
     pub fn init_extern_statics(this: &mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> {
         // "__rust_no_alloc_shim_is_unstable"
-        let val = ImmTy::from_int(0, this.machine.layouts.u8);
+        let val = ImmTy::from_int(0, this.machine.layouts.u8); // always 0, value does not matter
         Self::alloc_extern_static(this, "__rust_no_alloc_shim_is_unstable", val)?;
 
+        // "__rust_alloc_error_handler_should_panic"
+        let val = this.tcx.sess.opts.unstable_opts.oom.should_panic();
+        let val = ImmTy::from_int(val, this.machine.layouts.u8);
+        Self::alloc_extern_static(this, "__rust_alloc_error_handler_should_panic", val)?;
+
         match this.tcx.sess.target.os.as_ref() {
             "linux" => {
                 Self::null_ptr_extern_statics(
diff --git a/src/tools/miri/src/shims/foreign_items.rs b/src/tools/miri/src/shims/foreign_items.rs
index 6b0797f6da1ac..636361148a469 100644
--- a/src/tools/miri/src/shims/foreign_items.rs
+++ b/src/tools/miri/src/shims/foreign_items.rs
@@ -1,7 +1,7 @@
 use std::{collections::hash_map::Entry, io::Write, iter, path::Path};
 
 use rustc_apfloat::Float;
-use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_ast::expand::allocator::alloc_error_handler_name;
 use rustc_hir::{def::DefKind, def_id::CrateNum};
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::mir;
@@ -12,6 +12,7 @@ use rustc_target::{
     spec::abi::Abi,
 };
 
+use super::alloc::{check_alloc_request, EvalContextExt as _};
 use super::backtrace::EvalContextExt as _;
 use crate::*;
 use helpers::{ToHost, ToSoft};
@@ -80,6 +81,20 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
                             panic_impl_instance,
                         )));
                     }
+                    "__rust_alloc_error_handler" => {
+                        // Forward to the right symbol that implements this function.
+                        let Some(handler_kind) = this.tcx.alloc_error_handler_kind(()) else {
+                            // in real code, this symbol does not exist without an allocator
+                            throw_unsup_format!(
+                                "`__rust_alloc_error_handler` cannot be called when no alloc error handler is set"
+                            );
+                        };
+                        let name = alloc_error_handler_name(handler_kind);
+                        let handler = this
+                            .lookup_exported_symbol(Symbol::intern(name))?
+                            .expect("missing alloc error handler symbol");
+                        return Ok(Some(handler));
+                    }
                     #[rustfmt::skip]
                     | "exit"
                     | "ExitProcess"
@@ -218,151 +233,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
             Some(instance) => Ok(Some((this.load_mir(instance.def, None)?, instance))),
         }
     }
-
-    fn malloc(
-        &mut self,
-        size: u64,
-        zero_init: bool,
-        kind: MiriMemoryKind,
-    ) -> InterpResult<'tcx, Pointer<Option<Provenance>>> {
-        let this = self.eval_context_mut();
-        if size == 0 {
-            Ok(Pointer::null())
-        } else {
-            let align = this.min_align(size, kind);
-            let ptr = this.allocate_ptr(Size::from_bytes(size), align, kind.into())?;
-            if zero_init {
-                // We just allocated this, the access is definitely in-bounds and fits into our address space.
-                this.write_bytes_ptr(
-                    ptr.into(),
-                    iter::repeat(0u8).take(usize::try_from(size).unwrap()),
-                )
-                .unwrap();
-            }
-            Ok(ptr.into())
-        }
-    }
-
-    fn free(
-        &mut self,
-        ptr: Pointer<Option<Provenance>>,
-        kind: MiriMemoryKind,
-    ) -> InterpResult<'tcx> {
-        let this = self.eval_context_mut();
-        if !this.ptr_is_null(ptr)? {
-            this.deallocate_ptr(ptr, None, kind.into())?;
-        }
-        Ok(())
-    }
-
-    fn realloc(
-        &mut self,
-        old_ptr: Pointer<Option<Provenance>>,
-        new_size: u64,
-        kind: MiriMemoryKind,
-    ) -> InterpResult<'tcx, Pointer<Option<Provenance>>> {
-        let this = self.eval_context_mut();
-        let new_align = this.min_align(new_size, kind);
-        if this.ptr_is_null(old_ptr)? {
-            if new_size == 0 {
-                Ok(Pointer::null())
-            } else {
-                let new_ptr =
-                    this.allocate_ptr(Size::from_bytes(new_size), new_align, kind.into())?;
-                Ok(new_ptr.into())
-            }
-        } else {
-            if new_size == 0 {
-                this.deallocate_ptr(old_ptr, None, kind.into())?;
-                Ok(Pointer::null())
-            } else {
-                let new_ptr = this.reallocate_ptr(
-                    old_ptr,
-                    None,
-                    Size::from_bytes(new_size),
-                    new_align,
-                    kind.into(),
-                )?;
-                Ok(new_ptr.into())
-            }
-        }
-    }
 }
 
 impl<'mir, 'tcx: 'mir> EvalContextExtPriv<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
 trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
-    /// Read bytes from a `(ptr, len)` argument
-    fn read_byte_slice<'i>(&'i self, bytes: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, &'i [u8]>
-    where
-        'mir: 'i,
-    {
-        let this = self.eval_context_ref();
-        let (ptr, len) = this.read_immediate(bytes)?.to_scalar_pair();
-        let ptr = ptr.to_pointer(this)?;
-        let len = len.to_target_usize(this)?;
-        let bytes = this.read_bytes_ptr_strip_provenance(ptr, Size::from_bytes(len))?;
-        Ok(bytes)
-    }
-
-    /// Returns the minimum alignment for the target architecture for allocations of the given size.
-    fn min_align(&self, size: u64, kind: MiriMemoryKind) -> Align {
-        let this = self.eval_context_ref();
-        // List taken from `library/std/src/sys/pal/common/alloc.rs`.
-        // This list should be kept in sync with the one from libstd.
-        let min_align = match this.tcx.sess.target.arch.as_ref() {
-            "x86" | "arm" | "mips" | "mips32r6" | "powerpc" | "powerpc64" | "wasm32" => 8,
-            "x86_64" | "aarch64" | "mips64" | "mips64r6" | "s390x" | "sparc64" | "loongarch64" =>
-                16,
-            arch => bug!("unsupported target architecture for malloc: `{}`", arch),
-        };
-        // Windows always aligns, even small allocations.
-        // Source: <https://support.microsoft.com/en-us/help/286470/how-to-use-pageheap-exe-in-windows-xp-windows-2000-and-windows-server>
-        // But jemalloc does not, so for the C heap we only align if the allocation is sufficiently big.
-        if kind == MiriMemoryKind::WinHeap || size >= min_align {
-            return Align::from_bytes(min_align).unwrap();
-        }
-        // We have `size < min_align`. Round `size` *down* to the next power of two and use that.
-        fn prev_power_of_two(x: u64) -> u64 {
-            let next_pow2 = x.next_power_of_two();
-            if next_pow2 == x {
-                // x *is* a power of two, just use that.
-                x
-            } else {
-                // x is between two powers, so next = 2*prev.
-                next_pow2 / 2
-            }
-        }
-        Align::from_bytes(prev_power_of_two(size)).unwrap()
-    }
-
-    /// Emulates calling the internal __rust_* allocator functions
-    fn emulate_allocator(
-        &mut self,
-        default: impl FnOnce(&mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx>,
-    ) -> InterpResult<'tcx, EmulateForeignItemResult> {
-        let this = self.eval_context_mut();
-
-        let Some(allocator_kind) = this.tcx.allocator_kind(()) else {
-            // in real code, this symbol does not exist without an allocator
-            return Ok(EmulateForeignItemResult::NotSupported);
-        };
-
-        match allocator_kind {
-            AllocatorKind::Global => {
-                // When `#[global_allocator]` is used, `__rust_*` is defined by the macro expansion
-                // of this attribute. As such we have to call an exported Rust function,
-                // and not execute any Miri shim. Somewhat unintuitively doing so is done
-                // by returning `NotSupported`, which triggers the `lookup_exported_symbol`
-                // fallback case in `emulate_foreign_item`.
-                return Ok(EmulateForeignItemResult::NotSupported);
-            }
-            AllocatorKind::Default => {
-                default(this)?;
-                Ok(EmulateForeignItemResult::NeedsJumping)
-            }
-        }
-    }
-
     fn emulate_foreign_item_inner(
         &mut self,
         link_name: Symbol,
@@ -452,7 +326,9 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
                 let [ptr, nth_parent, name] = this.check_shim(abi, Abi::Rust, link_name, args)?;
                 let ptr = this.read_pointer(ptr)?;
                 let nth_parent = this.read_scalar(nth_parent)?.to_u8()?;
-                let name = this.read_byte_slice(name)?;
+                let name = this.read_immediate(name)?;
+
+                let name = this.read_byte_slice(&name)?;
                 // We must make `name` owned because we need to
                 // end the shared borrow from `read_byte_slice` before we can
                 // start the mutable borrow for `give_pointer_debug_name`.
@@ -513,7 +389,8 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
             // README for details.
             "miri_write_to_stdout" | "miri_write_to_stderr" => {
                 let [msg] = this.check_shim(abi, Abi::Rust, link_name, args)?;
-                let msg = this.read_byte_slice(msg)?;
+                let msg = this.read_immediate(msg)?;
+                let msg = this.read_byte_slice(&msg)?;
                 // Note: we're ignoring errors writing to host stdout/stderr.
                 let _ignore = match link_name.as_str() {
                     "miri_write_to_stdout" => std::io::stdout().write_all(msg),
@@ -606,7 +483,7 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
                     let size = this.read_target_usize(size)?;
                     let align = this.read_target_usize(align)?;
 
-                    Self::check_alloc_request(size, align)?;
+                    check_alloc_request(size, align)?;
 
                     let memory_kind = match link_name.as_str() {
                         "__rust_alloc" => MiriMemoryKind::Rust,
@@ -640,7 +517,7 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
                     let size = this.read_target_usize(size)?;
                     let align = this.read_target_usize(align)?;
 
-                    Self::check_alloc_request(size, align)?;
+                    check_alloc_request(size, align)?;
 
                     let ptr = this.allocate_ptr(
                         Size::from_bytes(size),
@@ -704,7 +581,7 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
                     let new_size = this.read_target_usize(new_size)?;
                     // No need to check old_size; we anyway check that they match the allocation.
 
-                    Self::check_alloc_request(new_size, align)?;
+                    check_alloc_request(new_size, align)?;
 
                     let align = Align::from_bytes(align).unwrap();
                     let new_ptr = this.reallocate_ptr(
@@ -1096,16 +973,4 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         // i.e., if we actually emulated the function with one of the shims.
         Ok(EmulateForeignItemResult::NeedsJumping)
     }
-
-    /// Check some basic requirements for this allocation request:
-    /// non-zero size, power-of-two alignment.
-    fn check_alloc_request(size: u64, align: u64) -> InterpResult<'tcx> {
-        if size == 0 {
-            throw_ub_format!("creating allocation with size 0");
-        }
-        if !align.is_power_of_two() {
-            throw_ub_format!("creating allocation with non-power-of-two alignment {}", align);
-        }
-        Ok(())
-    }
 }
diff --git a/src/tools/miri/src/shims/mod.rs b/src/tools/miri/src/shims/mod.rs
index ea6120f757947..85c9a202f7daf 100644
--- a/src/tools/miri/src/shims/mod.rs
+++ b/src/tools/miri/src/shims/mod.rs
@@ -1,5 +1,6 @@
 #![warn(clippy::arithmetic_side_effects)]
 
+mod alloc;
 mod backtrace;
 #[cfg(target_os = "linux")]
 pub mod ffi_support;
diff --git a/src/tools/miri/src/shims/os_str.rs b/src/tools/miri/src/shims/os_str.rs
index 0409e31d65a8a..3e8c35d48aee0 100644
--- a/src/tools/miri/src/shims/os_str.rs
+++ b/src/tools/miri/src/shims/os_str.rs
@@ -251,7 +251,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         this.alloc_os_str_as_wide_str(&os_str, memkind)
     }
 
-    #[allow(clippy::get_first)]
     fn convert_path<'a>(
         &self,
         os_str: Cow<'a, OsStr>,
@@ -260,100 +259,97 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         let this = self.eval_context_ref();
         let target_os = &this.tcx.sess.target.os;
 
+        /// Adjust a Windows path to Unix conventions such that it un-does everything that
+        /// `unix_to_windows` did, and such that if the Windows input path was absolute, then the
+        /// Unix output path is absolute.
+        fn windows_to_unix<T>(path: &mut Vec<T>)
+        where
+            T: From<u8> + Copy + Eq,
+        {
+            let sep = T::from(b'/');
+            // Make sure all path separators are `/`.
+            for c in path.iter_mut() {
+                if *c == b'\\'.into() {
+                    *c = sep;
+                }
+            }
+            // If this starts with `//?/`, it was probably produced by `unix_to_windows` and we
+            // remove the `//?` that got added to get the Unix path back out.
+            if path.get(0..4) == Some(&[sep, sep, b'?'.into(), sep]) {
+                // Remove first 3 characters. It still starts with `/` so it is absolute on Unix.
+                path.splice(0..3, std::iter::empty());
+            }
+            // If it starts with a drive letter (`X:/`), convert it to an absolute Unix path.
+            else if path.get(1..3) == Some(&[b':'.into(), sep]) {
+                // We add a `/` at the beginning, to store the absolute Windows
+                // path in something that looks like an absolute Unix path.
+                path.insert(0, sep);
+            }
+        }
+
+        /// Adjust a Unix path to Windows conventions such that it un-does everything that
+        /// `windows_to_unix` did, and such that if the Unix input path was absolute, then the
+        /// Windows output path is absolute.
+        fn unix_to_windows<T>(path: &mut Vec<T>)
+        where
+            T: From<u8> + Copy + Eq,
+        {
+            let sep = T::from(b'\\');
+            // Make sure all path separators are `\`.
+            for c in path.iter_mut() {
+                if *c == b'/'.into() {
+                    *c = sep;
+                }
+            }
+            // If the path is `\X:\`, the leading separator was probably added by `windows_to_unix`
+            // and we should get rid of it again.
+            if path.get(2..4) == Some(&[b':'.into(), sep]) && path[0] == sep {
+                // The new path is still absolute on Windows.
+                path.remove(0);
+            }
+            // If this starts with a `\` but not a `\\`, then this was absolute on Unix but is
+            // relative on Windows (relative to "the root of the current directory", e.g. the
+            // drive letter).
+            else if path.first() == Some(&sep) && path.get(1) != Some(&sep) {
+                // We add `\\?` so it starts with `\\?\` which is some magic path on Windows
+                // that *is* considered absolute. This way we store the absolute Unix path
+                // in something that looks like an absolute Windows path.
+                path.splice(0..0, [sep, sep, b'?'.into()]);
+            }
+        }
+
+        // Below we assume that everything non-Windows works like Unix, at least
+        // when it comes to file system path conventions.
         #[cfg(windows)]
         return if target_os == "windows" {
             // Windows-on-Windows, all fine.
             os_str
         } else {
             // Unix target, Windows host.
-            let (from, to) = match direction {
-                PathConversion::HostToTarget => ('\\', '/'),
-                PathConversion::TargetToHost => ('/', '\\'),
-            };
-            let mut converted = os_str
-                .encode_wide()
-                .map(|wchar| if wchar == from as u16 { to as u16 } else { wchar })
-                .collect::<Vec<_>>();
-            // We also have to ensure that absolute paths remain absolute.
+            let mut path: Vec<u16> = os_str.encode_wide().collect();
             match direction {
                 PathConversion::HostToTarget => {
-                    // If this is an absolute Windows path that starts with a drive letter (`C:/...`
-                    // after separator conversion), it would not be considered absolute by Unix
-                    // target code.
-                    if converted.get(1).copied() == Some(b':' as u16)
-                        && converted.get(2).copied() == Some(b'/' as u16)
-                    {
-                        // We add a `/` at the beginning, to store the absolute Windows
-                        // path in something that looks like an absolute Unix path.
-                        converted.insert(0, b'/' as u16);
-                    }
+                    windows_to_unix(&mut path);
                 }
                 PathConversion::TargetToHost => {
-                    // If the path is `\C:\`, the leading backslash was probably added by the above code
-                    // and we should get rid of it again.
-                    if converted.get(0).copied() == Some(b'\\' as u16)
-                        && converted.get(2).copied() == Some(b':' as u16)
-                        && converted.get(3).copied() == Some(b'\\' as u16)
-                    {
-                        converted.remove(0);
-                    }
+                    unix_to_windows(&mut path);
                 }
             }
-            Cow::Owned(OsString::from_wide(&converted))
+            Cow::Owned(OsString::from_wide(&path))
         };
         #[cfg(unix)]
         return if target_os == "windows" {
             // Windows target, Unix host.
-            let (from, to) = match direction {
-                PathConversion::HostToTarget => (b'/', b'\\'),
-                PathConversion::TargetToHost => (b'\\', b'/'),
-            };
-            let mut converted = os_str
-                .as_bytes()
-                .iter()
-                .map(|&wchar| if wchar == from { to } else { wchar })
-                .collect::<Vec<_>>();
-            // We also have to ensure that absolute paths remain absolute.
+            let mut path: Vec<u8> = os_str.into_owned().into_encoded_bytes();
             match direction {
                 PathConversion::HostToTarget => {
-                    // If the path is `/C:/`, the leading backslash was probably added by the below
-                    // driver letter handling and we should get rid of it again.
-                    if converted.get(0).copied() == Some(b'\\')
-                        && converted.get(2).copied() == Some(b':')
-                        && converted.get(3).copied() == Some(b'\\')
-                    {
-                        converted.remove(0);
-                    }
-                    // If this start withs a `\` but not a `\\`, then for Windows this is a relative
-                    // path. But the host path is absolute as it started with `/`. We add `\\?` so
-                    // it starts with `\\?\` which is some magic path on Windows that *is*
-                    // considered absolute.
-                    else if converted.get(0).copied() == Some(b'\\')
-                        && converted.get(1).copied() != Some(b'\\')
-                    {
-                        converted.splice(0..0, b"\\\\?".iter().copied());
-                    }
+                    unix_to_windows(&mut path);
                 }
                 PathConversion::TargetToHost => {
-                    // If this starts with `//?/`, it was probably produced by the above code and we
-                    // remove the `//?` that got added to get the Unix path back out.
-                    if converted.get(0).copied() == Some(b'/')
-                        && converted.get(1).copied() == Some(b'/')
-                        && converted.get(2).copied() == Some(b'?')
-                        && converted.get(3).copied() == Some(b'/')
-                    {
-                        // Remove first 3 characters
-                        converted.splice(0..3, std::iter::empty());
-                    }
-                    // If it starts with a drive letter, convert it to an absolute Unix path.
-                    else if converted.get(1).copied() == Some(b':')
-                        && converted.get(2).copied() == Some(b'/')
-                    {
-                        converted.insert(0, b'/');
-                    }
+                    windows_to_unix(&mut path);
                 }
             }
-            Cow::Owned(OsString::from_vec(converted))
+            Cow::Owned(OsString::from_vec(path))
         } else {
             // Unix-on-Unix, all is fine.
             os_str
diff --git a/src/tools/miri/src/shims/time.rs b/src/tools/miri/src/shims/time.rs
index 4535bcf6dfedd..1126c900226df 100644
--- a/src/tools/miri/src/shims/time.rs
+++ b/src/tools/miri/src/shims/time.rs
@@ -236,11 +236,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
             .unwrap_or_else(|| now.checked_add(Duration::from_secs(3600)).unwrap());
 
         let active_thread = this.get_active_thread();
-        this.block_thread(active_thread);
+        this.block_thread(active_thread, BlockReason::Sleep);
 
         this.register_timeout_callback(
             active_thread,
-            Time::Monotonic(timeout_time),
+            CallbackTime::Monotonic(timeout_time),
             Box::new(UnblockCallback { thread_to_unblock: active_thread }),
         );
 
@@ -259,11 +259,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         let timeout_time = this.machine.clock.now().checked_add(duration).unwrap();
 
         let active_thread = this.get_active_thread();
-        this.block_thread(active_thread);
+        this.block_thread(active_thread, BlockReason::Sleep);
 
         this.register_timeout_callback(
             active_thread,
-            Time::Monotonic(timeout_time),
+            CallbackTime::Monotonic(timeout_time),
             Box::new(UnblockCallback { thread_to_unblock: active_thread }),
         );
 
@@ -281,7 +281,7 @@ impl VisitProvenance for UnblockCallback {
 
 impl<'mir, 'tcx: 'mir> MachineCallback<'mir, 'tcx> for UnblockCallback {
     fn call(&self, ecx: &mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> {
-        ecx.unblock_thread(self.thread_to_unblock);
+        ecx.unblock_thread(self.thread_to_unblock, BlockReason::Sleep);
         Ok(())
     }
 }
diff --git a/src/tools/miri/src/shims/unix/foreign_items.rs b/src/tools/miri/src/shims/unix/foreign_items.rs
index 3a56aa9138b45..c72d3bb3df4a8 100644
--- a/src/tools/miri/src/shims/unix/foreign_items.rs
+++ b/src/tools/miri/src/shims/unix/foreign_items.rs
@@ -6,6 +6,7 @@ use rustc_span::Symbol;
 use rustc_target::abi::{Align, Size};
 use rustc_target::spec::abi::Abi;
 
+use crate::shims::alloc::EvalContextExt as _;
 use crate::shims::unix::*;
 use crate::*;
 use shims::foreign_items::EmulateForeignItemResult;
diff --git a/src/tools/miri/src/shims/unix/fs.rs b/src/tools/miri/src/shims/unix/fs.rs
index 31076fdfaf626..ebf9f43c19ef6 100644
--- a/src/tools/miri/src/shims/unix/fs.rs
+++ b/src/tools/miri/src/shims/unix/fs.rs
@@ -196,13 +196,12 @@ struct OpenDir {
     read_dir: ReadDir,
     /// The most recent entry returned by readdir().
     /// Will be freed by the next call.
-    entry: Pointer<Option<Provenance>>,
+    entry: Option<Pointer<Option<Provenance>>>,
 }
 
 impl OpenDir {
     fn new(read_dir: ReadDir) -> Self {
-        // We rely on `free` being a NOP on null pointers.
-        Self { read_dir, entry: Pointer::null() }
+        Self { read_dir, entry: None }
     }
 }
 
@@ -924,8 +923,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
                 let d_name_offset = dirent64_layout.fields.offset(4 /* d_name */).bytes();
                 let size = d_name_offset.checked_add(name_len).unwrap();
 
-                let entry =
-                    this.malloc(size, /*zero_init:*/ false, MiriMemoryKind::Runtime)?;
+                let entry = this.allocate_ptr(
+                    Size::from_bytes(size),
+                    dirent64_layout.align.abi,
+                    MiriMemoryKind::Runtime.into(),
+                )?;
+                let entry: Pointer<Option<Provenance>> = entry.into();
 
                 // If the host is a Unix system, fill in the inode number with its real value.
                 // If not, use 0 as a fallback value.
@@ -949,23 +952,25 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
                 let name_ptr = entry.offset(Size::from_bytes(d_name_offset), this)?;
                 this.write_bytes_ptr(name_ptr, name_bytes.iter().copied())?;
 
-                entry
+                Some(entry)
             }
             None => {
                 // end of stream: return NULL
-                Pointer::null()
+                None
             }
             Some(Err(e)) => {
                 this.set_last_error_from_io_error(e.kind())?;
-                Pointer::null()
+                None
             }
         };
 
         let open_dir = this.machine.dirs.streams.get_mut(&dirp).unwrap();
         let old_entry = std::mem::replace(&mut open_dir.entry, entry);
-        this.free(old_entry, MiriMemoryKind::Runtime)?;
+        if let Some(old_entry) = old_entry {
+            this.deallocate_ptr(old_entry, None, MiriMemoryKind::Runtime.into())?;
+        }
 
-        Ok(Scalar::from_maybe_pointer(entry, this))
+        Ok(Scalar::from_maybe_pointer(entry.unwrap_or_else(Pointer::null), this))
     }
 
     fn macos_fbsd_readdir_r(
@@ -1106,7 +1111,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         }
 
         if let Some(open_dir) = this.machine.dirs.streams.remove(&dirp) {
-            this.free(open_dir.entry, MiriMemoryKind::Runtime)?;
+            if let Some(entry) = open_dir.entry {
+                this.deallocate_ptr(entry, None, MiriMemoryKind::Runtime.into())?;
+            }
             drop(open_dir);
             Ok(0)
         } else {
diff --git a/src/tools/miri/src/shims/unix/linux/sync.rs b/src/tools/miri/src/shims/unix/linux/sync.rs
index ed27066aa6a0f..d4a6cd96f48df 100644
--- a/src/tools/miri/src/shims/unix/linux/sync.rs
+++ b/src/tools/miri/src/shims/unix/linux/sync.rs
@@ -107,16 +107,22 @@ pub fn futex<'tcx>(
                 Some(if wait_bitset {
                     // FUTEX_WAIT_BITSET uses an absolute timestamp.
                     if realtime {
-                        Time::RealTime(SystemTime::UNIX_EPOCH.checked_add(duration).unwrap())
+                        CallbackTime::RealTime(
+                            SystemTime::UNIX_EPOCH.checked_add(duration).unwrap(),
+                        )
                     } else {
-                        Time::Monotonic(this.machine.clock.anchor().checked_add(duration).unwrap())
+                        CallbackTime::Monotonic(
+                            this.machine.clock.anchor().checked_add(duration).unwrap(),
+                        )
                     }
                 } else {
                     // FUTEX_WAIT uses a relative timestamp.
                     if realtime {
-                        Time::RealTime(SystemTime::now().checked_add(duration).unwrap())
+                        CallbackTime::RealTime(SystemTime::now().checked_add(duration).unwrap())
                     } else {
-                        Time::Monotonic(this.machine.clock.now().checked_add(duration).unwrap())
+                        CallbackTime::Monotonic(
+                            this.machine.clock.now().checked_add(duration).unwrap(),
+                        )
                     }
                 })
             };
@@ -169,7 +175,7 @@ pub fn futex<'tcx>(
             let futex_val = this.read_scalar_atomic(&addr, AtomicReadOrd::Relaxed)?.to_i32()?;
             if val == futex_val {
                 // The value still matches, so we block the thread make it wait for FUTEX_WAKE.
-                this.block_thread(thread);
+                this.block_thread(thread, BlockReason::Futex { addr: addr_usize });
                 this.futex_wait(addr_usize, thread, bitset);
                 // Succesfully waking up from FUTEX_WAIT always returns zero.
                 this.write_scalar(Scalar::from_target_isize(0, this), dest)?;
@@ -191,7 +197,10 @@ pub fn futex<'tcx>(
 
                     impl<'mir, 'tcx: 'mir> MachineCallback<'mir, 'tcx> for Callback<'tcx> {
                         fn call(&self, this: &mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> {
-                            this.unblock_thread(self.thread);
+                            this.unblock_thread(
+                                self.thread,
+                                BlockReason::Futex { addr: self.addr_usize },
+                            );
                             this.futex_remove_waiter(self.addr_usize, self.thread);
                             let etimedout = this.eval_libc("ETIMEDOUT");
                             this.set_last_error(etimedout)?;
@@ -249,7 +258,7 @@ pub fn futex<'tcx>(
             #[allow(clippy::arithmetic_side_effects)]
             for _ in 0..val {
                 if let Some(thread) = this.futex_wake(addr_usize, bitset) {
-                    this.unblock_thread(thread);
+                    this.unblock_thread(thread, BlockReason::Futex { addr: addr_usize });
                     this.unregister_timeout_callback_if_exists(thread);
                     n += 1;
                 } else {
diff --git a/src/tools/miri/src/shims/unix/sync.rs b/src/tools/miri/src/shims/unix/sync.rs
index dd301f9ee6d36..e50a8934e09d0 100644
--- a/src/tools/miri/src/shims/unix/sync.rs
+++ b/src/tools/miri/src/shims/unix/sync.rs
@@ -1,6 +1,5 @@
 use std::time::SystemTime;
 
-use crate::concurrency::sync::CondvarLock;
 use crate::concurrency::thread::MachineCallback;
 use crate::*;
 
@@ -225,9 +224,10 @@ fn cond_set_clock_id<'mir, 'tcx: 'mir>(
 fn reacquire_cond_mutex<'mir, 'tcx: 'mir>(
     ecx: &mut MiriInterpCx<'mir, 'tcx>,
     thread: ThreadId,
+    condvar: CondvarId,
     mutex: MutexId,
 ) -> InterpResult<'tcx> {
-    ecx.unblock_thread(thread);
+    ecx.unblock_thread(thread, BlockReason::Condvar(condvar));
     if ecx.mutex_is_locked(mutex) {
         ecx.mutex_enqueue_and_block(mutex, thread);
     } else {
@@ -242,9 +242,10 @@ fn reacquire_cond_mutex<'mir, 'tcx: 'mir>(
 fn post_cond_signal<'mir, 'tcx: 'mir>(
     ecx: &mut MiriInterpCx<'mir, 'tcx>,
     thread: ThreadId,
+    condvar: CondvarId,
     mutex: MutexId,
 ) -> InterpResult<'tcx> {
-    reacquire_cond_mutex(ecx, thread, mutex)?;
+    reacquire_cond_mutex(ecx, thread, condvar, mutex)?;
     // Waiting for the mutex is not included in the waiting time because we need
     // to acquire the mutex always even if we get a timeout.
     ecx.unregister_timeout_callback_if_exists(thread);
@@ -256,6 +257,7 @@ fn post_cond_signal<'mir, 'tcx: 'mir>(
 fn release_cond_mutex_and_block<'mir, 'tcx: 'mir>(
     ecx: &mut MiriInterpCx<'mir, 'tcx>,
     active_thread: ThreadId,
+    condvar: CondvarId,
     mutex: MutexId,
 ) -> InterpResult<'tcx> {
     if let Some(old_locked_count) = ecx.mutex_unlock(mutex, active_thread) {
@@ -265,7 +267,7 @@ fn release_cond_mutex_and_block<'mir, 'tcx: 'mir>(
     } else {
         throw_ub_format!("awaiting on unlocked or owned by a different thread mutex");
     }
-    ecx.block_thread(active_thread);
+    ecx.block_thread(active_thread, BlockReason::Condvar(condvar));
     Ok(())
 }
 
@@ -792,12 +794,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
     fn pthread_cond_signal(&mut self, cond_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx, i32> {
         let this = self.eval_context_mut();
         let id = cond_get_id(this, cond_op)?;
-        if let Some((thread, lock)) = this.condvar_signal(id) {
-            if let CondvarLock::Mutex(mutex) = lock {
-                post_cond_signal(this, thread, mutex)?;
-            } else {
-                panic!("condvar should not have an rwlock on unix");
-            }
+        if let Some((thread, mutex)) = this.condvar_signal(id) {
+            post_cond_signal(this, thread, id, mutex)?;
         }
 
         Ok(0)
@@ -810,12 +808,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         let this = self.eval_context_mut();
         let id = cond_get_id(this, cond_op)?;
 
-        while let Some((thread, lock)) = this.condvar_signal(id) {
-            if let CondvarLock::Mutex(mutex) = lock {
-                post_cond_signal(this, thread, mutex)?;
-            } else {
-                panic!("condvar should not have an rwlock on unix");
-            }
+        while let Some((thread, mutex)) = this.condvar_signal(id) {
+            post_cond_signal(this, thread, id, mutex)?;
         }
 
         Ok(0)
@@ -832,8 +826,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         let mutex_id = mutex_get_id(this, mutex_op)?;
         let active_thread = this.get_active_thread();
 
-        release_cond_mutex_and_block(this, active_thread, mutex_id)?;
-        this.condvar_wait(id, active_thread, CondvarLock::Mutex(mutex_id));
+        release_cond_mutex_and_block(this, active_thread, id, mutex_id)?;
+        this.condvar_wait(id, active_thread, mutex_id);
 
         Ok(0)
     }
@@ -866,15 +860,15 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
 
         let timeout_time = if clock_id == this.eval_libc_i32("CLOCK_REALTIME") {
             this.check_no_isolation("`pthread_cond_timedwait` with `CLOCK_REALTIME`")?;
-            Time::RealTime(SystemTime::UNIX_EPOCH.checked_add(duration).unwrap())
+            CallbackTime::RealTime(SystemTime::UNIX_EPOCH.checked_add(duration).unwrap())
         } else if clock_id == this.eval_libc_i32("CLOCK_MONOTONIC") {
-            Time::Monotonic(this.machine.clock.anchor().checked_add(duration).unwrap())
+            CallbackTime::Monotonic(this.machine.clock.anchor().checked_add(duration).unwrap())
         } else {
             throw_unsup_format!("unsupported clock id: {}", clock_id);
         };
 
-        release_cond_mutex_and_block(this, active_thread, mutex_id)?;
-        this.condvar_wait(id, active_thread, CondvarLock::Mutex(mutex_id));
+        release_cond_mutex_and_block(this, active_thread, id, mutex_id)?;
+        this.condvar_wait(id, active_thread, mutex_id);
 
         // We return success for now and override it in the timeout callback.
         this.write_scalar(Scalar::from_i32(0), dest)?;
@@ -897,7 +891,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
             fn call(&self, ecx: &mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> {
                 // We are not waiting for the condvar any more, wait for the
                 // mutex instead.
-                reacquire_cond_mutex(ecx, self.active_thread, self.mutex_id)?;
+                reacquire_cond_mutex(ecx, self.active_thread, self.id, self.mutex_id)?;
 
                 // Remove the thread from the conditional variable.
                 ecx.condvar_remove_waiter(self.id, self.active_thread);
diff --git a/src/tools/miri/src/shims/windows/foreign_items.rs b/src/tools/miri/src/shims/windows/foreign_items.rs
index de80df3c80d6c..ec4c61014877e 100644
--- a/src/tools/miri/src/shims/windows/foreign_items.rs
+++ b/src/tools/miri/src/shims/windows/foreign_items.rs
@@ -8,6 +8,7 @@ use rustc_span::Symbol;
 use rustc_target::abi::Size;
 use rustc_target::spec::abi::Abi;
 
+use crate::shims::alloc::EvalContextExt as _;
 use crate::shims::os_str::bytes_to_os_str;
 use crate::*;
 use shims::foreign_items::EmulateForeignItemResult;
diff --git a/src/tools/miri/src/shims/windows/sync.rs b/src/tools/miri/src/shims/windows/sync.rs
index f02939f888ec3..836b9e9259591 100644
--- a/src/tools/miri/src/shims/windows/sync.rs
+++ b/src/tools/miri/src/shims/windows/sync.rs
@@ -170,7 +170,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
             None
         } else {
             let duration = Duration::from_millis(timeout_ms.into());
-            Some(Time::Monotonic(this.machine.clock.now().checked_add(duration).unwrap()))
+            Some(CallbackTime::Monotonic(this.machine.clock.now().checked_add(duration).unwrap()))
         };
 
         // See the Linux futex implementation for why this fence exists.
@@ -183,7 +183,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
 
         if futex_val == compare_val {
             // If the values are the same, we have to block.
-            this.block_thread(thread);
+            this.block_thread(thread, BlockReason::Futex { addr });
             this.futex_wait(addr, thread, u32::MAX);
 
             if let Some(timeout_time) = timeout_time {
@@ -202,7 +202,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
 
                 impl<'mir, 'tcx: 'mir> MachineCallback<'mir, 'tcx> for Callback<'tcx> {
                     fn call(&self, this: &mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> {
-                        this.unblock_thread(self.thread);
+                        this.unblock_thread(self.thread, BlockReason::Futex { addr: self.addr });
                         this.futex_remove_waiter(self.addr, self.thread);
                         let error_timeout = this.eval_windows("c", "ERROR_TIMEOUT");
                         this.set_last_error(error_timeout)?;
@@ -233,8 +233,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         // See the Linux futex implementation for why this fence exists.
         this.atomic_fence(AtomicFenceOrd::SeqCst)?;
 
-        if let Some(thread) = this.futex_wake(ptr.addr().bytes(), u32::MAX) {
-            this.unblock_thread(thread);
+        let addr = ptr.addr().bytes();
+        if let Some(thread) = this.futex_wake(addr, u32::MAX) {
+            this.unblock_thread(thread, BlockReason::Futex { addr });
             this.unregister_timeout_callback_if_exists(thread);
         }
 
@@ -248,8 +249,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         // See the Linux futex implementation for why this fence exists.
         this.atomic_fence(AtomicFenceOrd::SeqCst)?;
 
-        while let Some(thread) = this.futex_wake(ptr.addr().bytes(), u32::MAX) {
-            this.unblock_thread(thread);
+        let addr = ptr.addr().bytes();
+        while let Some(thread) = this.futex_wake(addr, u32::MAX) {
+            this.unblock_thread(thread, BlockReason::Futex { addr });
             this.unregister_timeout_callback_if_exists(thread);
         }
 
diff --git a/src/tools/miri/test-cargo-miri/test.bin-target.stdout.ref b/src/tools/miri/test-cargo-miri/test.bin-target.stdout.ref
index 5264530160bc5..6f480259965b9 100644
--- a/src/tools/miri/test-cargo-miri/test.bin-target.stdout.ref
+++ b/src/tools/miri/test-cargo-miri/test.bin-target.stdout.ref
@@ -3,5 +3,5 @@ running 2 tests
 test test::dev_dependency ... ok
 test test::exported_symbol ... ok
 
-test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
+test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME
 
diff --git a/src/tools/miri/test-cargo-miri/test.cross-target.stdout.ref b/src/tools/miri/test-cargo-miri/test.cross-target.stdout.ref
index 8c543e479f4e0..2ef124e4de8f7 100644
--- a/src/tools/miri/test-cargo-miri/test.cross-target.stdout.ref
+++ b/src/tools/miri/test-cargo-miri/test.cross-target.stdout.ref
@@ -1,11 +1,11 @@
 
 running 2 tests
 ..
-test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
+test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME
 
 imported main
 
 running 6 tests
 ...i..
-test result: ok. 5 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out
+test result: ok. 5 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out; finished in $TIME
 
diff --git a/src/tools/miri/test-cargo-miri/test.default.stdout.ref b/src/tools/miri/test-cargo-miri/test.default.stdout.ref
index 922d2120bed0f..2d74d82f769ba 100644
--- a/src/tools/miri/test-cargo-miri/test.default.stdout.ref
+++ b/src/tools/miri/test-cargo-miri/test.default.stdout.ref
@@ -1,13 +1,13 @@
 
 running 2 tests
 ..
-test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
+test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME
 
 imported main
 
 running 6 tests
 ...i..
-test result: ok. 5 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out
+test result: ok. 5 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out; finished in $TIME
 
 
 running 5 tests
diff --git a/src/tools/miri/test-cargo-miri/test.filter.cross-target.stdout.ref b/src/tools/miri/test-cargo-miri/test.filter.cross-target.stdout.ref
index bb0282d6c9167..59b4deb1ff32b 100644
--- a/src/tools/miri/test-cargo-miri/test.filter.cross-target.stdout.ref
+++ b/src/tools/miri/test-cargo-miri/test.filter.cross-target.stdout.ref
@@ -1,12 +1,12 @@
 
 running 0 tests
 
-test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out
+test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in $TIME
 
 imported main
 
 running 1 test
 test simple ... ok
 
-test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 5 filtered out
+test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 5 filtered out; finished in $TIME
 
diff --git a/src/tools/miri/test-cargo-miri/test.filter.stdout.ref b/src/tools/miri/test-cargo-miri/test.filter.stdout.ref
index 5c819dd532366..b68bc983276f7 100644
--- a/src/tools/miri/test-cargo-miri/test.filter.stdout.ref
+++ b/src/tools/miri/test-cargo-miri/test.filter.stdout.ref
@@ -1,14 +1,14 @@
 
 running 0 tests
 
-test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out
+test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out; finished in $TIME
 
 imported main
 
 running 1 test
 test simple ... ok
 
-test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 5 filtered out
+test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 5 filtered out; finished in $TIME
 
 
 running 0 tests
diff --git a/src/tools/miri/test-cargo-miri/test.subcrate.stdout.ref b/src/tools/miri/test-cargo-miri/test.subcrate.stdout.ref
index 67e5c7f8e920c..e50838ebc838b 100644
--- a/src/tools/miri/test-cargo-miri/test.subcrate.stdout.ref
+++ b/src/tools/miri/test-cargo-miri/test.subcrate.stdout.ref
@@ -1,6 +1,6 @@
 
 running 0 tests
 
-test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
+test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in $TIME
 
 subcrate testing
diff --git a/src/tools/miri/test-cargo-miri/test.test-target.stdout.ref b/src/tools/miri/test-cargo-miri/test.test-target.stdout.ref
index dd59b32b780c8..38b3f5c0989e7 100644
--- a/src/tools/miri/test-cargo-miri/test.test-target.stdout.ref
+++ b/src/tools/miri/test-cargo-miri/test.test-target.stdout.ref
@@ -7,5 +7,5 @@ test does_not_work_on_miri ... ignored
 test fail_index_check - should panic ... ok
 test simple ... ok
 
-test result: ok. 5 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out
+test result: ok. 5 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out; finished in $TIME
 
diff --git a/src/tools/miri/tests/fail-dep/concurrency/windows_join_main.rs b/src/tools/miri/tests/fail-dep/concurrency/windows_join_main.rs
index 910e06222ee76..532bda201364f 100644
--- a/src/tools/miri/tests/fail-dep/concurrency/windows_join_main.rs
+++ b/src/tools/miri/tests/fail-dep/concurrency/windows_join_main.rs
@@ -1,6 +1,7 @@
 //@only-target-windows: Uses win32 api functions
 // We are making scheduler assumptions here.
 //@compile-flags: -Zmiri-preemption-rate=0
+//@error-in-other-file: deadlock
 
 // On windows, joining main is not UB, but it will block a thread forever.
 
diff --git a/src/tools/miri/tests/fail-dep/concurrency/windows_join_main.stderr b/src/tools/miri/tests/fail-dep/concurrency/windows_join_main.stderr
index d9137ee74376c..12f35fdeb02a7 100644
--- a/src/tools/miri/tests/fail-dep/concurrency/windows_join_main.stderr
+++ b/src/tools/miri/tests/fail-dep/concurrency/windows_join_main.stderr
@@ -8,7 +8,28 @@ LL |             assert_eq!(WaitForSingleObject(MAIN_THREAD, INFINITE), WAIT_OBJ
    = note: inside closure at RUSTLIB/core/src/macros/mod.rs:LL:CC
    = note: this error originates in the macro `assert_eq` (in Nightly builds, run with -Z macro-backtrace for more info)
 
+error: deadlock: the evaluated program deadlocked
+  --> RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
+   |
+LL |         let rc = unsafe { c::WaitForSingleObject(self.handle.as_raw_handle(), c::INFINITE) };
+   |                                                                                          ^ the evaluated program deadlocked
+   |
+   = note: BACKTRACE:
+   = note: inside `std::sys::pal::PLATFORM::thread::Thread::join` at RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
+   = note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
+   = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
+note: inside `main`
+  --> $DIR/windows_join_main.rs:LL:CC
+   |
+LL | /     thread::spawn(|| {
+LL | |         unsafe {
+LL | |             assert_eq!(WaitForSingleObject(MAIN_THREAD, INFINITE), WAIT_OBJECT_0);
+LL | |         }
+LL | |     })
+LL | |     .join()
+   | |___________^
+
 note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
 
-error: aborting due to 1 previous error
+error: aborting due to 2 previous errors
 
diff --git a/src/tools/miri/tests/fail-dep/concurrency/windows_join_self.rs b/src/tools/miri/tests/fail-dep/concurrency/windows_join_self.rs
index a7c8faf5a98ba..a64265ca0ca5e 100644
--- a/src/tools/miri/tests/fail-dep/concurrency/windows_join_self.rs
+++ b/src/tools/miri/tests/fail-dep/concurrency/windows_join_self.rs
@@ -1,6 +1,7 @@
 //@only-target-windows: Uses win32 api functions
 // We are making scheduler assumptions here.
 //@compile-flags: -Zmiri-preemption-rate=0
+//@error-in-other-file: deadlock
 
 // On windows, a thread joining itself is not UB, but it will deadlock.
 
diff --git a/src/tools/miri/tests/fail-dep/concurrency/windows_join_self.stderr b/src/tools/miri/tests/fail-dep/concurrency/windows_join_self.stderr
index 74699a0317fff..8d26c35de8ab2 100644
--- a/src/tools/miri/tests/fail-dep/concurrency/windows_join_self.stderr
+++ b/src/tools/miri/tests/fail-dep/concurrency/windows_join_self.stderr
@@ -7,7 +7,29 @@ LL |             assert_eq!(WaitForSingleObject(native, INFINITE), WAIT_OBJECT_0
    = note: BACKTRACE on thread `unnamed-ID`:
    = note: inside closure at $DIR/windows_join_self.rs:LL:CC
 
+error: deadlock: the evaluated program deadlocked
+  --> RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
+   |
+LL |         let rc = unsafe { c::WaitForSingleObject(self.handle.as_raw_handle(), c::INFINITE) };
+   |                                                                                          ^ the evaluated program deadlocked
+   |
+   = note: BACKTRACE:
+   = note: inside `std::sys::pal::PLATFORM::thread::Thread::join` at RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
+   = note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
+   = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
+note: inside `main`
+  --> $DIR/windows_join_self.rs:LL:CC
+   |
+LL | /     thread::spawn(|| {
+LL | |         unsafe {
+LL | |             let native = GetCurrentThread();
+LL | |             assert_eq!(WaitForSingleObject(native, INFINITE), WAIT_OBJECT_0);
+LL | |         }
+LL | |     })
+LL | |     .join()
+   | |___________^
+
 note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
 
-error: aborting due to 1 previous error
+error: aborting due to 2 previous errors
 
diff --git a/src/tools/miri/tests/fail-dep/realloc-zero.rs b/src/tools/miri/tests/fail-dep/realloc-zero.rs
new file mode 100644
index 0000000000000..1482798e90c08
--- /dev/null
+++ b/src/tools/miri/tests/fail-dep/realloc-zero.rs
@@ -0,0 +1,10 @@
+//@ignore-target-windows: No libc on Windows
+
+fn main() {
+    unsafe {
+        let p1 = libc::malloc(20);
+        // C made this UB...
+        let p2 = libc::realloc(p1, 0); //~ERROR: `realloc` with a size of zero
+        assert!(p2.is_null());
+    }
+}
diff --git a/src/tools/miri/tests/fail-dep/realloc-zero.stderr b/src/tools/miri/tests/fail-dep/realloc-zero.stderr
new file mode 100644
index 0000000000000..749a61f7396e7
--- /dev/null
+++ b/src/tools/miri/tests/fail-dep/realloc-zero.stderr
@@ -0,0 +1,15 @@
+error: Undefined Behavior: `realloc` with a size of zero
+  --> $DIR/realloc-zero.rs:LL:CC
+   |
+LL |         let p2 = libc::realloc(p1, 0);
+   |                  ^^^^^^^^^^^^^^^^^^^^ `realloc` with a size of zero
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE:
+   = note: inside `main` at $DIR/realloc-zero.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+
diff --git a/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_mutex_deadlock.rs b/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_mutex_deadlock.rs
index 6c3cb738e2997..60d56d41fd986 100644
--- a/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_mutex_deadlock.rs
+++ b/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_mutex_deadlock.rs
@@ -1,4 +1,5 @@
 //@ignore-target-windows: No libc on Windows
+//@error-in-other-file: deadlock
 
 use std::cell::UnsafeCell;
 use std::sync::Arc;
diff --git a/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_mutex_deadlock.stderr b/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_mutex_deadlock.stderr
index 76b1d26bd3326..987d0fc4c2d04 100644
--- a/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_mutex_deadlock.stderr
+++ b/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_mutex_deadlock.stderr
@@ -7,7 +7,26 @@ LL |             assert_eq!(libc::pthread_mutex_lock(lock_copy.0.get() as *mut _
    = note: BACKTRACE on thread `unnamed-ID`:
    = note: inside closure at $DIR/libc_pthread_mutex_deadlock.rs:LL:CC
 
+error: deadlock: the evaluated program deadlocked
+  --> RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
+   |
+LL |             let ret = libc::pthread_join(self.id, ptr::null_mut());
+   |                                                                  ^ the evaluated program deadlocked
+   |
+   = note: BACKTRACE:
+   = note: inside `std::sys::pal::PLATFORM::thread::Thread::join` at RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
+   = note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
+   = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
+note: inside `main`
+  --> $DIR/libc_pthread_mutex_deadlock.rs:LL:CC
+   |
+LL | /         thread::spawn(move || {
+LL | |             assert_eq!(libc::pthread_mutex_lock(lock_copy.0.get() as *mut _), 0);
+LL | |         })
+LL | |         .join()
+   | |_______________^
+
 note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
 
-error: aborting due to 1 previous error
+error: aborting due to 2 previous errors
 
diff --git a/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_rwlock_write_read_deadlock.rs b/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_rwlock_write_read_deadlock.rs
index 201844615e182..0f02c3231a688 100644
--- a/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_rwlock_write_read_deadlock.rs
+++ b/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_rwlock_write_read_deadlock.rs
@@ -1,4 +1,5 @@
 //@ignore-target-windows: No libc on Windows
+//@error-in-other-file: deadlock
 
 use std::cell::UnsafeCell;
 use std::sync::Arc;
diff --git a/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_rwlock_write_read_deadlock.stderr b/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_rwlock_write_read_deadlock.stderr
index 5501dab81aca3..bc9b15f293ef8 100644
--- a/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_rwlock_write_read_deadlock.stderr
+++ b/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_rwlock_write_read_deadlock.stderr
@@ -7,7 +7,26 @@ LL |             assert_eq!(libc::pthread_rwlock_wrlock(lock_copy.0.get() as *mu
    = note: BACKTRACE on thread `unnamed-ID`:
    = note: inside closure at $DIR/libc_pthread_rwlock_write_read_deadlock.rs:LL:CC
 
+error: deadlock: the evaluated program deadlocked
+  --> RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
+   |
+LL |             let ret = libc::pthread_join(self.id, ptr::null_mut());
+   |                                                                  ^ the evaluated program deadlocked
+   |
+   = note: BACKTRACE:
+   = note: inside `std::sys::pal::PLATFORM::thread::Thread::join` at RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
+   = note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
+   = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
+note: inside `main`
+  --> $DIR/libc_pthread_rwlock_write_read_deadlock.rs:LL:CC
+   |
+LL | /         thread::spawn(move || {
+LL | |             assert_eq!(libc::pthread_rwlock_wrlock(lock_copy.0.get() as *mut _), 0);
+LL | |         })
+LL | |         .join()
+   | |_______________^
+
 note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
 
-error: aborting due to 1 previous error
+error: aborting due to 2 previous errors
 
diff --git a/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_rwlock_write_write_deadlock.rs b/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_rwlock_write_write_deadlock.rs
index b1d7e0492e5a2..10be5b3375230 100644
--- a/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_rwlock_write_write_deadlock.rs
+++ b/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_rwlock_write_write_deadlock.rs
@@ -1,4 +1,5 @@
 //@ignore-target-windows: No libc on Windows
+//@error-in-other-file: deadlock
 
 use std::cell::UnsafeCell;
 use std::sync::Arc;
diff --git a/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_rwlock_write_write_deadlock.stderr b/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_rwlock_write_write_deadlock.stderr
index 815d85af502c9..66c142bbc5c80 100644
--- a/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_rwlock_write_write_deadlock.stderr
+++ b/src/tools/miri/tests/fail-dep/shims/sync/libc_pthread_rwlock_write_write_deadlock.stderr
@@ -7,7 +7,26 @@ LL |             assert_eq!(libc::pthread_rwlock_wrlock(lock_copy.0.get() as *mu
    = note: BACKTRACE on thread `unnamed-ID`:
    = note: inside closure at $DIR/libc_pthread_rwlock_write_write_deadlock.rs:LL:CC
 
+error: deadlock: the evaluated program deadlocked
+  --> RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
+   |
+LL |             let ret = libc::pthread_join(self.id, ptr::null_mut());
+   |                                                                  ^ the evaluated program deadlocked
+   |
+   = note: BACKTRACE:
+   = note: inside `std::sys::pal::PLATFORM::thread::Thread::join` at RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
+   = note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
+   = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
+note: inside `main`
+  --> $DIR/libc_pthread_rwlock_write_write_deadlock.rs:LL:CC
+   |
+LL | /         thread::spawn(move || {
+LL | |             assert_eq!(libc::pthread_rwlock_wrlock(lock_copy.0.get() as *mut _), 0);
+LL | |         })
+LL | |         .join()
+   | |_______________^
+
 note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
 
-error: aborting due to 1 previous error
+error: aborting due to 2 previous errors
 
diff --git a/src/tools/miri/tests/fail/alloc/alloc_error_handler.rs b/src/tools/miri/tests/fail/alloc/alloc_error_handler.rs
new file mode 100644
index 0000000000000..2097126e16b9b
--- /dev/null
+++ b/src/tools/miri/tests/fail/alloc/alloc_error_handler.rs
@@ -0,0 +1,10 @@
+//@error-in-other-file: aborted
+//@normalize-stderr-test: "unsafe \{ libc::abort\(\) \}|crate::intrinsics::abort\(\);" -> "ABORT();"
+//@normalize-stderr-test: "\| +\^+" -> "| ^"
+#![feature(allocator_api)]
+
+use std::alloc::*;
+
+fn main() {
+    handle_alloc_error(Layout::for_value(&0));
+}
diff --git a/src/tools/miri/tests/fail/alloc/alloc_error_handler.stderr b/src/tools/miri/tests/fail/alloc/alloc_error_handler.stderr
new file mode 100644
index 0000000000000..d1731a0f4206f
--- /dev/null
+++ b/src/tools/miri/tests/fail/alloc/alloc_error_handler.stderr
@@ -0,0 +1,24 @@
+memory allocation of 4 bytes failed
+error: abnormal termination: the program aborted execution
+  --> RUSTLIB/std/src/sys/pal/PLATFORM/mod.rs:LL:CC
+   |
+LL |     ABORT();
+   | ^ the program aborted execution
+   |
+   = note: BACKTRACE:
+   = note: inside `std::sys::pal::PLATFORM::abort_internal` at RUSTLIB/std/src/sys/pal/PLATFORM/mod.rs:LL:CC
+   = note: inside `std::process::abort` at RUSTLIB/std/src/process.rs:LL:CC
+   = note: inside `std::alloc::rust_oom` at RUSTLIB/std/src/alloc.rs:LL:CC
+   = note: inside `std::alloc::_::__rg_oom` at RUSTLIB/std/src/alloc.rs:LL:CC
+   = note: inside `std::alloc::handle_alloc_error::rt_error` at RUSTLIB/alloc/src/alloc.rs:LL:CC
+   = note: inside `std::alloc::handle_alloc_error` at RUSTLIB/alloc/src/alloc.rs:LL:CC
+note: inside `main`
+  --> $DIR/alloc_error_handler.rs:LL:CC
+   |
+LL |     handle_alloc_error(Layout::for_value(&0));
+   | ^
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+
diff --git a/src/tools/miri/tests/fail/alloc/alloc_error_handler_custom.rs b/src/tools/miri/tests/fail/alloc/alloc_error_handler_custom.rs
new file mode 100644
index 0000000000000..babdb73f093a1
--- /dev/null
+++ b/src/tools/miri/tests/fail/alloc/alloc_error_handler_custom.rs
@@ -0,0 +1,49 @@
+//@compile-flags: -Cpanic=abort
+#![feature(start, core_intrinsics)]
+#![feature(alloc_error_handler)]
+#![feature(allocator_api)]
+#![no_std]
+
+extern crate alloc;
+
+use alloc::alloc::*;
+use core::fmt::Write;
+
+#[path = "../../utils/mod.no_std.rs"]
+mod utils;
+
+#[alloc_error_handler]
+fn alloc_error_handler(layout: Layout) -> ! {
+    let _ = writeln!(utils::MiriStderr, "custom alloc error handler: {layout:?}");
+    core::intrinsics::abort(); //~ERROR: aborted
+}
+
+// rustc requires us to provide some more things that aren't actually used by this test
+mod plumbing {
+    use super::*;
+
+    #[panic_handler]
+    fn panic_handler(_: &core::panic::PanicInfo) -> ! {
+        loop {}
+    }
+
+    struct NoAlloc;
+
+    unsafe impl GlobalAlloc for NoAlloc {
+        unsafe fn alloc(&self, _: Layout) -> *mut u8 {
+            unreachable!();
+        }
+
+        unsafe fn dealloc(&self, _: *mut u8, _: Layout) {
+            unreachable!();
+        }
+    }
+
+    #[global_allocator]
+    static GLOBAL: NoAlloc = NoAlloc;
+}
+
+#[start]
+fn start(_: isize, _: *const *const u8) -> isize {
+    handle_alloc_error(Layout::for_value(&0));
+}
diff --git a/src/tools/miri/tests/fail/alloc/alloc_error_handler_custom.stderr b/src/tools/miri/tests/fail/alloc/alloc_error_handler_custom.stderr
new file mode 100644
index 0000000000000..5d9c2e2fb4c38
--- /dev/null
+++ b/src/tools/miri/tests/fail/alloc/alloc_error_handler_custom.stderr
@@ -0,0 +1,27 @@
+custom alloc error handler: Layout { size: 4, align: 4 (1 << 2) }
+error: abnormal termination: the program aborted execution
+  --> $DIR/alloc_error_handler_custom.rs:LL:CC
+   |
+LL |     core::intrinsics::abort();
+   |     ^^^^^^^^^^^^^^^^^^^^^^^^^ the program aborted execution
+   |
+   = note: BACKTRACE:
+   = note: inside `alloc_error_handler` at $DIR/alloc_error_handler_custom.rs:LL:CC
+note: inside `_::__rg_oom`
+  --> $DIR/alloc_error_handler_custom.rs:LL:CC
+   |
+LL | #[alloc_error_handler]
+   | ---------------------- in this procedural macro expansion
+LL | fn alloc_error_handler(layout: Layout) -> ! {
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   = note: inside `alloc::alloc::handle_alloc_error::rt_error` at RUSTLIB/alloc/src/alloc.rs:LL:CC
+   = note: inside `alloc::alloc::handle_alloc_error` at RUSTLIB/alloc/src/alloc.rs:LL:CC
+note: inside `start`
+  --> $DIR/alloc_error_handler_custom.rs:LL:CC
+   |
+LL |     handle_alloc_error(Layout::for_value(&0));
+   |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   = note: this error originates in the attribute macro `alloc_error_handler` (in Nightly builds, run with -Z macro-backtrace for more info)
+
+error: aborting due to 1 previous error
+
diff --git a/src/tools/miri/tests/fail/alloc/alloc_error_handler_no_std.rs b/src/tools/miri/tests/fail/alloc/alloc_error_handler_no_std.rs
new file mode 100644
index 0000000000000..18a8a61f22f65
--- /dev/null
+++ b/src/tools/miri/tests/fail/alloc/alloc_error_handler_no_std.rs
@@ -0,0 +1,47 @@
+//@compile-flags: -Cpanic=abort
+#![feature(start, core_intrinsics)]
+#![feature(alloc_error_handler)]
+#![feature(allocator_api)]
+#![no_std]
+
+extern crate alloc;
+
+use alloc::alloc::*;
+use core::fmt::Write;
+
+#[path = "../../utils/mod.no_std.rs"]
+mod utils;
+
+// The default no_std alloc_error_handler is a panic.
+
+#[panic_handler]
+fn panic_handler(panic_info: &core::panic::PanicInfo) -> ! {
+    let _ = writeln!(utils::MiriStderr, "custom panic handler called!");
+    let _ = writeln!(utils::MiriStderr, "{panic_info}");
+    core::intrinsics::abort(); //~ERROR: aborted
+}
+
+// rustc requires us to provide some more things that aren't actually used by this test
+mod plumbing {
+    use super::*;
+
+    struct NoAlloc;
+
+    unsafe impl GlobalAlloc for NoAlloc {
+        unsafe fn alloc(&self, _: Layout) -> *mut u8 {
+            unreachable!();
+        }
+
+        unsafe fn dealloc(&self, _: *mut u8, _: Layout) {
+            unreachable!();
+        }
+    }
+
+    #[global_allocator]
+    static GLOBAL: NoAlloc = NoAlloc;
+}
+
+#[start]
+fn start(_: isize, _: *const *const u8) -> isize {
+    handle_alloc_error(Layout::for_value(&0));
+}
diff --git a/src/tools/miri/tests/fail/alloc/alloc_error_handler_no_std.stderr b/src/tools/miri/tests/fail/alloc/alloc_error_handler_no_std.stderr
new file mode 100644
index 0000000000000..6b98f6f75d853
--- /dev/null
+++ b/src/tools/miri/tests/fail/alloc/alloc_error_handler_no_std.stderr
@@ -0,0 +1,24 @@
+custom panic handler called!
+panicked at RUSTLIB/alloc/src/alloc.rs:LL:CC:
+memory allocation of 4 bytes failed
+error: abnormal termination: the program aborted execution
+  --> $DIR/alloc_error_handler_no_std.rs:LL:CC
+   |
+LL |     core::intrinsics::abort();
+   |     ^^^^^^^^^^^^^^^^^^^^^^^^^ the program aborted execution
+   |
+   = note: BACKTRACE:
+   = note: inside `panic_handler` at $DIR/alloc_error_handler_no_std.rs:LL:CC
+   = note: inside `alloc::alloc::__alloc_error_handler::__rdl_oom` at RUSTLIB/alloc/src/alloc.rs:LL:CC
+   = note: inside `alloc::alloc::handle_alloc_error::rt_error` at RUSTLIB/alloc/src/alloc.rs:LL:CC
+   = note: inside `alloc::alloc::handle_alloc_error` at RUSTLIB/alloc/src/alloc.rs:LL:CC
+note: inside `start`
+  --> $DIR/alloc_error_handler_no_std.rs:LL:CC
+   |
+LL |     handle_alloc_error(Layout::for_value(&0));
+   |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+
diff --git a/src/tools/miri/tests/fail/both_borrows/retag_data_race_write.rs b/src/tools/miri/tests/fail/both_borrows/retag_data_race_write.rs
index 3edaf10f3dc69..0061679eaa4e7 100644
--- a/src/tools/miri/tests/fail/both_borrows/retag_data_race_write.rs
+++ b/src/tools/miri/tests/fail/both_borrows/retag_data_race_write.rs
@@ -1,6 +1,8 @@
 //! Make sure that a retag acts like a write for the data race model.
 //@revisions: stack tree
 //@compile-flags: -Zmiri-preemption-rate=0
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 //@[tree]compile-flags: -Zmiri-tree-borrows
 #[derive(Copy, Clone)]
 struct SendPtr(*mut u8);
diff --git a/src/tools/miri/tests/fail/data_race/alloc_read_race.rs b/src/tools/miri/tests/fail/data_race/alloc_read_race.rs
index 2cf366069073a..c85c0ebe24451 100644
--- a/src/tools/miri/tests/fail/data_race/alloc_read_race.rs
+++ b/src/tools/miri/tests/fail/data_race/alloc_read_race.rs
@@ -1,4 +1,6 @@
 //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 #![feature(new_uninit)]
 
 use std::mem::MaybeUninit;
diff --git a/src/tools/miri/tests/fail/data_race/alloc_write_race.rs b/src/tools/miri/tests/fail/data_race/alloc_write_race.rs
index e95e0e1a841d6..9e2a430dd94f5 100644
--- a/src/tools/miri/tests/fail/data_race/alloc_write_race.rs
+++ b/src/tools/miri/tests/fail/data_race/alloc_write_race.rs
@@ -1,4 +1,6 @@
 //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 #![feature(new_uninit)]
 
 use std::ptr::null_mut;
diff --git a/src/tools/miri/tests/fail/data_race/atomic_read_na_write_race1.rs b/src/tools/miri/tests/fail/data_race/atomic_read_na_write_race1.rs
index a256267bcda09..4003892f0a609 100644
--- a/src/tools/miri/tests/fail/data_race/atomic_read_na_write_race1.rs
+++ b/src/tools/miri/tests/fail/data_race/atomic_read_na_write_race1.rs
@@ -1,5 +1,7 @@
 // We want to control preemption here. Stacked borrows interferes by having its own accesses.
 //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::thread::spawn;
diff --git a/src/tools/miri/tests/fail/data_race/atomic_read_na_write_race2.rs b/src/tools/miri/tests/fail/data_race/atomic_read_na_write_race2.rs
index cc6a0742c23eb..8bceba9380adf 100644
--- a/src/tools/miri/tests/fail/data_race/atomic_read_na_write_race2.rs
+++ b/src/tools/miri/tests/fail/data_race/atomic_read_na_write_race2.rs
@@ -1,5 +1,7 @@
 // We want to control preemption here. Stacked borrows interferes by having its own accesses.
 //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::sync::atomic::AtomicUsize;
 use std::sync::atomic::Ordering;
diff --git a/src/tools/miri/tests/fail/data_race/atomic_write_na_read_race1.rs b/src/tools/miri/tests/fail/data_race/atomic_write_na_read_race1.rs
index 7392781e6c6b1..1a2746a26f4d8 100644
--- a/src/tools/miri/tests/fail/data_race/atomic_write_na_read_race1.rs
+++ b/src/tools/miri/tests/fail/data_race/atomic_write_na_read_race1.rs
@@ -1,5 +1,7 @@
 // We want to control preemption here. Stacked borrows interferes by having its own accesses.
 //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::sync::atomic::AtomicUsize;
 use std::sync::atomic::Ordering;
diff --git a/src/tools/miri/tests/fail/data_race/atomic_write_na_read_race2.rs b/src/tools/miri/tests/fail/data_race/atomic_write_na_read_race2.rs
index f681ce0c051ad..e0876a93fdd8c 100644
--- a/src/tools/miri/tests/fail/data_race/atomic_write_na_read_race2.rs
+++ b/src/tools/miri/tests/fail/data_race/atomic_write_na_read_race2.rs
@@ -1,5 +1,7 @@
 // We want to control preemption here. Stacked borrows interferes by having its own accesses.
 //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::thread::spawn;
diff --git a/src/tools/miri/tests/fail/data_race/atomic_write_na_write_race1.rs b/src/tools/miri/tests/fail/data_race/atomic_write_na_write_race1.rs
index 47a3ef5d16897..1010216a4976b 100644
--- a/src/tools/miri/tests/fail/data_race/atomic_write_na_write_race1.rs
+++ b/src/tools/miri/tests/fail/data_race/atomic_write_na_write_race1.rs
@@ -1,5 +1,7 @@
 // We want to control preemption here. Stacked borrows interferes by having its own accesses.
 //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::thread::spawn;
diff --git a/src/tools/miri/tests/fail/data_race/atomic_write_na_write_race2.rs b/src/tools/miri/tests/fail/data_race/atomic_write_na_write_race2.rs
index 8bba4a88924e6..b494bd3a003d3 100644
--- a/src/tools/miri/tests/fail/data_race/atomic_write_na_write_race2.rs
+++ b/src/tools/miri/tests/fail/data_race/atomic_write_na_write_race2.rs
@@ -1,5 +1,7 @@
 // We want to control preemption here. Stacked borrows interferes by having its own accesses.
 //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::sync::atomic::AtomicUsize;
 use std::sync::atomic::Ordering;
diff --git a/src/tools/miri/tests/fail/data_race/dangling_thread_async_race.rs b/src/tools/miri/tests/fail/data_race/dangling_thread_async_race.rs
index 5b9005606e0e7..dffafe3cfaa97 100644
--- a/src/tools/miri/tests/fail/data_race/dangling_thread_async_race.rs
+++ b/src/tools/miri/tests/fail/data_race/dangling_thread_async_race.rs
@@ -1,5 +1,7 @@
 // We want to control preemption here. Stacked borrows interferes by having its own accesses.
 //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::mem;
 use std::thread::{sleep, spawn};
diff --git a/src/tools/miri/tests/fail/data_race/dangling_thread_race.rs b/src/tools/miri/tests/fail/data_race/dangling_thread_race.rs
index 91c1191e03636..8dc35c7ea720e 100644
--- a/src/tools/miri/tests/fail/data_race/dangling_thread_race.rs
+++ b/src/tools/miri/tests/fail/data_race/dangling_thread_race.rs
@@ -1,5 +1,7 @@
 // We want to control preemption here. Stacked borrows interferes by having its own accesses.
 //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::mem;
 use std::thread::{sleep, spawn};
diff --git a/src/tools/miri/tests/fail/data_race/dealloc_read_race1.rs b/src/tools/miri/tests/fail/data_race/dealloc_read_race1.rs
index 5928e47176050..f174909e9d56b 100644
--- a/src/tools/miri/tests/fail/data_race/dealloc_read_race1.rs
+++ b/src/tools/miri/tests/fail/data_race/dealloc_read_race1.rs
@@ -1,5 +1,7 @@
 // We want to control preemption here. Stacked borrows interferes by having its own accesses.
 //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::thread::spawn;
 
diff --git a/src/tools/miri/tests/fail/data_race/dealloc_read_race2.rs b/src/tools/miri/tests/fail/data_race/dealloc_read_race2.rs
index c5f82cc9a74e5..1edfbf5e61c0d 100644
--- a/src/tools/miri/tests/fail/data_race/dealloc_read_race2.rs
+++ b/src/tools/miri/tests/fail/data_race/dealloc_read_race2.rs
@@ -1,5 +1,7 @@
 // We want to control preemption here. Stacked borrows interferes by having its own accesses.
 //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::thread::spawn;
 
diff --git a/src/tools/miri/tests/fail/data_race/dealloc_read_race_stack.rs b/src/tools/miri/tests/fail/data_race/dealloc_read_race_stack.rs
index 1095f1e4e82fa..c67e03d362b05 100644
--- a/src/tools/miri/tests/fail/data_race/dealloc_read_race_stack.rs
+++ b/src/tools/miri/tests/fail/data_race/dealloc_read_race_stack.rs
@@ -1,4 +1,6 @@
 //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::ptr::null_mut;
 use std::sync::atomic::{AtomicPtr, Ordering};
diff --git a/src/tools/miri/tests/fail/data_race/dealloc_write_race1.rs b/src/tools/miri/tests/fail/data_race/dealloc_write_race1.rs
index b5911e5111b3d..7605f1911db77 100644
--- a/src/tools/miri/tests/fail/data_race/dealloc_write_race1.rs
+++ b/src/tools/miri/tests/fail/data_race/dealloc_write_race1.rs
@@ -1,5 +1,7 @@
 // We want to control preemption here. Stacked borrows interferes by having its own accesses.
 //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::thread::spawn;
 
diff --git a/src/tools/miri/tests/fail/data_race/dealloc_write_race2.rs b/src/tools/miri/tests/fail/data_race/dealloc_write_race2.rs
index 7a2c882f7ecc4..4f3819bd636b2 100644
--- a/src/tools/miri/tests/fail/data_race/dealloc_write_race2.rs
+++ b/src/tools/miri/tests/fail/data_race/dealloc_write_race2.rs
@@ -1,5 +1,7 @@
 // We want to control preemption here. Stacked borrows interferes by having its own accesses.
 //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::thread::spawn;
 
diff --git a/src/tools/miri/tests/fail/data_race/dealloc_write_race_stack.rs b/src/tools/miri/tests/fail/data_race/dealloc_write_race_stack.rs
index 5ee4cc04a8fb0..8e63bc1dc7b41 100644
--- a/src/tools/miri/tests/fail/data_race/dealloc_write_race_stack.rs
+++ b/src/tools/miri/tests/fail/data_race/dealloc_write_race_stack.rs
@@ -1,4 +1,6 @@
 //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::ptr::null_mut;
 use std::sync::atomic::{AtomicPtr, Ordering};
diff --git a/src/tools/miri/tests/fail/data_race/enable_after_join_to_main.rs b/src/tools/miri/tests/fail/data_race/enable_after_join_to_main.rs
index f2da45d7275b0..53050608d2715 100644
--- a/src/tools/miri/tests/fail/data_race/enable_after_join_to_main.rs
+++ b/src/tools/miri/tests/fail/data_race/enable_after_join_to_main.rs
@@ -1,5 +1,7 @@
 // We want to control preemption here. Stacked borrows interferes by having its own accesses.
 //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::thread::spawn;
 
diff --git a/src/tools/miri/tests/fail/data_race/fence_after_load.rs b/src/tools/miri/tests/fail/data_race/fence_after_load.rs
index 683e3b9c7ac66..92cb4ccccf530 100644
--- a/src/tools/miri/tests/fail/data_race/fence_after_load.rs
+++ b/src/tools/miri/tests/fail/data_race/fence_after_load.rs
@@ -1,5 +1,8 @@
 // We want to control preemption here. Stacked borrows interferes by having its own accesses.
 //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
+
 use std::sync::atomic::{fence, AtomicUsize, Ordering};
 use std::sync::Arc;
 use std::thread;
diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_read.rs b/src/tools/miri/tests/fail/data_race/mixed_size_read.rs
index 091a47070bafb..61af972b3dca8 100644
--- a/src/tools/miri/tests/fail/data_race/mixed_size_read.rs
+++ b/src/tools/miri/tests/fail/data_race/mixed_size_read.rs
@@ -1,4 +1,7 @@
 //@compile-flags: -Zmiri-preemption-rate=0.0 -Zmiri-disable-weak-memory-emulation
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
+
 use std::sync::atomic::{AtomicU16, AtomicU8, Ordering};
 use std::thread;
 
diff --git a/src/tools/miri/tests/fail/data_race/mixed_size_write.rs b/src/tools/miri/tests/fail/data_race/mixed_size_write.rs
index 49fb6c1d5c3e3..12e51bb94296a 100644
--- a/src/tools/miri/tests/fail/data_race/mixed_size_write.rs
+++ b/src/tools/miri/tests/fail/data_race/mixed_size_write.rs
@@ -1,4 +1,7 @@
 //@compile-flags: -Zmiri-preemption-rate=0.0 -Zmiri-disable-weak-memory-emulation
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
+
 use std::sync::atomic::{AtomicU16, AtomicU8, Ordering};
 use std::thread;
 
diff --git a/src/tools/miri/tests/fail/data_race/read_read_race1.rs b/src/tools/miri/tests/fail/data_race/read_read_race1.rs
index f66b5ca3d53af..02aa4e4b716e3 100644
--- a/src/tools/miri/tests/fail/data_race/read_read_race1.rs
+++ b/src/tools/miri/tests/fail/data_race/read_read_race1.rs
@@ -1,4 +1,7 @@
 //@compile-flags: -Zmiri-preemption-rate=0.0
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
+
 use std::sync::atomic::{AtomicU16, Ordering};
 use std::thread;
 
diff --git a/src/tools/miri/tests/fail/data_race/read_read_race2.rs b/src/tools/miri/tests/fail/data_race/read_read_race2.rs
index d87b667d91287..3b94c9143f3b2 100644
--- a/src/tools/miri/tests/fail/data_race/read_read_race2.rs
+++ b/src/tools/miri/tests/fail/data_race/read_read_race2.rs
@@ -1,4 +1,7 @@
 //@compile-flags: -Zmiri-preemption-rate=0.0
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
+
 use std::sync::atomic::{AtomicU16, Ordering};
 use std::thread;
 
diff --git a/src/tools/miri/tests/fail/data_race/read_write_race.rs b/src/tools/miri/tests/fail/data_race/read_write_race.rs
index 70971b59ffe89..adf19dda9d3db 100644
--- a/src/tools/miri/tests/fail/data_race/read_write_race.rs
+++ b/src/tools/miri/tests/fail/data_race/read_write_race.rs
@@ -1,5 +1,7 @@
 // We want to control preemption here. Stacked borrows interferes by having its own accesses.
 //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::thread::spawn;
 
diff --git a/src/tools/miri/tests/fail/data_race/read_write_race_stack.rs b/src/tools/miri/tests/fail/data_race/read_write_race_stack.rs
index 9fec3ceee07fb..f411767f7b57e 100644
--- a/src/tools/miri/tests/fail/data_race/read_write_race_stack.rs
+++ b/src/tools/miri/tests/fail/data_race/read_write_race_stack.rs
@@ -1,4 +1,6 @@
 //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::ptr::null_mut;
 use std::sync::atomic::{AtomicPtr, Ordering};
diff --git a/src/tools/miri/tests/fail/data_race/relax_acquire_race.rs b/src/tools/miri/tests/fail/data_race/relax_acquire_race.rs
index be4450794ca6c..c4f943808229c 100644
--- a/src/tools/miri/tests/fail/data_race/relax_acquire_race.rs
+++ b/src/tools/miri/tests/fail/data_race/relax_acquire_race.rs
@@ -1,4 +1,6 @@
 //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::thread::spawn;
diff --git a/src/tools/miri/tests/fail/data_race/release_seq_race.rs b/src/tools/miri/tests/fail/data_race/release_seq_race.rs
index 9810832413ec5..f03ab3efa0624 100644
--- a/src/tools/miri/tests/fail/data_race/release_seq_race.rs
+++ b/src/tools/miri/tests/fail/data_race/release_seq_race.rs
@@ -1,4 +1,6 @@
 //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::thread::{sleep, spawn};
diff --git a/src/tools/miri/tests/fail/data_race/release_seq_race_same_thread.rs b/src/tools/miri/tests/fail/data_race/release_seq_race_same_thread.rs
index 93cbc2a57d6be..88ae01b3ca1c7 100644
--- a/src/tools/miri/tests/fail/data_race/release_seq_race_same_thread.rs
+++ b/src/tools/miri/tests/fail/data_race/release_seq_race_same_thread.rs
@@ -1,4 +1,6 @@
 //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::thread::spawn;
diff --git a/src/tools/miri/tests/fail/data_race/rmw_race.rs b/src/tools/miri/tests/fail/data_race/rmw_race.rs
index 982e9c1c4109d..d738caa105879 100644
--- a/src/tools/miri/tests/fail/data_race/rmw_race.rs
+++ b/src/tools/miri/tests/fail/data_race/rmw_race.rs
@@ -1,4 +1,6 @@
 //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::thread::spawn;
diff --git a/src/tools/miri/tests/fail/data_race/stack_pop_race.rs b/src/tools/miri/tests/fail/data_race/stack_pop_race.rs
index 68d82bc30a58d..762a8e51f692a 100644
--- a/src/tools/miri/tests/fail/data_race/stack_pop_race.rs
+++ b/src/tools/miri/tests/fail/data_race/stack_pop_race.rs
@@ -1,4 +1,7 @@
 //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
+
 use std::thread;
 
 #[derive(Copy, Clone)]
diff --git a/src/tools/miri/tests/fail/data_race/write_write_race.rs b/src/tools/miri/tests/fail/data_race/write_write_race.rs
index e8924702af818..993d8d25b4c17 100644
--- a/src/tools/miri/tests/fail/data_race/write_write_race.rs
+++ b/src/tools/miri/tests/fail/data_race/write_write_race.rs
@@ -1,5 +1,7 @@
 // We want to control preemption here.
 //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::thread::spawn;
 
diff --git a/src/tools/miri/tests/fail/data_race/write_write_race_stack.rs b/src/tools/miri/tests/fail/data_race/write_write_race_stack.rs
index 984ae2ee83dce..8070a7f4fc2a0 100644
--- a/src/tools/miri/tests/fail/data_race/write_write_race_stack.rs
+++ b/src/tools/miri/tests/fail/data_race/write_write_race_stack.rs
@@ -1,4 +1,6 @@
 //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
+// Avoid accidental synchronization via address reuse inside `thread::spawn`.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::ptr::null_mut;
 use std::sync::atomic::{AtomicPtr, Ordering};
diff --git a/src/tools/miri/tests/fail/panic/no_std.rs b/src/tools/miri/tests/fail/panic/no_std.rs
index bad425804dc0a..4d32b6d746190 100644
--- a/src/tools/miri/tests/fail/panic/no_std.rs
+++ b/src/tools/miri/tests/fail/panic/no_std.rs
@@ -1,31 +1,11 @@
+//@compile-flags: -Cpanic=abort
 #![feature(start, core_intrinsics)]
 #![no_std]
-//@compile-flags: -Cpanic=abort
-// windows tls dtors go through libstd right now, thus this test
-// cannot pass. When windows tls dtors go through the special magic
-// windows linker section, we can run this test on windows again.
-//@ignore-target-windows: no-std not supported on Windows
-
-// Plumbing to let us use `writeln!` to host stderr:
-
-extern "Rust" {
-    fn miri_write_to_stderr(bytes: &[u8]);
-}
-
-struct HostErr;
 
 use core::fmt::Write;
 
-impl Write for HostErr {
-    fn write_str(&mut self, s: &str) -> core::fmt::Result {
-        unsafe {
-            miri_write_to_stderr(s.as_bytes());
-        }
-        Ok(())
-    }
-}
-
-// Aaaand the test:
+#[path = "../../utils/mod.no_std.rs"]
+mod utils;
 
 #[start]
 fn start(_: isize, _: *const *const u8) -> isize {
@@ -34,6 +14,6 @@ fn start(_: isize, _: *const *const u8) -> isize {
 
 #[panic_handler]
 fn panic_handler(panic_info: &core::panic::PanicInfo) -> ! {
-    writeln!(HostErr, "{panic_info}").ok();
+    writeln!(utils::MiriStderr, "{panic_info}").ok();
     core::intrinsics::abort(); //~ ERROR: the program aborted execution
 }
diff --git a/src/tools/miri/tests/fail/stacked_borrows/retag_data_race_protected_read.rs b/src/tools/miri/tests/fail/stacked_borrows/retag_data_race_protected_read.rs
index 3de517055ec66..a6ee7b40c340b 100644
--- a/src/tools/miri/tests/fail/stacked_borrows/retag_data_race_protected_read.rs
+++ b/src/tools/miri/tests/fail/stacked_borrows/retag_data_race_protected_read.rs
@@ -1,4 +1,5 @@
-//@compile-flags: -Zmiri-preemption-rate=0
+// Avoid accidental synchronization via address reuse.
+//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-cross-thread-rate=0
 use std::thread;
 
 #[derive(Copy, Clone)]
diff --git a/src/tools/miri/tests/fail/stacked_borrows/retag_data_race_read.rs b/src/tools/miri/tests/fail/stacked_borrows/retag_data_race_read.rs
index 25c92ddf6ca08..949f659e7e8e5 100644
--- a/src/tools/miri/tests/fail/stacked_borrows/retag_data_race_read.rs
+++ b/src/tools/miri/tests/fail/stacked_borrows/retag_data_race_read.rs
@@ -1,5 +1,6 @@
 //! Make sure that a retag acts like a read for the data race model.
-//@compile-flags: -Zmiri-preemption-rate=0
+// Avoid accidental synchronization via address reuse.
+//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-cross-thread-rate=0
 #[derive(Copy, Clone)]
 struct SendPtr(*mut u8);
 
diff --git a/src/tools/miri/tests/fail/uninit/uninit_alloc_diagnostic.stderr b/src/tools/miri/tests/fail/uninit/uninit_alloc_diagnostic.stderr
index cca17a07ec208..960cae9012484 100644
--- a/src/tools/miri/tests/fail/uninit/uninit_alloc_diagnostic.stderr
+++ b/src/tools/miri/tests/fail/uninit/uninit_alloc_diagnostic.stderr
@@ -15,13 +15,13 @@ note: inside `main`
 LL |         drop(slice1.cmp(slice2));
    |              ^^^^^^^^^^^^^^^^^^
 
-note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
-
 Uninitialized memory occurred at ALLOC[0x4..0x10], in this allocation:
 ALLOC (Rust heap, size: 32, align: 8) {
     0x00 │ 41 42 43 44 __ __ __ __ __ __ __ __ __ __ __ __ │ ABCD░░░░░░░░░░░░
     0x10 │ 00 __ __ __ __ __ __ __ __ __ __ __ __ __ __ __ │ .░░░░░░░░░░░░░░░
 }
 
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
 error: aborting due to 1 previous error
 
diff --git a/src/tools/miri/tests/fail/uninit/uninit_alloc_diagnostic_with_provenance.stderr b/src/tools/miri/tests/fail/uninit/uninit_alloc_diagnostic_with_provenance.stderr
index 4dc2d27ead433..5439418f26771 100644
--- a/src/tools/miri/tests/fail/uninit/uninit_alloc_diagnostic_with_provenance.stderr
+++ b/src/tools/miri/tests/fail/uninit/uninit_alloc_diagnostic_with_provenance.stderr
@@ -15,8 +15,6 @@ note: inside `main`
 LL |         drop(slice1.cmp(slice2));
    |              ^^^^^^^^^^^^^^^^^^
 
-note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
-
 Uninitialized memory occurred at ALLOC[0x4..0x8], in this allocation:
 ALLOC (Rust heap, size: 16, align: 8) {
     ╾42[ALLOC]<TAG> (1 ptr byte)╼ 12 13 ╾43[ALLOC]<TAG> (1 ptr byte)╼ __ __ __ __ __ __ __ __ __ __ __ __ │ ━..━░░░░░░░░░░░░
@@ -28,5 +26,7 @@ ALLOC (global (static or const), size: 1, align: 1) {
     00                                              │ .
 }
 
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
 error: aborting due to 1 previous error
 
diff --git a/src/tools/miri/tests/fail/weak_memory/racing_mixed_size.rs b/src/tools/miri/tests/fail/weak_memory/racing_mixed_size.rs
index dfe9397a4c46a..1193dddc57784 100644
--- a/src/tools/miri/tests/fail/weak_memory/racing_mixed_size.rs
+++ b/src/tools/miri/tests/fail/weak_memory/racing_mixed_size.rs
@@ -1,5 +1,6 @@
 // We want to control preemption here.
-//@compile-flags: -Zmiri-preemption-rate=0
+// Avoid accidental synchronization via address reuse.
+//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-cross-thread-rate=0
 
 #![feature(core_intrinsics)]
 
diff --git a/src/tools/miri/tests/fail/weak_memory/racing_mixed_size_read.rs b/src/tools/miri/tests/fail/weak_memory/racing_mixed_size_read.rs
index b946a75c3abb3..0a0e372f1f3d2 100644
--- a/src/tools/miri/tests/fail/weak_memory/racing_mixed_size_read.rs
+++ b/src/tools/miri/tests/fail/weak_memory/racing_mixed_size_read.rs
@@ -1,5 +1,6 @@
 // We want to control preemption here.
-//@compile-flags: -Zmiri-preemption-rate=0
+// Avoid accidental synchronization via address reuse.
+//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::sync::atomic::Ordering::*;
 use std::sync::atomic::{AtomicU16, AtomicU32};
diff --git a/src/tools/miri/tests/panic/alloc_error_handler_hook.rs b/src/tools/miri/tests/panic/alloc_error_handler_hook.rs
new file mode 100644
index 0000000000000..a1eadb45fd13b
--- /dev/null
+++ b/src/tools/miri/tests/panic/alloc_error_handler_hook.rs
@@ -0,0 +1,20 @@
+#![feature(allocator_api, alloc_error_hook)]
+
+use std::alloc::*;
+
+struct Bomb;
+impl Drop for Bomb {
+    fn drop(&mut self) {
+        eprintln!("yes we are unwinding!");
+    }
+}
+
+#[allow(unreachable_code, unused_variables)]
+fn main() {
+    // This is a particularly tricky hook, since it unwinds, which the default one does not.
+    set_alloc_error_hook(|_layout| panic!("alloc error hook called"));
+
+    let bomb = Bomb;
+    handle_alloc_error(Layout::for_value(&0));
+    std::mem::forget(bomb); // defuse unwinding bomb
+}
diff --git a/src/tools/miri/tests/panic/alloc_error_handler_hook.stderr b/src/tools/miri/tests/panic/alloc_error_handler_hook.stderr
new file mode 100644
index 0000000000000..5b309ed09bb42
--- /dev/null
+++ b/src/tools/miri/tests/panic/alloc_error_handler_hook.stderr
@@ -0,0 +1,5 @@
+thread 'main' panicked at $DIR/alloc_error_handler_hook.rs:LL:CC:
+alloc error hook called
+note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
+note: in Miri, you may have to set `-Zmiri-env-forward=RUST_BACKTRACE` for the environment variable to have an effect
+yes we are unwinding!
diff --git a/src/tools/miri/tests/panic/alloc_error_handler_panic.rs b/src/tools/miri/tests/panic/alloc_error_handler_panic.rs
new file mode 100644
index 0000000000000..c434e8d3227a2
--- /dev/null
+++ b/src/tools/miri/tests/panic/alloc_error_handler_panic.rs
@@ -0,0 +1,18 @@
+//@compile-flags: -Zoom=panic
+#![feature(allocator_api)]
+
+use std::alloc::*;
+
+struct Bomb;
+impl Drop for Bomb {
+    fn drop(&mut self) {
+        eprintln!("yes we are unwinding!");
+    }
+}
+
+#[allow(unreachable_code, unused_variables)]
+fn main() {
+    let bomb = Bomb;
+    handle_alloc_error(Layout::for_value(&0));
+    std::mem::forget(bomb); // defuse unwinding bomb
+}
diff --git a/src/tools/miri/tests/panic/alloc_error_handler_panic.stderr b/src/tools/miri/tests/panic/alloc_error_handler_panic.stderr
new file mode 100644
index 0000000000000..3d5457799f60d
--- /dev/null
+++ b/src/tools/miri/tests/panic/alloc_error_handler_panic.stderr
@@ -0,0 +1,5 @@
+thread 'main' panicked at RUSTLIB/std/src/alloc.rs:LL:CC:
+memory allocation of 4 bytes failed
+note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
+note: in Miri, you may have to set `-Zmiri-env-forward=RUST_BACKTRACE` for the environment variable to have an effect
+yes we are unwinding!
diff --git a/src/tools/miri/tests/panic/mir-validation.rs b/src/tools/miri/tests/panic/mir-validation.rs
new file mode 100644
index 0000000000000..f1d0ccc7d0376
--- /dev/null
+++ b/src/tools/miri/tests/panic/mir-validation.rs
@@ -0,0 +1,26 @@
+//! Ensure that the MIR validator runs on Miri's input.
+//@rustc-env:RUSTC_ICE=0
+//@normalize-stderr-test: "\n +[0-9]+:.+" -> ""
+//@normalize-stderr-test: "\n +at .+" -> ""
+//@normalize-stderr-test: "\n +\[\.\.\. omitted [0-9]+ frames? \.\.\.\].*" -> ""
+//@normalize-stderr-test: "\n[ =]*note:.*" -> ""
+//@normalize-stderr-test: "DefId\([^()]*\)" -> "DefId"
+// Somehow on rustc Windows CI, the "Miri caused an ICE" message is not shown
+// and we don't even get a regular panic; rustc aborts with a different exit code instead.
+//@ignore-host-windows
+#![feature(custom_mir, core_intrinsics)]
+use core::intrinsics::mir::*;
+
+#[custom_mir(dialect = "runtime", phase = "optimized")]
+pub fn main() {
+    mir! {
+        let x: i32;
+        let tuple: (*mut i32,);
+        {
+            tuple.0 = core::ptr::addr_of_mut!(x);
+            // Deref at the wrong place!
+            *(tuple.0) = 1;
+            Return()
+        }
+    }
+}
diff --git a/src/tools/miri/tests/panic/mir-validation.stderr b/src/tools/miri/tests/panic/mir-validation.stderr
new file mode 100644
index 0000000000000..d158c996dc3de
--- /dev/null
+++ b/src/tools/miri/tests/panic/mir-validation.stderr
@@ -0,0 +1,21 @@
+thread 'rustc' panicked at compiler/rustc_const_eval/src/transform/validate.rs:LL:CC:
+broken MIR in Item(DefId) (after phase change to runtime-optimized) at bb0[1]:
+(*(_2.0: *mut i32)), has deref at the wrong place
+stack backtrace:
+
+error: the compiler unexpectedly panicked. this is a bug.
+
+
+
+
+query stack during panic:
+#0 [optimized_mir] optimizing MIR for `main`
+end of query stack
+
+Miri caused an ICE during evaluation. Here's the interpreter backtrace at the time of the panic:
+  --> RUSTLIB/core/src/ops/function.rs:LL:CC
+   |
+LL |     extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
+   |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   |
+
diff --git a/src/tools/miri/tests/pass-dep/malloc.rs b/src/tools/miri/tests/pass-dep/malloc.rs
index f5e014c000d15..35cd137931fd6 100644
--- a/src/tools/miri/tests/pass-dep/malloc.rs
+++ b/src/tools/miri/tests/pass-dep/malloc.rs
@@ -34,9 +34,8 @@ fn main() {
     }
 
     unsafe {
-        let p1 = libc::malloc(20);
-
-        let p2 = libc::realloc(p1, 0);
+        // Realloc with size 0 is okay for the null pointer
+        let p2 = libc::realloc(ptr::null_mut(), 0);
         assert!(p2.is_null());
     }
 
diff --git a/src/tools/miri/tests/pass/alloc-access-tracking.rs b/src/tools/miri/tests/pass/alloc-access-tracking.rs
index 5c782fca2df47..29c1ee2f7b765 100644
--- a/src/tools/miri/tests/pass/alloc-access-tracking.rs
+++ b/src/tools/miri/tests/pass/alloc-access-tracking.rs
@@ -1,6 +1,6 @@
 #![feature(start)]
 #![no_std]
-//@compile-flags: -Zmiri-track-alloc-id=17 -Zmiri-track-alloc-accesses -Cpanic=abort
+//@compile-flags: -Zmiri-track-alloc-id=18 -Zmiri-track-alloc-accesses -Cpanic=abort
 //@only-target-linux: alloc IDs differ between OSes for some reason
 
 extern "Rust" {
diff --git a/src/tools/miri/tests/pass/alloc-access-tracking.stderr b/src/tools/miri/tests/pass/alloc-access-tracking.stderr
index 5e219fa1bedd0..bef13701ea2c4 100644
--- a/src/tools/miri/tests/pass/alloc-access-tracking.stderr
+++ b/src/tools/miri/tests/pass/alloc-access-tracking.stderr
@@ -2,7 +2,7 @@ note: tracking was triggered
   --> $DIR/alloc-access-tracking.rs:LL:CC
    |
 LL |         let ptr = miri_alloc(123, 1);
-   |                   ^^^^^^^^^^^^^^^^^^ created Miri bare-metal heap allocation of 123 bytes (alignment ALIGN bytes) with id 17
+   |                   ^^^^^^^^^^^^^^^^^^ created Miri bare-metal heap allocation of 123 bytes (alignment ALIGN bytes) with id 18
    |
    = note: BACKTRACE:
    = note: inside `start` at $DIR/alloc-access-tracking.rs:LL:CC
@@ -11,7 +11,7 @@ note: tracking was triggered
   --> $DIR/alloc-access-tracking.rs:LL:CC
    |
 LL |         *ptr = 42; // Crucially, only a write is printed here, no read!
-   |         ^^^^^^^^^ write access to allocation with id 17
+   |         ^^^^^^^^^ write access to allocation with id 18
    |
    = note: BACKTRACE:
    = note: inside `start` at $DIR/alloc-access-tracking.rs:LL:CC
@@ -20,7 +20,7 @@ note: tracking was triggered
   --> $DIR/alloc-access-tracking.rs:LL:CC
    |
 LL |         assert_eq!(*ptr, 42);
-   |         ^^^^^^^^^^^^^^^^^^^^ read access to allocation with id 17
+   |         ^^^^^^^^^^^^^^^^^^^^ read access to allocation with id 18
    |
    = note: BACKTRACE:
    = note: inside `start` at RUSTLIB/core/src/macros/mod.rs:LL:CC
@@ -30,7 +30,7 @@ note: tracking was triggered
   --> $DIR/alloc-access-tracking.rs:LL:CC
    |
 LL |         miri_dealloc(ptr, 123, 1);
-   |         ^^^^^^^^^^^^^^^^^^^^^^^^^ freed allocation with id 17
+   |         ^^^^^^^^^^^^^^^^^^^^^^^^^ freed allocation with id 18
    |
    = note: BACKTRACE:
    = note: inside `start` at $DIR/alloc-access-tracking.rs:LL:CC
diff --git a/src/tools/miri/tests/pass/concurrency/address_reuse_happens_before.rs b/src/tools/miri/tests/pass/concurrency/address_reuse_happens_before.rs
new file mode 100644
index 0000000000000..cfc1ef7ae455a
--- /dev/null
+++ b/src/tools/miri/tests/pass/concurrency/address_reuse_happens_before.rs
@@ -0,0 +1,61 @@
+//! Regression test for <https://github.com/rust-lang/miri/issues/3450>:
+//! When the address gets reused, there should be a happens-before relation.
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=1.0
+#![feature(strict_provenance)]
+#![feature(sync_unsafe_cell)]
+
+use std::cell::SyncUnsafeCell;
+use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
+use std::thread;
+
+static ADDR: AtomicUsize = AtomicUsize::new(0);
+static VAL: SyncUnsafeCell<i32> = SyncUnsafeCell::new(0);
+
+fn addr() -> usize {
+    let alloc = Box::new(42);
+    <*const i32>::addr(&*alloc)
+}
+
+fn thread1() {
+    unsafe {
+        VAL.get().write(42);
+    }
+    let alloc = addr();
+    ADDR.store(alloc, Relaxed);
+}
+
+fn thread2() -> bool {
+    // We try to get an allocation at the same address as the global `ADDR`. If we fail too often,
+    // just bail. `main` will try again with a different allocation.
+    for _ in 0..16 {
+        let alloc = addr();
+        let addr = ADDR.load(Relaxed);
+        if alloc == addr {
+            // We got a reuse!
+            // If the new allocation is at the same address as the old one, there must be a
+            // happens-before relationship between them. Therefore, we can read VAL without racing
+            // and must observe the write above.
+            let val = unsafe { VAL.get().read() };
+            assert_eq!(val, 42);
+            return true;
+        }
+    }
+
+    false
+}
+
+fn main() {
+    let mut success = false;
+    while !success {
+        let t1 = thread::spawn(thread1);
+        let t2 = thread::spawn(thread2);
+        t1.join().unwrap();
+        success = t2.join().unwrap();
+
+        // Reset everything.
+        ADDR.store(0, Relaxed);
+        unsafe {
+            VAL.get().write(0);
+        }
+    }
+}
diff --git a/src/tools/miri/tests/pass/concurrency/disable_data_race_detector.rs b/src/tools/miri/tests/pass/concurrency/disable_data_race_detector.rs
index 049b5e7f49871..354a4bef932e9 100644
--- a/src/tools/miri/tests/pass/concurrency/disable_data_race_detector.rs
+++ b/src/tools/miri/tests/pass/concurrency/disable_data_race_detector.rs
@@ -1,4 +1,6 @@
 //@compile-flags: -Zmiri-disable-data-race-detector
+// Avoid non-determinism
+//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-cross-thread-rate=0
 
 use std::thread::spawn;
 
diff --git a/src/tools/miri/tests/pass/drop_type_without_drop_glue.rs b/src/tools/miri/tests/pass/drop_type_without_drop_glue.rs
new file mode 100644
index 0000000000000..43ddc8a4d8b55
--- /dev/null
+++ b/src/tools/miri/tests/pass/drop_type_without_drop_glue.rs
@@ -0,0 +1,21 @@
+#![feature(custom_mir, core_intrinsics, strict_provenance)]
+use std::intrinsics::mir::*;
+
+// The `Drop` terminator on a type with no drop glue should be a NOP.
+
+#[custom_mir(dialect = "runtime", phase = "optimized")]
+fn drop_in_place_with_terminator(ptr: *mut i32) {
+    mir! {
+        {
+            Drop(*ptr, ReturnTo(after_call), UnwindContinue())
+        }
+        after_call = {
+            Return()
+        }
+    }
+}
+
+pub fn main() {
+    drop_in_place_with_terminator(std::ptr::without_provenance_mut(0));
+    drop_in_place_with_terminator(std::ptr::without_provenance_mut(1));
+}
diff --git a/src/tools/miri/tests/pass/no_std.rs b/src/tools/miri/tests/pass/no_std.rs
index 3c98ee50aa9c0..fc1c16f5fb956 100644
--- a/src/tools/miri/tests/pass/no_std.rs
+++ b/src/tools/miri/tests/pass/no_std.rs
@@ -2,30 +2,14 @@
 #![feature(start)]
 #![no_std]
 
-// Plumbing to let us use `writeln!` to host stdout:
-
-extern "Rust" {
-    fn miri_write_to_stdout(bytes: &[u8]);
-}
-
-struct Host;
-
 use core::fmt::Write;
 
-impl Write for Host {
-    fn write_str(&mut self, s: &str) -> core::fmt::Result {
-        unsafe {
-            miri_write_to_stdout(s.as_bytes());
-        }
-        Ok(())
-    }
-}
-
-// Aaaand the test:
+#[path = "../utils/mod.no_std.rs"]
+mod utils;
 
 #[start]
 fn start(_: isize, _: *const *const u8) -> isize {
-    writeln!(Host, "hello, world!").unwrap();
+    writeln!(utils::MiriStdout, "hello, world!").unwrap();
     0
 }
 
diff --git a/src/tools/miri/tests/pass/stacked-borrows/stacked-borrows.rs b/src/tools/miri/tests/pass/stacked-borrows/stacked-borrows.rs
index 43ba490d5bb42..c75824d7f9be0 100644
--- a/src/tools/miri/tests/pass/stacked-borrows/stacked-borrows.rs
+++ b/src/tools/miri/tests/pass/stacked-borrows/stacked-borrows.rs
@@ -265,13 +265,15 @@ fn write_does_not_invalidate_all_aliases() {
     assert_eq!(*x, 1337); // oops, the value changed! I guess not all pointers were invalidated
 }
 
-fn box_into_raw_allows_interior_mutable_alias() { unsafe {
-    let b = Box::new(std::cell::Cell::new(42));
-    let raw = Box::into_raw(b);
-    let c = &*raw;
-    let d = raw.cast::<i32>(); // bypassing `Cell` -- only okay in Miri tests
-    // `c` and `d` should permit arbitrary aliasing with each other now.
-    *d = 1;
-    c.set(2);
-    drop(Box::from_raw(raw));
-} }
+fn box_into_raw_allows_interior_mutable_alias() {
+    unsafe {
+        let b = Box::new(std::cell::Cell::new(42));
+        let raw = Box::into_raw(b);
+        let c = &*raw;
+        let d = raw.cast::<i32>(); // bypassing `Cell` -- only okay in Miri tests
+        // `c` and `d` should permit arbitrary aliasing with each other now.
+        *d = 1;
+        c.set(2);
+        drop(Box::from_raw(raw));
+    }
+}
diff --git a/src/tools/miri/tests/pass/tree_borrows/reserved.rs b/src/tools/miri/tests/pass/tree_borrows/reserved.rs
index 87ce91a809f22..f93cac8361e31 100644
--- a/src/tools/miri/tests/pass/tree_borrows/reserved.rs
+++ b/src/tools/miri/tests/pass/tree_borrows/reserved.rs
@@ -27,9 +27,8 @@ fn main() {
     }
 }
 
-unsafe fn print(msg: &str) {
-    utils::miri_write_to_stderr(msg.as_bytes());
-    utils::miri_write_to_stderr("\n".as_bytes());
+fn print(msg: &str) {
+    eprintln!("{msg}");
 }
 
 unsafe fn read_second<T>(x: &mut T, y: *mut u8) {
diff --git a/src/tools/miri/tests/pass/weak_memory/weak.rs b/src/tools/miri/tests/pass/weak_memory/weak.rs
index e10ccc277f6f1..dac63eeeb0b24 100644
--- a/src/tools/miri/tests/pass/weak_memory/weak.rs
+++ b/src/tools/miri/tests/pass/weak_memory/weak.rs
@@ -37,6 +37,8 @@ fn relaxed() -> bool {
     let x = static_atomic(0);
     let j1 = spawn(move || {
         x.store(1, Relaxed);
+        // Preemption is disabled, so the store above will never be the
+        // latest store visible to another thread.
         x.store(2, Relaxed);
     });
 
@@ -138,6 +140,7 @@ fn faa_replaced_by_load() -> bool {
 }
 
 /// Asserts that the function returns true at least once in 100 runs
+#[track_caller]
 fn assert_once(f: fn() -> bool) {
     assert!(std::iter::repeat_with(|| f()).take(100).any(|x| x));
 }
diff --git a/src/tools/miri/tests/utils/io.rs b/src/tools/miri/tests/utils/io.rs
new file mode 100644
index 0000000000000..e3eaa6c468a1a
--- /dev/null
+++ b/src/tools/miri/tests/utils/io.rs
@@ -0,0 +1,25 @@
+use core::fmt::{self, Write};
+
+use super::miri_extern;
+
+pub struct MiriStderr;
+
+impl Write for MiriStderr {
+    fn write_str(&mut self, s: &str) -> fmt::Result {
+        unsafe {
+            miri_extern::miri_write_to_stderr(s.as_bytes());
+        }
+        Ok(())
+    }
+}
+
+pub struct MiriStdout;
+
+impl Write for MiriStdout {
+    fn write_str(&mut self, s: &str) -> fmt::Result {
+        unsafe {
+            miri_extern::miri_write_to_stdout(s.as_bytes());
+        }
+        Ok(())
+    }
+}
diff --git a/src/tools/miri/tests/utils/miri_extern.rs b/src/tools/miri/tests/utils/miri_extern.rs
index e2983f6c71a7e..d6c43b1882195 100644
--- a/src/tools/miri/tests/utils/miri_extern.rs
+++ b/src/tools/miri/tests/utils/miri_extern.rs
@@ -133,8 +133,8 @@ extern "Rust" {
     /// with a null terminator.
     /// Returns 0 if the `out` buffer was large enough, and the required size otherwise.
     pub fn miri_host_to_target_path(
-        path: *const std::ffi::c_char,
-        out: *mut std::ffi::c_char,
+        path: *const core::ffi::c_char,
+        out: *mut core::ffi::c_char,
         out_size: usize,
     ) -> usize;
 
diff --git a/src/tools/miri/tests/utils/mod.no_std.rs b/src/tools/miri/tests/utils/mod.no_std.rs
new file mode 100644
index 0000000000000..aaf2bf50c4e67
--- /dev/null
+++ b/src/tools/miri/tests/utils/mod.no_std.rs
@@ -0,0 +1,11 @@
+#![allow(dead_code)]
+#![allow(unused_imports)]
+
+#[macro_use]
+mod macros;
+
+mod io;
+mod miri_extern;
+
+pub use self::io::*;
+pub use self::miri_extern::*;
diff --git a/src/tools/miri/tests/utils/mod.rs b/src/tools/miri/tests/utils/mod.rs
index cb9380f5753ce..138ada4e20d7a 100644
--- a/src/tools/miri/tests/utils/mod.rs
+++ b/src/tools/miri/tests/utils/mod.rs
@@ -5,9 +5,11 @@
 mod macros;
 
 mod fs;
+mod io;
 mod miri_extern;
 
 pub use self::fs::*;
+pub use self::io::*;
 pub use self::miri_extern::*;
 
 pub fn run_provenance_gc() {