diff --git a/compiler/rustc_serialize/src/serialize.rs b/compiler/rustc_serialize/src/serialize.rs
index 06166cabc1872..7c50d9391431c 100644
--- a/compiler/rustc_serialize/src/serialize.rs
+++ b/compiler/rustc_serialize/src/serialize.rs
@@ -1,6 +1,6 @@
 //! Support code for encoding and decoding types.
 
-use std::alloc::Allocator;
+use std::alloc::{Allocator, Fatal};
 use std::borrow::Cow;
 use std::cell::{Cell, RefCell};
 use std::marker::PhantomData;
@@ -273,7 +273,10 @@ impl<D: Decoder, T> Decodable<D> for PhantomData<T> {
     }
 }
 
-impl<D: Decoder, T: Decodable<D>, A: Allocator + Default> Decodable<D> for Box<[T], A> {
+impl<D: Decoder, T: Decodable<D>, A: Default> Decodable<D> for Box<[T], A>
+where
+    A: Allocator<ErrorHandling = Fatal>,
+{
     fn decode(d: &mut D) -> Box<[T], A> {
         let v: Vec<T, A> = Decodable::decode(d);
         v.into_boxed_slice()
@@ -308,7 +311,10 @@ impl<S: Encoder, T: Encodable<S>, A: Allocator> Encodable<S> for Vec<T, A> {
     }
 }
 
-impl<D: Decoder, T: Decodable<D>, A: Allocator + Default> Decodable<D> for Vec<T, A> {
+impl<D: Decoder, T: Decodable<D>, A: Default> Decodable<D> for Vec<T, A>
+where
+    A: Allocator<ErrorHandling = Fatal>,
+{
     default fn decode(d: &mut D) -> Vec<T, A> {
         let len = d.read_usize();
         let allocator = A::default();
diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs
index 8c4f6a73d7fea..ca988d8772c35 100644
--- a/library/alloc/src/alloc.rs
+++ b/library/alloc/src/alloc.rs
@@ -321,6 +321,11 @@ unsafe impl Allocator for Global {
             },
         }
     }
+
+    #[cfg(not(no_global_oom_handling))]
+    type ErrorHandling = Fatal;
+    #[cfg(no_global_oom_handling)]
+    type ErrorHandling = Fallible;
 }
 
 /// The allocator for unique pointers.
@@ -443,3 +448,218 @@ impl<T: Clone> WriteCloneIntoRaw for T {
         unsafe { target.copy_from_nonoverlapping(self, 1) };
     }
 }
+
+#[cfg(all(not(no_global_oom_handling), not(test)))]
+use core::error::Error;
+
+/// Trait for handling alloc errors for allocators which
+/// panic or abort instead of returning errors.
+#[unstable(feature = "allocator_api", issue = "32838")]
+#[cfg(all(not(no_global_oom_handling), not(test)))]
+#[rustc_specialization_trait]
+pub trait HandleAllocError: Error {
+    /// Globally handle this allocation error
+    fn handle_alloc_error(self) -> !;
+}
+
+/// Error handling mode to use when the user of the type wants to ignore
+/// allocation failures, treating them as a fatal error. Functions
+/// performing allocation will return values directly.
+#[derive(Debug)]
+#[unstable(feature = "allocator_api", issue = "32838")]
+#[cfg(all(not(no_global_oom_handling), not(test)))]
+pub struct Fatal;
+
+#[unstable(feature = "alloc_internals", issue = "none")]
+#[cfg(all(not(no_global_oom_handling), not(test)))]
+impl error_handling_sealed::Sealed for Fatal {}
+
+#[unstable(feature = "allocator_api", issue = "32838")]
+#[cfg(all(not(no_global_oom_handling), not(test)))]
+impl ErrorHandling for Fatal {
+    type Result<T, E: Error> = T;
+
+    fn map_result<T, E: Error>(result: Result<T, E>) -> Self::Result<T, E> {
+        /// Hack around lack of `cfg(no_global_oom_handling)` in core.
+        ///
+        /// Using post-monomorphization errors and specialization,
+        /// we can enforce that any error used with `Fatal` implements
+        /// `HandleAllocError`, without requiring that all errors used
+        /// with fallible allocation implement it. This also allows
+        /// for `HandleAllocError` to live with the rest of the
+        /// global allocation handling in the `alloc` crate.
+        trait HandleAllocErrorInternal {
+            fn handle_alloc_error_internal(self) -> !;
+        }
+        impl<E: Error> HandleAllocErrorInternal for E {
+            default fn handle_alloc_error_internal(self) -> ! {
+                const {
+                    panic!(
+                        "user must implement `HandleAllocError` for any error type used with the `Fatal` kind of `ErrorHandling`"
+                    )
+                }
+            }
+        }
+        impl<E: HandleAllocError> HandleAllocErrorInternal for E {
+            fn handle_alloc_error_internal(self) -> ! {
+                self.handle_alloc_error()
+            }
+        }
+
+        result.unwrap_or_else(|e| e.handle_alloc_error_internal())
+    }
+}
+
+/// Wrapper around an existing allocator allowing one to
+/// use a fallible allocator as an infallible one.
+#[unstable(feature = "allocator_api", issue = "32838")]
+#[cfg(all(not(no_global_oom_handling), not(test)))]
+#[derive(Debug)]
+pub struct FatalAdapter<A: Allocator<ErrorHandling = Fallible>>(pub A);
+
+#[unstable(feature = "allocator_api", issue = "32838")]
+#[cfg(all(not(no_global_oom_handling), not(test)))]
+unsafe impl<A: Allocator<ErrorHandling = Fallible>> Allocator for FatalAdapter<A> {
+    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+        self.0.allocate(layout)
+    }
+
+    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+        self.0.allocate_zeroed(layout)
+    }
+
+    unsafe fn deallocate(&self, ptr: core::ptr::NonNull<u8>, layout: Layout) {
+        // SAFETY: the safety contract must be upheld by the caller
+        unsafe { self.0.deallocate(ptr, layout) }
+    }
+
+    unsafe fn grow(
+        &self,
+        ptr: core::ptr::NonNull<u8>,
+        old_layout: Layout,
+        new_layout: Layout,
+    ) -> Result<NonNull<[u8]>, AllocError> {
+        // SAFETY: the safety contract must be upheld by the caller
+        unsafe { self.0.grow(ptr, old_layout, new_layout) }
+    }
+
+    unsafe fn grow_zeroed(
+        &self,
+        ptr: core::ptr::NonNull<u8>,
+        old_layout: Layout,
+        new_layout: Layout,
+    ) -> Result<NonNull<[u8]>, AllocError> {
+        // SAFETY: the safety contract must be upheld by the caller
+        unsafe { self.0.grow_zeroed(ptr, old_layout, new_layout) }
+    }
+
+    unsafe fn shrink(
+        &self,
+        ptr: core::ptr::NonNull<u8>,
+        old_layout: Layout,
+        new_layout: Layout,
+    ) -> Result<NonNull<[u8]>, AllocError> {
+        // SAFETY: the safety contract must be upheld by the caller
+        unsafe { self.0.shrink(ptr, old_layout, new_layout) }
+    }
+
+    fn by_ref(&self) -> &Self
+    where
+        Self: Sized,
+    {
+        self
+    }
+
+    type ErrorHandling = Fatal;
+}
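Illustrative sketch (not part of the patch): the adapter above only swaps the error-handling mode; every allocation method is a pass-through. `MyFallibleAlloc` below is a hypothetical allocator declaring `type ErrorHandling = Fallible`.

```rust
// Sketch against this patch's unstable API; `MyFallibleAlloc` is hypothetical.
#![feature(allocator_api)]
use std::alloc::FatalAdapter;

fn make_buffer(alloc: MyFallibleAlloc) -> Vec<u8, FatalAdapter<MyFallibleAlloc>> {
    // Wrapping re-tags the allocator as `Fatal`: `with_capacity_in` then
    // returns the `Vec` directly, diverting OOM into `handle_alloc_error`.
    Vec::with_capacity_in(4096, FatalAdapter(alloc))
}
```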
+#[unstable(feature = "allocator_api", issue = "32838")] +#[cfg(all(not(no_global_oom_handling), not(test)))] +#[derive(Debug)] +pub struct FallibleAdapter>(pub A); + +#[unstable(feature = "allocator_api", issue = "32838")] +#[cfg(all(not(no_global_oom_handling), not(test)))] +unsafe impl> Allocator for FallibleAdapter { + fn allocate(&self, layout: Layout) -> Result, AllocError> { + self.0.allocate(layout) + } + + fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { + self.0.allocate_zeroed(layout) + } + + unsafe fn deallocate(&self, ptr: core::ptr::NonNull, layout: Layout) { + // SAFETY: the safety contract must be upheld by the caller + unsafe { self.0.deallocate(ptr, layout) } + } + + unsafe fn grow( + &self, + ptr: core::ptr::NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + // SAFETY: the safety contract must be upheld by the caller + unsafe { self.0.grow(ptr, old_layout, new_layout) } + } + + unsafe fn grow_zeroed( + &self, + ptr: core::ptr::NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + // SAFETY: the safety contract must be upheld by the caller + unsafe { self.0.grow_zeroed(ptr, old_layout, new_layout) } + } + + unsafe fn shrink( + &self, + ptr: core::ptr::NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + // SAFETY: the safety contract must be upheld by the caller + unsafe { self.0.shrink(ptr, old_layout, new_layout) } + } + + fn by_ref(&self) -> &Self + where + Self: Sized, + { + self + } + + type ErrorHandling = Fallible; +} + +#[cfg(test)] +pub use std::alloc::{FallibleAdapter, Fatal, FatalAdapter, HandleAllocError}; + +#[cfg(not(no_global_oom_handling))] +use crate::collections::{TryReserveError, TryReserveErrorKind}; + +// One central function responsible for reporting capacity overflows. This'll +// ensure that the code generation related to these panics is minimal as there's +// only one location which panics rather than a bunch throughout the module. +#[cfg(not(no_global_oom_handling))] +pub(crate) fn capacity_overflow() -> ! { + panic!("capacity overflow"); +} + +#[cfg(not(no_global_oom_handling))] +#[unstable(feature = "allocator_api", issue = "32838")] +impl HandleAllocError for TryReserveError { + fn handle_alloc_error(self) -> ! { + match self.kind() { + TryReserveErrorKind::CapacityOverflow => capacity_overflow(), + TryReserveErrorKind::AllocError { layout, .. 
diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs
index 1768687e8cd02..12b4e85e37512 100644
--- a/library/alloc/src/boxed.rs
+++ b/library/alloc/src/boxed.rs
@@ -165,11 +165,14 @@ use core::pin::Pin;
 use core::ptr::{self, Unique};
 use core::task::{Context, Poll};
 
+#[cfg(not(no_global_oom_handling))]
+use crate::alloc::Fatal;
 #[cfg(not(no_global_oom_handling))]
 use crate::alloc::{handle_alloc_error, WriteCloneIntoRaw};
-use crate::alloc::{AllocError, Allocator, Global, Layout};
+use crate::alloc::{AllocError, AllocResult, Allocator, ErrorHandling, Global, Layout};
 #[cfg(not(no_global_oom_handling))]
 use crate::borrow::Cow;
+use crate::collections::TryReserveError;
 use crate::raw_vec::RawVec;
 #[cfg(not(no_global_oom_handling))]
 use crate::str::from_boxed_utf8_unchecked;
@@ -624,7 +627,10 @@ impl<T> Box<[T]> {
     #[unstable(feature = "new_uninit", issue = "63291")]
     #[must_use]
     pub fn new_uninit_slice(len: usize) -> Box<[mem::MaybeUninit<T>]> {
-        unsafe { RawVec::with_capacity(len).into_box(len) }
+        unsafe {
+            <Global as Allocator>::ErrorHandling::map_result(RawVec::with_capacity_in(len, Global))
+                .into_box(len)
+        }
     }
 
     /// Constructs a new boxed slice with uninitialized contents, with the memory
@@ -649,7 +655,12 @@ impl<T> Box<[T]> {
     #[unstable(feature = "new_uninit", issue = "63291")]
     #[must_use]
     pub fn new_zeroed_slice(len: usize) -> Box<[mem::MaybeUninit<T>]> {
-        unsafe { RawVec::with_capacity_zeroed(len).into_box(len) }
+        unsafe {
+            <Global as Allocator>::ErrorHandling::map_result(RawVec::with_capacity_zeroed_in(
+                len, Global,
+            ))
+            .into_box(len)
+        }
     }
 
     /// Constructs a new boxed slice with uninitialized contents. Returns an error if
@@ -675,14 +686,7 @@ impl<T> Box<[T]> {
     #[unstable(feature = "allocator_api", issue = "32838")]
     #[inline]
     pub fn try_new_uninit_slice(len: usize) -> Result<Box<[mem::MaybeUninit<T>]>, AllocError> {
-        unsafe {
-            let layout = match Layout::array::<mem::MaybeUninit<T>>(len) {
-                Ok(l) => l,
-                Err(_) => return Err(AllocError),
-            };
-            let ptr = Global.allocate(layout)?;
-            Ok(RawVec::from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global).into_box(len))
-        }
+        unsafe { Ok(RawVec::with_capacity_in(len, Global).map_err(|_| AllocError)?.into_box(len)) }
     }
 
     /// Constructs a new boxed slice with uninitialized contents, with the memory
@@ -708,12 +712,7 @@ impl<T> Box<[T]> {
     #[inline]
     pub fn try_new_zeroed_slice(len: usize) -> Result<Box<[mem::MaybeUninit<T>]>, AllocError> {
         unsafe {
-            let layout = match Layout::array::<mem::MaybeUninit<T>>(len) {
-                Ok(l) => l,
-                Err(_) => return Err(AllocError),
-            };
-            let ptr = Global.allocate_zeroed(layout)?;
-            Ok(RawVec::from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global).into_box(len))
+            Ok(RawVec::with_capacity_zeroed_in(len, Global).map_err(|_| AllocError)?.into_box(len))
         }
     }
 }
@@ -741,12 +740,18 @@ impl<T, A: Allocator> Box<[T], A> {
     ///
     /// assert_eq!(*values, [1, 2, 3])
     /// ```
-    #[cfg(not(no_global_oom_handling))]
     #[unstable(feature = "allocator_api", issue = "32838")]
     // #[unstable(feature = "new_uninit", issue = "63291")]
     #[must_use]
-    pub fn new_uninit_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit<T>], A> {
-        unsafe { RawVec::with_capacity_in(len, alloc).into_box(len) }
-    }
+    pub fn new_uninit_slice_in(
+        len: usize,
+        alloc: A,
+    ) -> AllocResult<A, Box<[mem::MaybeUninit<T>], A>, TryReserveError> {
+        unsafe {
+            A::ErrorHandling::map_result(
+                RawVec::with_capacity_in(len, alloc).map(|r| r.into_box(len)),
+            )
+        }
+    }
 
     /// Constructs a new boxed slice with uninitialized contents in the provided allocator,
@@ -769,12 +774,18 @@ impl<T, A: Allocator> Box<[T], A> {
     /// ```
     ///
     /// [zeroed]:
mem::MaybeUninit::zeroed - #[cfg(not(no_global_oom_handling))] #[unstable(feature = "allocator_api", issue = "32838")] // #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] - pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit], A> { - unsafe { RawVec::with_capacity_zeroed_in(len, alloc).into_box(len) } + pub fn new_zeroed_slice_in( + len: usize, + alloc: A, + ) -> AllocResult], A>, TryReserveError> { + unsafe { + A::ErrorHandling::map_result( + RawVec::with_capacity_zeroed_in(len, alloc).map(|r| r.into_box(len)), + ) + } } } @@ -1474,7 +1485,8 @@ impl BoxFromSlice for Box<[T]> { #[inline] fn from_slice(slice: &[T]) -> Self { let len = slice.len(); - let buf = RawVec::with_capacity(len); + let buf = + ::ErrorHandling::map_result(RawVec::with_capacity_in(len, Global)); unsafe { ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len); buf.into_box(slice.len()).assume_init() @@ -2016,7 +2028,10 @@ impl FromIterator for Box<[I]> { #[cfg(not(no_global_oom_handling))] #[stable(feature = "box_slice_clone", since = "1.3.0")] -impl Clone for Box<[T], A> { +impl Clone for Box<[T], A> +where + A: Allocator, +{ fn clone(&self) -> Self { let alloc = Box::allocator(self).clone(); self.to_vec_in(alloc).into_boxed_slice() diff --git a/library/alloc/src/collections/btree/append.rs b/library/alloc/src/collections/btree/append.rs index b6989afb6255d..b6e29e4f543c5 100644 --- a/library/alloc/src/collections/btree/append.rs +++ b/library/alloc/src/collections/btree/append.rs @@ -1,6 +1,6 @@ use super::merge_iter::MergeIterInner; use super::node::{self, Root}; -use core::alloc::Allocator; +use crate::alloc::Allocator; use core::iter::FusedIterator; impl Root { diff --git a/library/alloc/src/collections/btree/fix.rs b/library/alloc/src/collections/btree/fix.rs index 91b61218005a6..afc3ce93232a2 100644 --- a/library/alloc/src/collections/btree/fix.rs +++ b/library/alloc/src/collections/btree/fix.rs @@ -1,6 +1,6 @@ use super::map::MIN_LEN; use super::node::{marker, ForceResult::*, Handle, LeftOrRight::*, NodeRef, Root}; -use core::alloc::Allocator; +use crate::alloc::Allocator; impl<'a, K: 'a, V: 'a> NodeRef, K, V, marker::LeafOrInternal> { /// Stocks up a possibly underfull node by merging with or stealing from a diff --git a/library/alloc/src/collections/btree/remove.rs b/library/alloc/src/collections/btree/remove.rs index 0904299254f0a..8666287948d6a 100644 --- a/library/alloc/src/collections/btree/remove.rs +++ b/library/alloc/src/collections/btree/remove.rs @@ -1,6 +1,6 @@ use super::map::MIN_LEN; use super::node::{marker, ForceResult::*, Handle, LeftOrRight::*, NodeRef}; -use core::alloc::Allocator; +use crate::alloc::Allocator; impl<'a, K: 'a, V: 'a> Handle, K, V, marker::LeafOrInternal>, marker::KV> { /// Removes a key-value pair from the tree, and returns that pair, as well as diff --git a/library/alloc/src/collections/btree/split.rs b/library/alloc/src/collections/btree/split.rs index 638dc98fc3e41..7747c613c9401 100644 --- a/library/alloc/src/collections/btree/split.rs +++ b/library/alloc/src/collections/btree/split.rs @@ -1,6 +1,6 @@ use super::node::{ForceResult::*, Root}; use super::search::SearchResult::*; -use core::alloc::Allocator; +use crate::alloc::Allocator; use core::borrow::Borrow; impl Root { diff --git a/library/alloc/src/collections/vec_deque/into_iter.rs b/library/alloc/src/collections/vec_deque/into_iter.rs index d9e274df0f5f2..150b116727968 100644 --- a/library/alloc/src/collections/vec_deque/into_iter.rs +++ 
b/library/alloc/src/collections/vec_deque/into_iter.rs @@ -12,7 +12,6 @@ use super::VecDeque; /// (provided by the [`IntoIterator`] trait). See its documentation for more. /// /// [`into_iter`]: VecDeque::into_iter -#[derive(Clone)] #[stable(feature = "rust1", since = "1.0.0")] pub struct IntoIter< T, @@ -21,6 +20,16 @@ pub struct IntoIter< inner: VecDeque, } +#[stable(feature = "rust1", since = "1.0.0")] +impl Clone for IntoIter +where + VecDeque: Clone, +{ + fn clone(&self) -> Self { + Self { inner: self.inner.clone() } + } +} + impl IntoIter { pub(super) fn new(inner: VecDeque) -> Self { IntoIter { inner } diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs index 896da37f94c02..72cacb966a93a 100644 --- a/library/alloc/src/collections/vec_deque/mod.rs +++ b/library/alloc/src/collections/vec_deque/mod.rs @@ -22,7 +22,7 @@ use core::slice; #[allow(unused_imports)] use core::mem; -use crate::alloc::{Allocator, Global}; +use crate::alloc::{AllocResult, Allocator, ErrorHandling, Fatal, Global}; use crate::collections::TryReserveError; use crate::collections::TryReserveErrorKind; use crate::raw_vec::RawVec; @@ -106,7 +106,10 @@ pub struct VecDeque< } #[stable(feature = "rust1", since = "1.0.0")] -impl Clone for VecDeque { +impl Clone for VecDeque +where + A: Allocator + Clone, +{ fn clone(&self) -> Self { let mut deq = Self::with_capacity_in(self.len(), self.allocator().clone()); deq.extend(self.iter().cloned()); @@ -577,6 +580,10 @@ impl VecDeque { VecDeque { head: 0, len: 0, buf: RawVec::new_in(alloc) } } + fn try_with_capacity_in(capacity: usize, alloc: A) -> Result { + Ok(VecDeque { head: 0, len: 0, buf: RawVec::with_capacity_in(capacity, alloc)? }) + } + /// Creates an empty deque with space for at least `capacity` elements. /// /// # Examples @@ -587,8 +594,11 @@ impl VecDeque { /// let deque: VecDeque = VecDeque::with_capacity(10); /// ``` #[unstable(feature = "allocator_api", issue = "32838")] - pub fn with_capacity_in(capacity: usize, alloc: A) -> VecDeque { - VecDeque { head: 0, len: 0, buf: RawVec::with_capacity_in(capacity, alloc) } + pub fn with_capacity_in( + capacity: usize, + alloc: A, + ) -> AllocResult, TryReserveError> { + A::ErrorHandling::map_result(Self::try_with_capacity_in(capacity, alloc)) } /// Creates a `VecDeque` from a raw allocation, when the initialized @@ -751,16 +761,8 @@ impl VecDeque { /// /// [`reserve`]: VecDeque::reserve #[stable(feature = "rust1", since = "1.0.0")] - pub fn reserve_exact(&mut self, additional: usize) { - let new_cap = self.len.checked_add(additional).expect("capacity overflow"); - let old_cap = self.capacity(); - - if new_cap > old_cap { - self.buf.reserve_exact(self.len, additional); - unsafe { - self.handle_capacity_increase(old_cap); - } - } + pub fn reserve_exact(&mut self, additional: usize) -> AllocResult { + A::ErrorHandling::map_result(self.try_reserve_exact(additional)) } /// Reserves capacity for at least `additional` more elements to be inserted in the given @@ -780,18 +782,8 @@ impl VecDeque { /// assert!(buf.capacity() >= 11); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn reserve(&mut self, additional: usize) { - let new_cap = self.len.checked_add(additional).expect("capacity overflow"); - let old_cap = self.capacity(); - - if new_cap > old_cap { - // we don't need to reserve_exact(), as the size doesn't have - // to be a power of 2. 
- self.buf.reserve(self.len, additional); - unsafe { - self.handle_capacity_increase(old_cap); - } - } + pub fn reserve(&mut self, additional: usize) -> AllocResult { + A::ErrorHandling::map_result(self.try_reserve(additional)) } /// Tries to reserve the minimum capacity for at least `additional` more elements to @@ -838,7 +830,7 @@ impl VecDeque { let old_cap = self.capacity(); if new_cap > old_cap { - self.buf.try_reserve_exact(self.len, additional)?; + self.buf.reserve_exact(self.len, additional)?; unsafe { self.handle_capacity_increase(old_cap); } @@ -886,7 +878,7 @@ impl VecDeque { let old_cap = self.capacity(); if new_cap > old_cap { - self.buf.try_reserve(self.len, additional)?; + self.buf.reserve(self.len, additional)?; unsafe { self.handle_capacity_increase(old_cap); } @@ -936,80 +928,85 @@ impl VecDeque { /// assert!(buf.capacity() >= 4); /// ``` #[stable(feature = "shrink_to", since = "1.56.0")] - pub fn shrink_to(&mut self, min_capacity: usize) { - let target_cap = min_capacity.max(self.len); - - // never shrink ZSTs - if T::IS_ZST || self.capacity() <= target_cap { - return; - } + pub fn shrink_to(&mut self, min_capacity: usize) -> AllocResult { + A::ErrorHandling::map_result((|| { + // Substitute for try block + let target_cap = min_capacity.max(self.len); + + // never shrink ZSTs + if T::IS_ZST || self.capacity() <= target_cap { + return Ok(()); + } - // There are three cases of interest: - // All elements are out of desired bounds - // Elements are contiguous, and tail is out of desired bounds - // Elements are discontiguous - // - // At all other times, element positions are unaffected. + // There are three cases of interest: + // All elements are out of desired bounds + // Elements are contiguous, and tail is out of desired bounds + // Elements are discontiguous + // + // At all other times, element positions are unaffected. - // `head` and `len` are at most `isize::MAX` and `target_cap < self.capacity()`, so nothing can - // overflow. - let tail_outside = (target_cap + 1..=self.capacity()).contains(&(self.head + self.len)); + // `head` and `len` are at most `isize::MAX` and `target_cap < self.capacity()`, so nothing can + // overflow. + let tail_outside = (target_cap + 1..=self.capacity()).contains(&(self.head + self.len)); - if self.len == 0 { - self.head = 0; - } else if self.head >= target_cap && tail_outside { - // Head and tail are both out of bounds, so copy all of them to the front. - // - // H := head - // L := last element - // H L - // [. . . . . . . . o o o o o o o . ] - // H L - // [o o o o o o o . ] - unsafe { - // nonoverlapping because `self.head >= target_cap >= self.len`. - self.copy_nonoverlapping(self.head, 0, self.len); - } - self.head = 0; - } else if self.head < target_cap && tail_outside { - // Head is in bounds, tail is out of bounds. - // Copy the overflowing part to the beginning of the - // buffer. This won't overlap because `target_cap >= self.len`. - // - // H := head - // L := last element - // H L - // [. . . o o o o o o o . . . . . . ] - // L H - // [o o . o o o o o ] - let len = self.head + self.len - target_cap; - unsafe { - self.copy_nonoverlapping(target_cap, 0, len); - } - } else if !self.is_contiguous() { - // The head slice is at least partially out of bounds, tail is in bounds. - // Copy the head backwards so it lines up with the target capacity. - // This won't overlap because `target_cap >= self.len`. - // - // H := head - // L := last element - // L H - // [o o o o o . . . . . . . . . o o ] - // L H - // [o o o o o . 
o o ] - let head_len = self.capacity() - self.head; - let new_head = target_cap - head_len; - unsafe { - // can't use `copy_nonoverlapping()` here because the new and old - // regions for the head might overlap. - self.copy(self.head, new_head, head_len); + if self.len == 0 { + self.head = 0; + } else if self.head >= target_cap && tail_outside { + // Head and tail are both out of bounds, so copy all of them to the front. + // + // H := head + // L := last element + // H L + // [. . . . . . . . o o o o o o o . ] + // H L + // [o o o o o o o . ] + unsafe { + // nonoverlapping because `self.head >= target_cap >= self.len`. + self.copy_nonoverlapping(self.head, 0, self.len); + } + self.head = 0; + } else if self.head < target_cap && tail_outside { + // Head is in bounds, tail is out of bounds. + // Copy the overflowing part to the beginning of the + // buffer. This won't overlap because `target_cap >= self.len`. + // + // H := head + // L := last element + // H L + // [. . . o o o o o o o . . . . . . ] + // L H + // [o o . o o o o o ] + let len = self.head + self.len - target_cap; + unsafe { + self.copy_nonoverlapping(target_cap, 0, len); + } + } else if !self.is_contiguous() { + // The head slice is at least partially out of bounds, tail is in bounds. + // Copy the head backwards so it lines up with the target capacity. + // This won't overlap because `target_cap >= self.len`. + // + // H := head + // L := last element + // L H + // [o o o o o . . . . . . . . . o o ] + // L H + // [o o o o o . o o ] + let head_len = self.capacity() - self.head; + let new_head = target_cap - head_len; + unsafe { + // can't use `copy_nonoverlapping()` here because the new and old + // regions for the head might overlap. + self.copy(self.head, new_head, head_len); + } + self.head = new_head; } - self.head = new_head; - } - self.buf.shrink_to_fit(target_cap); + self.buf.shrink_to(target_cap)?; - debug_assert!(self.head < self.capacity() || self.capacity() == 0); - debug_assert!(self.len <= self.capacity()); + debug_assert!(self.head < self.capacity() || self.capacity() == 0); + debug_assert!(self.len <= self.capacity()); + + Ok(()) + })()) } /// Shortens the deque, keeping the first `len` elements and dropping @@ -1628,17 +1625,22 @@ impl VecDeque { /// assert_eq!(d.front(), Some(&2)); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn push_front(&mut self, value: T) { - if self.is_full() { - self.grow(); - } + pub fn push_front(&mut self, value: T) -> AllocResult { + A::ErrorHandling::map_result((|| { + // Substitute for try block + if self.is_full() { + self.grow()?; + } - self.head = self.wrap_sub(self.head, 1); - self.len += 1; + self.head = self.wrap_sub(self.head, 1); + self.len += 1; - unsafe { - self.buffer_write(self.head, value); - } + unsafe { + self.buffer_write(self.head, value); + } + + Ok(()) + })()) } /// Appends an element to the back of the deque. 
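The `push_front` conversion above shows the pattern every mutating method in this file follows: an immediately invoked closure substitutes for an unstable `try` block, and `A::ErrorHandling::map_result` collapses the `Result` exactly once at the public boundary. Caller-side, nothing changes for `Fatal` allocators; a sketch:

```rust
// Sketch: with `Global` (ErrorHandling = Fatal), `AllocResult<A>` is `()`,
// so existing callers compile unchanged despite the new signatures.
use std::collections::VecDeque;

fn demo() {
    let mut buf: VecDeque<i32> = VecDeque::new();
    buf.push_front(0); // evaluates to `()`; OOM would divert to `handle_alloc_error`
    buf.push_back(1); // likewise
    assert_eq!(buf.len(), 2);
}
```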
@@ -1654,13 +1656,18 @@ impl VecDeque { /// assert_eq!(3, *buf.back().unwrap()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn push_back(&mut self, value: T) { - if self.is_full() { - self.grow(); - } + pub fn push_back(&mut self, value: T) -> AllocResult { + A::ErrorHandling::map_result((|| { + // Substsitute for try block + if self.is_full() { + self.grow()?; + } - unsafe { self.buffer_write(self.to_physical_idx(self.len), value) } - self.len += 1; + unsafe { self.buffer_write(self.to_physical_idx(self.len), value) } + self.len += 1; + + Ok(()) + })()) } #[inline] @@ -1763,32 +1770,37 @@ impl VecDeque { /// assert_eq!(vec_deque, &['a', 'd', 'b', 'c']); /// ``` #[stable(feature = "deque_extras_15", since = "1.5.0")] - pub fn insert(&mut self, index: usize, value: T) { - assert!(index <= self.len(), "index out of bounds"); - if self.is_full() { - self.grow(); - } - - let k = self.len - index; - if k < index { - // `index + 1` can't overflow, because if index was usize::MAX, then either the - // assert would've failed, or the deque would've tried to grow past usize::MAX - // and panicked. - unsafe { - // see `remove()` for explanation why this wrap_copy() call is safe. - self.wrap_copy(self.to_physical_idx(index), self.to_physical_idx(index + 1), k); - self.buffer_write(self.to_physical_idx(index), value); - self.len += 1; + pub fn insert(&mut self, index: usize, value: T) -> AllocResult { + A::ErrorHandling::map_result((|| { + // Substitute for try block + assert!(index <= self.len(), "index out of bounds"); + if self.is_full() { + self.grow()?; } - } else { - let old_head = self.head; - self.head = self.wrap_sub(self.head, 1); - unsafe { - self.wrap_copy(old_head, self.head, index); - self.buffer_write(self.to_physical_idx(index), value); - self.len += 1; + + let k = self.len - index; + if k < index { + // `index + 1` can't overflow, because if index was usize::MAX, then either the + // assert would've failed, or the deque would've tried to grow past usize::MAX + // and panicked. + unsafe { + // see `remove()` for explanation why this wrap_copy() call is safe. + self.wrap_copy(self.to_physical_idx(index), self.to_physical_idx(index + 1), k); + self.buffer_write(self.to_physical_idx(index), value); + self.len += 1; + } + } else { + let old_head = self.head; + self.head = self.wrap_sub(self.head, 1); + unsafe { + self.wrap_copy(old_head, self.head, index); + self.buffer_write(self.to_physical_idx(index), value); + self.len += 1; + } } - } + + Ok(()) + })()) } /// Removes and returns the element at `index` from the deque. @@ -1865,51 +1877,57 @@ impl VecDeque { #[inline] #[must_use = "use `.truncate()` if you don't need the other half"] #[stable(feature = "split_off", since = "1.4.0")] - pub fn split_off(&mut self, at: usize) -> Self + pub fn split_off(&mut self, at: usize) -> AllocResult where A: Clone, { - let len = self.len; - assert!(at <= len, "`at` out of bounds"); - - let other_len = len - at; - let mut other = VecDeque::with_capacity_in(other_len, self.allocator().clone()); - - unsafe { - let (first_half, second_half) = self.as_slices(); - - let first_len = first_half.len(); - let second_len = second_half.len(); - if at < first_len { - // `at` lies in the first half. 
- let amount_in_first = first_len - at; + A::ErrorHandling::map_result((|| { + let len = self.len; + assert!(at <= len, "`at` out of bounds"); - ptr::copy_nonoverlapping(first_half.as_ptr().add(at), other.ptr(), amount_in_first); + let other_len = len - at; + let mut other = VecDeque::try_with_capacity_in(other_len, self.allocator().clone())?; - // just take all of the second half. - ptr::copy_nonoverlapping( - second_half.as_ptr(), - other.ptr().add(amount_in_first), - second_len, - ); - } else { - // `at` lies in the second half, need to factor in the elements we skipped - // in the first half. - let offset = at - first_len; - let amount_in_second = second_len - offset; - ptr::copy_nonoverlapping( - second_half.as_ptr().add(offset), - other.ptr(), - amount_in_second, - ); + unsafe { + let (first_half, second_half) = self.as_slices(); + + let first_len = first_half.len(); + let second_len = second_half.len(); + if at < first_len { + // `at` lies in the first half. + let amount_in_first = first_len - at; + + ptr::copy_nonoverlapping( + first_half.as_ptr().add(at), + other.ptr(), + amount_in_first, + ); + + // just take all of the second half. + ptr::copy_nonoverlapping( + second_half.as_ptr(), + other.ptr().add(amount_in_first), + second_len, + ); + } else { + // `at` lies in the second half, need to factor in the elements we skipped + // in the first half. + let offset = at - first_len; + let amount_in_second = second_len - offset; + ptr::copy_nonoverlapping( + second_half.as_ptr().add(offset), + other.ptr(), + amount_in_second, + ); + } } - } - // Cleanup where the ends of the buffers are - self.len = at; - other.len = other_len; + // Cleanup where the ends of the buffers are + self.len = at; + other.len = other_len; - other + Ok(other) + })()) } /// Moves all the elements of `other` into `self`, leaving `other` empty. @@ -2053,16 +2071,17 @@ impl VecDeque { // be called in cold paths. 
// This may panic or abort #[inline(never)] - fn grow(&mut self) { + fn grow(&mut self) -> Result<(), TryReserveError> { // Extend or possibly remove this assertion when valid use-cases for growing the // buffer without it being full emerge debug_assert!(self.is_full()); let old_cap = self.capacity(); - self.buf.reserve_for_push(old_cap); - unsafe { - self.handle_capacity_increase(old_cap); - } - debug_assert!(!self.is_full()); + self.buf.reserve_for_push(old_cap).map(|_| { + unsafe { + self.handle_capacity_increase(old_cap); + } + debug_assert!(!self.is_full()); + }) } /// Modifies the deque in-place so that `len()` is equal to `new_len`, diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs index 59fa91c1066dc..27e779838c24d 100644 --- a/library/alloc/src/lib.rs +++ b/library/alloc/src/lib.rs @@ -95,6 +95,7 @@ #![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))] #![cfg_attr(test, feature(is_sorted))] #![cfg_attr(test, feature(new_uninit))] +#![feature(alloc_internals)] #![feature(alloc_layout_extra)] #![feature(allocator_api)] #![feature(array_chunks)] @@ -115,6 +116,7 @@ #![feature(const_pin)] #![feature(const_refs_to_cell)] #![feature(const_size_of_val)] +#![feature(const_type_name)] #![feature(const_waker)] #![feature(core_intrinsics)] #![feature(core_panic)] diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs index dfd30d99cf041..6b5af1963909f 100644 --- a/library/alloc/src/raw_vec.rs +++ b/library/alloc/src/raw_vec.rs @@ -1,15 +1,12 @@ #![unstable(feature = "raw_vec_internals", reason = "unstable const warnings", issue = "none")] -use core::alloc::LayoutError; use core::cmp; use core::intrinsics; use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties}; use core::ptr::{self, NonNull, Unique}; use core::slice; -#[cfg(not(no_global_oom_handling))] -use crate::alloc::handle_alloc_error; -use crate::alloc::{Allocator, Global, Layout}; +use crate::alloc::{Allocator, Global, Layout, LayoutError}; use crate::boxed::Box; use crate::collections::TryReserveError; use crate::collections::TryReserveErrorKind::*; @@ -17,7 +14,6 @@ use crate::collections::TryReserveErrorKind::*; #[cfg(test)] mod tests; -#[cfg(not(no_global_oom_handling))] enum AllocInit { /// The contents of the new memory are uninitialized. Uninitialized, @@ -71,34 +67,6 @@ impl RawVec { pub const fn new() -> Self { Self::new_in(Global) } - - /// Creates a `RawVec` (on the system heap) with exactly the - /// capacity and alignment requirements for a `[T; capacity]`. This is - /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is - /// zero-sized. Note that if `T` is zero-sized this means you will - /// *not* get a `RawVec` with the requested capacity. - /// - /// # Panics - /// - /// Panics if the requested capacity exceeds `isize::MAX` bytes. - /// - /// # Aborts - /// - /// Aborts on OOM. - #[cfg(not(any(no_global_oom_handling, test)))] - #[must_use] - #[inline] - pub fn with_capacity(capacity: usize) -> Self { - Self::with_capacity_in(capacity, Global) - } - - /// Like `with_capacity`, but guarantees the buffer is zeroed. - #[cfg(not(any(no_global_oom_handling, test)))] - #[must_use] - #[inline] - pub fn with_capacity_zeroed(capacity: usize) -> Self { - Self::with_capacity_zeroed_in(capacity, Global) - } } impl RawVec { @@ -122,19 +90,19 @@ impl RawVec { Self { ptr: Unique::dangling(), cap: 0, alloc } } - /// Like `with_capacity`, but parameterized over the choice of - /// allocator for the returned `RawVec`. 
- #[cfg(not(no_global_oom_handling))] + /// Creates a `RawVec` (with the given allocator) with exactly the + /// capacity and alignment requirements for a `[T; capacity]`. This is + /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is + /// zero-sized. Note that if `T` is zero-sized this means you will + /// *not* get a `RawVec` with the requested capacity. #[inline] - pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { + pub fn with_capacity_in(capacity: usize, alloc: A) -> Result { Self::allocate_in(capacity, AllocInit::Uninitialized, alloc) } - /// Like `with_capacity_zeroed`, but parameterized over the choice - /// of allocator for the returned `RawVec`. - #[cfg(not(no_global_oom_handling))] + /// Like `with_capacity_in`, but guarantees the buffer is zeroed. #[inline] - pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self { + pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Result { Self::allocate_in(capacity, AllocInit::Zeroed, alloc) } @@ -164,39 +132,33 @@ impl RawVec { } } - #[cfg(not(no_global_oom_handling))] - fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self { + fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Result { // Don't allocate here because `Drop` will not deallocate when `capacity` is 0. if T::IS_ZST || capacity == 0 { - Self::new_in(alloc) + Ok(Self::new_in(alloc)) } else { // We avoid `unwrap_or_else` here because it bloats the amount of // LLVM IR generated. let layout = match Layout::array::(capacity) { Ok(layout) => layout, - Err(_) => capacity_overflow(), + Err(_) => Err(CapacityOverflow)?, }; - match alloc_guard(layout.size()) { - Ok(_) => {} - Err(_) => capacity_overflow(), - } + alloc_guard(layout.size())?; let result = match init { AllocInit::Uninitialized => alloc.allocate(layout), AllocInit::Zeroed => alloc.allocate_zeroed(layout), }; - let ptr = match result { - Ok(ptr) => ptr, - Err(_) => handle_alloc_error(layout), - }; + + let ptr = result.map_err(|_| AllocError { layout, non_exhaustive: () })?; // Allocators currently return a `NonNull<[u8]>` whose length // matches the size requested. If that ever changes, the capacity // here should change to `ptr.len() / mem::size_of::()`. - Self { + Ok(Self { ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) }, cap: capacity, alloc, - } + }) } } @@ -265,50 +227,20 @@ impl RawVec { /// code *you* write that relies on the behavior of this function may break. /// /// This is ideal for implementing a bulk-push operation like `extend`. - /// - /// # Panics - /// - /// Panics if the new capacity exceeds `isize::MAX` bytes. - /// - /// # Aborts - /// - /// Aborts on OOM. - #[cfg(not(no_global_oom_handling))] #[inline] - pub fn reserve(&mut self, len: usize, additional: usize) { - // Callers expect this function to be very cheap when there is already sufficient capacity. - // Therefore, we move all the resizing and error-handling logic from grow_amortized and - // handle_reserve behind a call, while making sure that this function is likely to be - // inlined as just a comparison and a call if the comparison fails. 
- #[cold] - fn do_reserve_and_handle( - slf: &mut RawVec, - len: usize, - additional: usize, - ) { - handle_reserve(slf.grow_amortized(len, additional)); - } - + pub fn reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { if self.needs_to_grow(len, additional) { - do_reserve_and_handle(self, len, additional); + self.grow_amortized(len, additional) + } else { + Ok(()) } } /// A specialized version of `reserve()` used only by the hot and /// oft-instantiated `Vec::push()`, which does its own capacity check. - #[cfg(not(no_global_oom_handling))] #[inline(never)] - pub fn reserve_for_push(&mut self, len: usize) { - handle_reserve(self.grow_amortized(len, 1)); - } - - /// The same as `reserve`, but returns on errors instead of panicking or aborting. - pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { - if self.needs_to_grow(len, additional) { - self.grow_amortized(len, additional) - } else { - Ok(()) - } + pub fn reserve_for_push(&mut self, len: usize) -> Result<(), TryReserveError> { + self.grow_amortized(len, 1) } /// Ensures that the buffer contains at least enough space to hold `len + @@ -320,25 +252,7 @@ impl RawVec { /// If `len` exceeds `self.capacity()`, this may fail to actually allocate /// the requested space. This is not really unsafe, but the unsafe code /// *you* write that relies on the behavior of this function may break. - /// - /// # Panics - /// - /// Panics if the new capacity exceeds `isize::MAX` bytes. - /// - /// # Aborts - /// - /// Aborts on OOM. - #[cfg(not(no_global_oom_handling))] - pub fn reserve_exact(&mut self, len: usize, additional: usize) { - handle_reserve(self.try_reserve_exact(len, additional)); - } - - /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting. - pub fn try_reserve_exact( - &mut self, - len: usize, - additional: usize, - ) -> Result<(), TryReserveError> { + pub fn reserve_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { if self.needs_to_grow(len, additional) { self.grow_exact(len, additional) } else { Ok(()) } } @@ -348,13 +262,8 @@ impl RawVec { /// # Panics /// /// Panics if the given amount is *larger* than the current capacity. - /// - /// # Aborts - /// - /// Aborts on OOM. - #[cfg(not(no_global_oom_handling))] - pub fn shrink_to_fit(&mut self, cap: usize) { - handle_reserve(self.shrink(cap)); + pub fn shrink_to(&mut self, cap: usize) -> Result<(), TryReserveError> { + self.shrink(cap) } } @@ -425,7 +334,6 @@ impl RawVec { Ok(()) } - #[cfg(not(no_global_oom_handling))] fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> { assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity"); @@ -487,17 +395,6 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec { } } -// Central function for reserve error handling. -#[cfg(not(no_global_oom_handling))] -#[inline] -fn handle_reserve(result: Result<(), TryReserveError>) { - match result.map_err(|e| e.kind()) { - Err(CapacityOverflow) => capacity_overflow(), - Err(AllocError { layout, .. }) => handle_alloc_error(layout), - Ok(()) => { /* yay */ } - } -} - // We need to guarantee the following: // * We don't ever allocate `> isize::MAX` byte-size objects. // * We don't overflow `usize::MAX` and actually allocate too little. @@ -515,11 +412,3 @@ fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> { Ok(()) } } - -// One central function responsible for reporting capacity overflows. 
This'll -// ensure that the code generation related to these panics is minimal as there's -// only one location which panics rather than a bunch throughout the module. -#[cfg(not(no_global_oom_handling))] -fn capacity_overflow() -> ! { - panic!("capacity overflow"); -} diff --git a/library/alloc/src/raw_vec/tests.rs b/library/alloc/src/raw_vec/tests.rs index ff322f0da97c6..b444df014582a 100644 --- a/library/alloc/src/raw_vec/tests.rs +++ b/library/alloc/src/raw_vec/tests.rs @@ -2,8 +2,8 @@ use super::*; use std::cell::Cell; #[test] -fn allocator_param() { - use crate::alloc::AllocError; +fn allocator_param() -> Result<(), TryReserveError> { + use crate::alloc::{AllocError, Fatal}; // Writing a test of integration between third-party // allocators and `RawVec` is a little tricky because the `RawVec` @@ -20,7 +20,7 @@ fn allocator_param() { struct BoundedAlloc { fuel: Cell, } - unsafe impl Allocator for BoundedAlloc { + unsafe impl crate::alloc::Allocator for BoundedAlloc { fn allocate(&self, layout: Layout) -> Result, AllocError> { let size = layout.size(); if size > self.fuel.get() { @@ -37,45 +37,50 @@ fn allocator_param() { unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { unsafe { Global.deallocate(ptr, layout) } } + type ErrorHandling = Fatal; } let a = BoundedAlloc { fuel: Cell::new(500) }; - let mut v: RawVec = RawVec::with_capacity_in(50, a); + let mut v: RawVec = RawVec::with_capacity_in(50, a)?; assert_eq!(v.alloc.fuel.get(), 450); - v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel) + v.reserve(50, 150)?; // (causes a realloc, thus using 50 + 150 = 200 units of fuel) assert_eq!(v.alloc.fuel.get(), 250); + + Ok(()) } #[test] -fn reserve_does_not_overallocate() { +fn reserve_does_not_overallocate() -> Result<(), TryReserveError> { { let mut v: RawVec = RawVec::new(); // First, `reserve` allocates like `reserve_exact`. - v.reserve(0, 9); + v.reserve(0, 9)?; assert_eq!(9, v.capacity()); } { let mut v: RawVec = RawVec::new(); - v.reserve(0, 7); + v.reserve(0, 7)?; assert_eq!(7, v.capacity()); // 97 is more than double of 7, so `reserve` should work // like `reserve_exact`. - v.reserve(7, 90); + v.reserve(7, 90)?; assert_eq!(97, v.capacity()); } { let mut v: RawVec = RawVec::new(); - v.reserve(0, 12); + v.reserve(0, 12)?; assert_eq!(12, v.capacity()); - v.reserve(12, 3); + v.reserve(12, 3)?; // 3 is less than half of 12, so `reserve` must grow // exponentially. At the time of writing this test grow // factor is 2, so new capacity is 24, however, grow factor // of 1.5 is OK too. Hence `>= 18` in assert. 
assert!(v.capacity() >= 12 + 12 / 2); } + + Ok(()) } struct ZST; @@ -88,7 +93,7 @@ fn zst_sanity(v: &RawVec) { } #[test] -fn zst() { +fn zst() -> Result<(), TryReserveError> { let cap_err = Err(crate::collections::TryReserveErrorKind::CapacityOverflow.into()); assert_eq!(std::mem::size_of::(), 0); @@ -98,19 +103,19 @@ fn zst() { let v: RawVec = RawVec::new(); zst_sanity(&v); - let v: RawVec = RawVec::with_capacity_in(100, Global); + let v: RawVec = RawVec::with_capacity_in(100, Global)?; zst_sanity(&v); - let v: RawVec = RawVec::with_capacity_in(100, Global); + let v: RawVec = RawVec::with_capacity_in(100, Global)?; zst_sanity(&v); - let v: RawVec = RawVec::allocate_in(0, AllocInit::Uninitialized, Global); + let v: RawVec = RawVec::allocate_in(0, AllocInit::Uninitialized, Global)?; zst_sanity(&v); - let v: RawVec = RawVec::allocate_in(100, AllocInit::Uninitialized, Global); + let v: RawVec = RawVec::allocate_in(100, AllocInit::Uninitialized, Global)?; zst_sanity(&v); - let mut v: RawVec = RawVec::allocate_in(usize::MAX, AllocInit::Uninitialized, Global); + let mut v: RawVec = RawVec::allocate_in(usize::MAX, AllocInit::Uninitialized, Global)?; zst_sanity(&v); // Check all these operations work as expected with zero-sized elements. @@ -119,20 +124,20 @@ fn zst() { assert!(v.needs_to_grow(101, usize::MAX - 100)); zst_sanity(&v); - v.reserve(100, usize::MAX - 100); + v.reserve(100, usize::MAX - 100)?; //v.reserve(101, usize::MAX - 100); // panics, in `zst_reserve_panic` below zst_sanity(&v); - v.reserve_exact(100, usize::MAX - 100); + v.reserve_exact(100, usize::MAX - 100)?; //v.reserve_exact(101, usize::MAX - 100); // panics, in `zst_reserve_exact_panic` below zst_sanity(&v); - assert_eq!(v.try_reserve(100, usize::MAX - 100), Ok(())); - assert_eq!(v.try_reserve(101, usize::MAX - 100), cap_err); + assert_eq!(v.reserve(100, usize::MAX - 100), Ok(())); + assert_eq!(v.reserve(101, usize::MAX - 100), cap_err); zst_sanity(&v); - assert_eq!(v.try_reserve_exact(100, usize::MAX - 100), Ok(())); - assert_eq!(v.try_reserve_exact(101, usize::MAX - 100), cap_err); + assert_eq!(v.reserve_exact(100, usize::MAX - 100), Ok(())); + assert_eq!(v.reserve_exact(101, usize::MAX - 100), cap_err); zst_sanity(&v); assert_eq!(v.grow_amortized(100, usize::MAX - 100), cap_err); @@ -142,22 +147,26 @@ fn zst() { assert_eq!(v.grow_exact(100, usize::MAX - 100), cap_err); assert_eq!(v.grow_exact(101, usize::MAX - 100), cap_err); zst_sanity(&v); + + Ok(()) } #[test] -#[should_panic(expected = "capacity overflow")] fn zst_reserve_panic() { + let cap_err = Err(crate::collections::TryReserveErrorKind::CapacityOverflow.into()); + let mut v: RawVec = RawVec::new(); zst_sanity(&v); - v.reserve(101, usize::MAX - 100); + assert_eq!(v.reserve(101, usize::MAX - 100), cap_err); } #[test] -#[should_panic(expected = "capacity overflow")] fn zst_reserve_exact_panic() { + let cap_err = Err(crate::collections::TryReserveErrorKind::CapacityOverflow.into()); + let mut v: RawVec = RawVec::new(); zst_sanity(&v); - v.reserve_exact(101, usize::MAX - 100); + assert_eq!(v.reserve_exact(101, usize::MAX - 100), cap_err); } diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs index 093dcbbe8bf77..38c4e8ab1d4ae 100644 --- a/library/alloc/src/slice.rs +++ b/library/alloc/src/slice.rs @@ -26,8 +26,12 @@ use crate::alloc::Allocator; #[cfg(not(no_global_oom_handling))] use crate::alloc::{self, Global}; #[cfg(not(no_global_oom_handling))] +use crate::alloc::{AllocResult, ErrorHandling}; +#[cfg(not(no_global_oom_handling))] use 
crate::borrow::ToOwned; use crate::boxed::Box; +#[cfg(not(no_global_oom_handling))] +use crate::collections::TryReserveError; use crate::vec::Vec; #[cfg(test)] @@ -89,9 +93,11 @@ pub use hack::to_vec; // `core::slice::SliceExt` - we need to supply these functions for the // `test_permutations` test pub(crate) mod hack { - use core::alloc::Allocator; + use crate::alloc::Allocator; use crate::boxed::Box; + #[cfg(not(no_global_oom_handling))] + use crate::collections::TryReserveError; use crate::vec::Vec; // We shouldn't add inline attribute to this since this is used in @@ -107,13 +113,16 @@ pub(crate) mod hack { #[cfg(not(no_global_oom_handling))] #[inline] - pub fn to_vec(s: &[T], alloc: A) -> Vec { + pub fn to_vec( + s: &[T], + alloc: A, + ) -> Result, TryReserveError> { T::to_vec(s, alloc) } #[cfg(not(no_global_oom_handling))] pub trait ConvertVec { - fn to_vec(s: &[Self], alloc: A) -> Vec + fn to_vec(s: &[Self], alloc: A) -> Result, TryReserveError> where Self: Sized; } @@ -121,7 +130,10 @@ pub(crate) mod hack { #[cfg(not(no_global_oom_handling))] impl ConvertVec for T { #[inline] - default fn to_vec(s: &[Self], alloc: A) -> Vec { + default fn to_vec( + s: &[Self], + alloc: A, + ) -> Result, TryReserveError> { struct DropGuard<'a, T, A: Allocator> { vec: &'a mut Vec, num_init: usize, @@ -136,7 +148,7 @@ pub(crate) mod hack { } } } - let mut vec = Vec::with_capacity_in(s.len(), alloc); + let mut vec = Vec::try_with_capacity_in(s.len(), alloc)?; let mut guard = DropGuard { vec: &mut vec, num_init: 0 }; let slots = guard.vec.spare_capacity_mut(); // .take(slots.len()) is necessary for LLVM to remove bounds checks @@ -151,15 +163,15 @@ pub(crate) mod hack { unsafe { vec.set_len(s.len()); } - vec + Ok(vec) } } #[cfg(not(no_global_oom_handling))] impl ConvertVec for T { #[inline] - fn to_vec(s: &[Self], alloc: A) -> Vec { - let mut v = Vec::with_capacity_in(s.len(), alloc); + fn to_vec(s: &[Self], alloc: A) -> Result, TryReserveError> { + let mut v = Vec::try_with_capacity_in(s.len(), alloc)?; // SAFETY: // allocated above with the capacity of `s`, and initialize to `s.len()` in // ptr::copy_to_non_overlapping below. @@ -167,7 +179,7 @@ pub(crate) mod hack { s.as_ptr().copy_to_nonoverlapping(v.as_mut_ptr(), s.len()); v.set_len(s.len()); } - v + Ok(v) } } } @@ -433,12 +445,12 @@ impl [T] { #[rustc_allow_incoherent_impl] #[inline] #[unstable(feature = "allocator_api", issue = "32838")] - pub fn to_vec_in(&self, alloc: A) -> Vec + pub fn to_vec_in(&self, alloc: A) -> AllocResult, TryReserveError> where T: Clone, { // N.B., see the `hack` module in this file for more details. - hack::to_vec(self, alloc) + A::ErrorHandling::map_result(hack::to_vec(self, alloc)) } /// Converts `self` into a vector without clones or allocation. @@ -825,7 +837,7 @@ impl ToOwned for [T] { #[cfg(test)] fn to_owned(&self) -> Vec { - hack::to_vec(self, Global) + ::ErrorHandling::map_result(hack::to_vec(self, Global)) } fn clone_into(&self, target: &mut Vec) { diff --git a/library/alloc/src/vec/in_place_collect.rs b/library/alloc/src/vec/in_place_collect.rs index 5ecd0479971ea..86b8a32f94416 100644 --- a/library/alloc/src/vec/in_place_collect.rs +++ b/library/alloc/src/vec/in_place_collect.rs @@ -43,7 +43,7 @@ //! Additionally this specialization doesn't make sense for ZSTs as there is no reallocation to //! avoid and it would make pointer arithmetic more difficult. //! -//! [`Allocator`]: core::alloc::Allocator +//! [`Allocator`]: crate::alloc::Allocator //! //! # Drop- and panic-safety //! 
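With the change above, `to_vec_in` surfaces through `AllocResult<A, Vec<T, A>, TryReserveError>`; for the default allocator that is still a plain `Vec`. A caller-side sketch, assuming this patch:

```rust
// Sketch: `to_vec_in` now routes through the allocator's error mode, so with
// `Global` (Fatal) it still yields the `Vec` directly rather than a `Result`.
#![feature(allocator_api)]
use std::alloc::Global;

fn demo() {
    let s = [1, 2, 3];
    let v = s.to_vec_in(Global); // `Vec<i32, Global>`, not a `Result`
    assert_eq!(v, [1, 2, 3]);
}
```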
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs index b2db2fdfd18f1..5a0cfaaf1845b 100644 --- a/library/alloc/src/vec/into_iter.rs +++ b/library/alloc/src/vec/into_iter.rs @@ -1,5 +1,9 @@ #[cfg(not(no_global_oom_handling))] use super::AsVecIntoIter; +#[cfg(test)] +use crate::alloc::ErrorHandling; +#[cfg(not(no_global_oom_handling))] +use crate::alloc::Fatal; use crate::alloc::{Allocator, Global}; #[cfg(not(no_global_oom_handling))] use crate::collections::VecDeque; @@ -383,14 +387,21 @@ where #[cfg(not(no_global_oom_handling))] #[stable(feature = "vec_into_iter_clone", since = "1.8.0")] -impl Clone for IntoIter { +impl Clone for IntoIter +where + A: Allocator, +{ #[cfg(not(test))] fn clone(&self) -> Self { self.as_slice().to_vec_in(self.alloc.deref().clone()).into_iter() } #[cfg(test)] fn clone(&self) -> Self { - crate::slice::to_vec(self.as_slice(), self.alloc.deref().clone()).into_iter() + A::ErrorHandling::map_result(crate::slice::to_vec( + self.as_slice(), + self.alloc.deref().clone(), + )) + .into_iter() } } diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs index d89cdff8e366c..33b45a060394a 100644 --- a/library/alloc/src/vec/mod.rs +++ b/library/alloc/src/vec/mod.rs @@ -53,7 +53,6 @@ #![stable(feature = "rust1", since = "1.0.0")] -#[cfg(not(no_global_oom_handling))] use core::cmp; use core::cmp::Ordering; use core::fmt; @@ -65,10 +64,12 @@ use core::ops::{self, Index, IndexMut, Range, RangeBounds}; use core::ptr::{self, NonNull}; use core::slice::{self, SliceIndex}; -use crate::alloc::{Allocator, Global}; +#[cfg(not(no_global_oom_handling))] +use crate::alloc::Fatal; +use crate::alloc::{AllocResult, Allocator, ErrorHandling, Global}; use crate::borrow::{Cow, ToOwned}; use crate::boxed::Box; -use crate::collections::TryReserveError; +use crate::collections::{TryReserveError, TryReserveErrorKind}; use crate::raw_vec::RawVec; #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")] @@ -98,7 +99,6 @@ pub use self::into_iter::IntoIter; mod into_iter; -#[cfg(not(no_global_oom_handling))] use self::is_zero::IsZero; mod is_zero; @@ -108,16 +108,12 @@ mod in_place_collect; mod partial_eq; -#[cfg(not(no_global_oom_handling))] use self::spec_from_elem::SpecFromElem; -#[cfg(not(no_global_oom_handling))] mod spec_from_elem; -#[cfg(not(no_global_oom_handling))] use self::set_len_on_drop::SetLenOnDrop; -#[cfg(not(no_global_oom_handling))] mod set_len_on_drop; #[cfg(not(no_global_oom_handling))] @@ -610,6 +606,17 @@ impl Vec { Vec { buf: RawVec::new_in(alloc), len: 0 } } + pub(crate) fn try_with_capacity_in(capacity: usize, alloc: A) -> Result { + Ok(Vec { buf: RawVec::with_capacity_in(capacity, alloc)?, len: 0 }) + } + + pub(crate) fn try_with_capacity_zeroed_in( + capacity: usize, + alloc: A, + ) -> Result { + Ok(Vec { buf: RawVec::with_capacity_zeroed_in(capacity, alloc)?, len: 0 }) + } + /// Constructs a new, empty `Vec` with at least the specified capacity /// with the provided allocator. 
/// @@ -665,11 +672,10 @@ impl Vec { /// let vec_units = Vec::<(), System>::with_capacity_in(10, System); /// assert_eq!(vec_units.capacity(), usize::MAX); /// ``` - #[cfg(not(no_global_oom_handling))] #[inline] #[unstable(feature = "allocator_api", issue = "32838")] - pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { - Vec { buf: RawVec::with_capacity_in(capacity, alloc), len: 0 } + pub fn with_capacity_in(capacity: usize, alloc: A) -> AllocResult { + A::ErrorHandling::map_result(Self::try_with_capacity_in(capacity, alloc)) } /// Creates a `Vec` directly from a pointer, a capacity, a length, @@ -902,10 +908,9 @@ impl Vec { /// vec.reserve(10); /// assert!(vec.capacity() >= 11); /// ``` - #[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] - pub fn reserve(&mut self, additional: usize) { - self.buf.reserve(self.len, additional); + pub fn reserve(&mut self, additional: usize) -> AllocResult { + A::ErrorHandling::map_result(self.try_reserve(additional)) } /// Reserves the minimum capacity for at least `additional` more elements to @@ -932,10 +937,9 @@ impl Vec { /// vec.reserve_exact(10); /// assert!(vec.capacity() >= 11); /// ``` - #[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] - pub fn reserve_exact(&mut self, additional: usize) { - self.buf.reserve_exact(self.len, additional); + pub fn reserve_exact(&mut self, additional: usize) -> AllocResult { + A::ErrorHandling::map_result(self.try_reserve_exact(additional)) } /// Tries to reserve capacity for at least `additional` more elements to be inserted @@ -972,7 +976,7 @@ impl Vec { /// ``` #[stable(feature = "try_reserve", since = "1.57.0")] pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { - self.buf.try_reserve(self.len, additional) + self.buf.reserve(self.len, additional) } /// Tries to reserve the minimum capacity for at least `additional` @@ -1015,7 +1019,18 @@ impl Vec { /// ``` #[stable(feature = "try_reserve", since = "1.57.0")] pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> { - self.buf.try_reserve_exact(self.len, additional) + self.buf.reserve_exact(self.len, additional) + } + + fn try_shrink_to_fit(&mut self) -> Result<(), TryReserveError> { + // The capacity is never less than the length, and there's nothing to do when + // they are equal, so we can avoid the panic case in `RawVec::shrink_to_fit` + // by only calling it with a greater capacity. + if self.capacity() > self.len { + self.buf.shrink_to(self.len)?; + } + + Ok(()) } /// Shrinks the capacity of the vector as much as possible. @@ -1032,15 +1047,9 @@ impl Vec { /// vec.shrink_to_fit(); /// assert!(vec.capacity() >= 3); /// ``` - #[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] - pub fn shrink_to_fit(&mut self) { - // The capacity is never less than the length, and there's nothing to do when - // they are equal, so we can avoid the panic case in `RawVec::shrink_to_fit` - // by only calling it with a greater capacity. - if self.capacity() > self.len { - self.buf.shrink_to_fit(self.len); - } + pub fn shrink_to_fit(&mut self) -> AllocResult { + A::ErrorHandling::map_result(self.try_shrink_to_fit()) } /// Shrinks the capacity of the vector with a lower bound. 
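Here `shrink_to_fit` becomes a `map_result` wrapper over the new private `try_shrink_to_fit`, mirroring the `reserve`/`try_reserve` split: both spellings stay available to users of the default allocator. A caller-side sketch:

```rust
// Sketch: the `try_*` spelling surfaces the error; the plain method maps it
// through the allocator's error mode (`()` for `Global`).
use std::collections::TryReserveError;

fn demo() -> Result<(), TryReserveError> {
    let mut v: Vec<u8> = Vec::with_capacity(100);
    v.push(1);
    v.try_reserve(50)?; // explicit fallible path
    v.shrink_to_fit(); // fatal path: `()` under `Global`
    Ok(())
}
```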
@@ -1061,12 +1070,13 @@ impl Vec { /// vec.shrink_to(0); /// assert!(vec.capacity() >= 3); /// ``` - #[cfg(not(no_global_oom_handling))] #[stable(feature = "shrink_to", since = "1.56.0")] - pub fn shrink_to(&mut self, min_capacity: usize) { - if self.capacity() > min_capacity { - self.buf.shrink_to_fit(cmp::max(self.len, min_capacity)); - } + pub fn shrink_to(&mut self, min_capacity: usize) -> AllocResult { + A::ErrorHandling::map_result(if self.capacity() > min_capacity { + self.buf.shrink_to(cmp::max(self.len, min_capacity)) + } else { + Ok(()) + }) } /// Converts the vector into [`Box<[T]>`][owned slice]. @@ -1094,16 +1104,18 @@ impl Vec { /// let slice = vec.into_boxed_slice(); /// assert_eq!(slice.into_vec().capacity(), 3); /// ``` - #[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] - pub fn into_boxed_slice(mut self) -> Box<[T], A> { - unsafe { - self.shrink_to_fit(); - let me = ManuallyDrop::new(self); - let buf = ptr::read(&me.buf); - let len = me.len(); - buf.into_box(len).assume_init() - } + pub fn into_boxed_slice(mut self) -> AllocResult, TryReserveError> { + A::ErrorHandling::map_result((|| { + // Substitute for try block + self.try_shrink_to_fit()?; + unsafe { + let me = ManuallyDrop::new(self); + let buf = ptr::read(&me.buf); + let len = me.len(); + Ok(buf.into_box(len).assume_init()) + } + })()) } /// Shortens the vector, keeping the first `len` elements and dropping @@ -1430,42 +1442,46 @@ impl Vec { /// vec.insert(4, 5); /// assert_eq!(vec, [1, 4, 2, 3, 5]); /// ``` - #[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] - pub fn insert(&mut self, index: usize, element: T) { - #[cold] - #[inline(never)] - fn assert_failed(index: usize, len: usize) -> ! { - panic!("insertion index (is {index}) should be <= len (is {len})"); - } + pub fn insert(&mut self, index: usize, element: T) -> AllocResult { + A::ErrorHandling::map_result((|| { + // Substitute for try block + #[cold] + #[inline(never)] + fn assert_failed(index: usize, len: usize) -> ! { + panic!("insertion index (is {index}) should be <= len (is {len})"); + } - let len = self.len(); + let len = self.len(); - // space for the new element - if len == self.buf.capacity() { - self.reserve(1); - } + // space for the new element + if len == self.buf.capacity() { + self.buf.reserve_for_push(len)? + } - unsafe { - // infallible - // The spot to put the new value - { - let p = self.as_mut_ptr().add(index); - if index < len { - // Shift everything over to make space. (Duplicating the - // `index`th element into two consecutive places.) - ptr::copy(p, p.add(1), len - index); - } else if index == len { - // No elements need shifting. - } else { - assert_failed(index, len); + unsafe { + // infallible + // The spot to put the new value + { + let p = self.as_mut_ptr().add(index); + if index < len { + // Shift everything over to make space. (Duplicating the + // `index`th element into two consecutive places.) + ptr::copy(p, p.add(1), len - index); + } else if index == len { + // No elements need shifting. + } else { + assert_failed(index, len); + } + // Write it in, overwriting the first copy of the `index`th + // element. + ptr::write(p, element); } - // Write it in, overwriting the first copy of the `index`th - // element. 
-            ptr::write(p, element);
+                self.set_len(len + 1);
             }
-            self.set_len(len + 1);
-        }
+
+            Ok(())
+        })())
     }
 
     /// Removes and returns the element at position `index` within the vector,
@@ -1720,7 +1736,7 @@ impl<T, A: Allocator> Vec<T, A> {
         }
 
         /* INVARIANT: vec.len() > read >= write > write-1 >= 0 */
-        struct FillGapOnDrop<'a, T, A: core::alloc::Allocator> {
+        struct FillGapOnDrop<'a, T, A: crate::alloc::Allocator> {
             /* Offset of the element we want to check if it is duplicate */
             read: usize,
@@ -1732,7 +1748,7 @@ impl<T, A: Allocator> Vec<T, A> {
             vec: &'a mut Vec<T, A>,
         }
 
-        impl<'a, T, A: core::alloc::Allocator> Drop for FillGapOnDrop<'a, T, A> {
+        impl<'a, T, A: crate::alloc::Allocator> Drop for FillGapOnDrop<'a, T, A> {
             fn drop(&mut self) {
                 /* This code gets executed when `same_bucket` panics */
@@ -1818,20 +1834,24 @@ impl<T, A: Allocator> Vec<T, A> {
     /// vec.push(3);
     /// assert_eq!(vec, [1, 2, 3]);
     /// ```
-    #[cfg(not(no_global_oom_handling))]
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
-    pub fn push(&mut self, value: T) {
-        // This will panic or abort if we would allocate > isize::MAX bytes
-        // or if the length increment would overflow for zero-sized types.
-        if self.len == self.buf.capacity() {
-            self.buf.reserve_for_push(self.len);
-        }
-        unsafe {
-            let end = self.as_mut_ptr().add(self.len);
-            ptr::write(end, value);
-            self.len += 1;
-        }
+    pub fn push(&mut self, value: T) -> AllocResult<A, (), TryReserveError> {
+        A::ErrorHandling::map_result((|| {
+            // Substitute for try block
+            // This will panic or abort if we would allocate > isize::MAX bytes
+            // or if the length increment would overflow for zero-sized types.
+            if self.len == self.buf.capacity() {
+                self.buf.reserve_for_push(self.len)?
+            }
+            unsafe {
+                let end = self.as_mut_ptr().add(self.len);
+                ptr::write(end, value);
+                self.len += 1;
+            }
+
+            Ok(())
+        })())
     }
 
     /// Appends an element if there is sufficient spare capacity, otherwise an error is returned
@@ -1922,25 +1942,30 @@ impl<T, A: Allocator> Vec<T, A> {
     /// assert_eq!(vec, [1, 2, 3, 4, 5, 6]);
     /// assert_eq!(vec2, []);
     /// ```
-    #[cfg(not(no_global_oom_handling))]
     #[inline]
     #[stable(feature = "append", since = "1.4.0")]
-    pub fn append(&mut self, other: &mut Self) {
-        unsafe {
-            self.append_elements(other.as_slice() as _);
-            other.set_len(0);
-        }
+    pub fn append(&mut self, other: &mut Self) -> AllocResult<A, (), TryReserveError> {
+        A::ErrorHandling::map_result((|| {
+            // Substitute for try block
+            unsafe {
+                self.append_elements(other.as_slice() as _)?;
+                other.set_len(0);
+            }
+
+            Ok(())
+        })())
     }
 
     /// Appends elements to `self` from other buffer.
-    #[cfg(not(no_global_oom_handling))]
     #[inline]
-    unsafe fn append_elements(&mut self, other: *const [T]) {
+    unsafe fn append_elements(&mut self, other: *const [T]) -> Result<(), TryReserveError> {
         let count = unsafe { (*other).len() };
-        self.reserve(count);
+        self.try_reserve(count)?;
         let len = self.len();
         unsafe { ptr::copy_nonoverlapping(other as *const T, self.as_mut_ptr().add(len), count) };
         self.len += count;
+
+        Ok(())
     }
 
     /// Removes the specified range from the vector in bulk, returning all
@@ -2084,43 +2109,45 @@ impl<T, A: Allocator> Vec<T, A> {
     /// assert_eq!(vec, [1]);
     /// assert_eq!(vec2, [2, 3]);
     /// ```
-    #[cfg(not(no_global_oom_handling))]
     #[inline]
     #[must_use = "use `.truncate()` if you don't need the other half"]
     #[stable(feature = "split_off", since = "1.4.0")]
-    pub fn split_off(&mut self, at: usize) -> Self
+    pub fn split_off(&mut self, at: usize) -> AllocResult<A, Self, TryReserveError>
     where
         A: Clone,
     {
-        #[cold]
-        #[inline(never)]
-        fn assert_failed(at: usize, len: usize) -> ! {
-            panic!("`at` split index (is {at}) should be <= len (is {len})");
-        }
+        A::ErrorHandling::map_result((|| {
+            // Substitute for try block
+            #[cold]
+            #[inline(never)]
+            fn assert_failed(at: usize, len: usize) -> ! {
+                panic!("`at` split index (is {at}) should be <= len (is {len})");
+            }
 
-        if at > self.len() {
-            assert_failed(at, self.len());
-        }
+            if at > self.len() {
+                assert_failed(at, self.len());
+            }
 
-        if at == 0 {
-            // the new vector can take over the original buffer and avoid the copy
-            return mem::replace(
-                self,
-                Vec::with_capacity_in(self.capacity(), self.allocator().clone()),
-            );
-        }
+            if at == 0 {
+                // the new vector can take over the original buffer and avoid the copy
+                return Ok(mem::replace(
+                    self,
+                    Vec::try_with_capacity_in(self.capacity(), self.allocator().clone())?,
+                ));
+            }
 
-        let other_len = self.len - at;
-        let mut other = Vec::with_capacity_in(other_len, self.allocator().clone());
+            let other_len = self.len - at;
+            let mut other = Vec::try_with_capacity_in(other_len, self.allocator().clone())?;
 
-        // Unsafely `set_len` and copy items to `other`.
-        unsafe {
-            self.set_len(at);
-            other.set_len(other_len);
+            // Unsafely `set_len` and copy items to `other`.
+            unsafe {
+                self.set_len(at);
+                other.set_len(other_len);
 
-            ptr::copy_nonoverlapping(self.as_ptr().add(at), other.as_mut_ptr(), other.len());
-        }
-        other
+                ptr::copy_nonoverlapping(self.as_ptr().add(at), other.as_mut_ptr(), other.len());
+            }
+            Ok(other)
+        })())
     }
 
     /// Resizes the `Vec` in-place so that `len` is equal to `new_len`.
@@ -2149,18 +2176,22 @@ impl<T, A: Allocator> Vec<T, A> {
     /// vec.resize_with(4, || { p *= 2; p });
     /// assert_eq!(vec, [2, 4, 8, 16]);
     /// ```
-    #[cfg(not(no_global_oom_handling))]
     #[stable(feature = "vec_resize_with", since = "1.33.0")]
-    pub fn resize_with<F>(&mut self, new_len: usize, f: F)
+    pub fn resize_with<F>(&mut self, new_len: usize, f: F) -> AllocResult<A, (), TryReserveError>
     where
         F: FnMut() -> T,
     {
-        let len = self.len();
-        if new_len > len {
-            self.extend_trusted(iter::repeat_with(f).take(new_len - len));
-        } else {
-            self.truncate(new_len);
-        }
+        A::ErrorHandling::map_result((|| {
+            // Substitute for try block
+            let len = self.len();
+            if new_len > len {
+                self.extend_trusted(iter::repeat_with(f).take(new_len - len))?;
+            } else {
+                self.truncate(new_len);
+            }
+
+            Ok(())
+        })())
     }
 
     /// Consumes and leaks the `Vec`, returning a mutable reference to the contents,
@@ -2349,16 +2380,20 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
     /// vec.resize(2, 0);
     /// assert_eq!(vec, [1, 2]);
     /// ```
-    #[cfg(not(no_global_oom_handling))]
     #[stable(feature = "vec_resize", since = "1.5.0")]
-    pub fn resize(&mut self, new_len: usize, value: T) {
-        let len = self.len();
+    pub fn resize(&mut self, new_len: usize, value: T) -> AllocResult<A, (), TryReserveError> {
+        A::ErrorHandling::map_result((|| {
+            // Substitute for try block
+            let len = self.len();
 
-        if new_len > len {
-            self.extend_with(new_len - len, value)
-        } else {
-            self.truncate(new_len);
-        }
+            if new_len > len {
+                self.extend_with(new_len - len, value)?;
+            } else {
+                self.truncate(new_len);
+            }
+
+            Ok(())
+        })())
     }
 
     /// Clones and appends all elements in a slice to the `Vec`.
@@ -2382,8 +2417,8 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
     /// [`extend`]: Vec::extend
     #[cfg(not(no_global_oom_handling))]
     #[stable(feature = "vec_extend_from_slice", since = "1.6.0")]
-    pub fn extend_from_slice(&mut self, other: &[T]) {
-        self.spec_extend(other.iter())
+    pub fn extend_from_slice(&mut self, other: &[T]) -> AllocResult<A, (), TryReserveError> {
+        A::ErrorHandling::map_result(self.spec_extend(other.iter()))
    }
 
     /// Copies elements from `src` range to the end of the vector.
@@ -2409,18 +2444,23 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
     /// ```
     #[cfg(not(no_global_oom_handling))]
     #[stable(feature = "vec_extend_from_within", since = "1.53.0")]
-    pub fn extend_from_within<R>(&mut self, src: R)
+    pub fn extend_from_within<R>(&mut self, src: R) -> AllocResult<A, (), TryReserveError>
     where
         R: RangeBounds<usize>,
     {
-        let range = slice::range(src, ..self.len());
-        self.reserve(range.len());
+        A::ErrorHandling::map_result((|| {
+            // Substitute for try block
+            let range = slice::range(src, ..self.len());
+            self.try_reserve(range.len())?;
 
-        // SAFETY:
-        // - `slice::range` guarantees that the given range is valid for indexing self
-        unsafe {
-            self.spec_extend_from_within(range);
-        }
+            // SAFETY:
+            // - `slice::range` guarantees that the given range is valid for indexing self
+            unsafe {
+                self.spec_extend_from_within(range);
+            }
+
+            Ok(())
+        })())
     }
 }
 
@@ -2470,10 +2510,9 @@ impl<T, A: Allocator, const N: usize> Vec<[T; N], A> {
     }
 }
 
 impl<T: Clone, A: Allocator> Vec<T, A> {
-    #[cfg(not(no_global_oom_handling))]
     /// Extend the vector by `n` clones of value.
-    fn extend_with(&mut self, n: usize, value: T) {
-        self.reserve(n);
+    fn extend_with(&mut self, n: usize, value: T) -> Result<(), TryReserveError> {
+        self.try_reserve(n)?;
 
         unsafe {
             let mut ptr = self.as_mut_ptr().add(self.len());
@@ -2498,6 +2537,8 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
 
             // len set by scope guard
         }
+
+        Ok(())
     }
 }
 
@@ -2528,17 +2569,21 @@ impl<T: PartialEq, A: Allocator> Vec<T, A> {
 ////////////////////////////////////////////////////////////////////////////////
 
 #[doc(hidden)]
-#[cfg(not(no_global_oom_handling))]
 #[stable(feature = "rust1", since = "1.0.0")]
-pub fn from_elem<T: Clone>(elem: T, n: usize) -> Vec<T> {
-    <T as SpecFromElem>::from_elem(elem, n, Global)
+pub fn from_elem<T: Clone>(elem: T, n: usize) -> AllocResult<Global, Vec<T>, TryReserveError> {
+    <Global as Allocator>::ErrorHandling::map_result(<T as SpecFromElem>::from_elem(
+        elem, n, Global,
+    ))
 }
 
 #[doc(hidden)]
-#[cfg(not(no_global_oom_handling))]
 #[unstable(feature = "allocator_api", issue = "32838")]
-pub fn from_elem_in<T: Clone, A: Allocator>(elem: T, n: usize, alloc: A) -> Vec<T, A> {
-    <T as SpecFromElem>::from_elem(elem, n, alloc)
+pub fn from_elem_in<T: Clone, A: Allocator>(
+    elem: T,
+    n: usize,
+    alloc: A,
+) -> AllocResult<A, Vec<T, A>, TryReserveError> {
+    A::ErrorHandling::map_result(<T as SpecFromElem>::from_elem(elem, n, alloc))
 }
 
 trait ExtendFromWithinSpec {
@@ -2620,7 +2665,10 @@ impl<T, A: Allocator> ops::DerefMut for Vec<T, A> {
 
 #[cfg(not(no_global_oom_handling))]
 #[stable(feature = "rust1", since = "1.0.0")]
-impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
+impl<T: Clone, A: Clone> Clone for Vec<T, A>
+where
+    A: Allocator<ErrorHandling = Fatal>,
+{
     #[cfg(not(test))]
     fn clone(&self) -> Self {
         let alloc = self.allocator().clone();
@@ -2634,7 +2682,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
     #[cfg(test)]
     fn clone(&self) -> Self {
         let alloc = self.allocator().clone();
-        crate::slice::to_vec(&**self, alloc)
+        <A as Allocator>::ErrorHandling::map_result(crate::slice::to_vec(&**self, alloc))
     }
 
     fn clone_from(&mut self, other: &Self) {
@@ -2763,10 +2811,13 @@ impl<'a, T, A: Allocator> IntoIterator for &'a mut Vec<T, A> {
 
 #[cfg(not(no_global_oom_handling))]
 #[stable(feature = "rust1", since = "1.0.0")]
-impl<T, A: Allocator> Extend<T> for Vec<T, A> {
+impl<T, A: Allocator<ErrorHandling = Fatal>> Extend<T> for Vec<T, A> {
     #[inline]
     fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
-        <Self as SpecExtend<T, I::IntoIter>>::spec_extend(self, iter.into_iter())
+        A::ErrorHandling::map_result(<Self as SpecExtend<T, I::IntoIter>>::spec_extend(
+            self,
+            iter.into_iter(),
+        ))
     }
 
     #[inline]
@@ -2784,7 +2835,10 @@ impl<T, A: Allocator> Vec<T, A> {
     // leaf method to which various SpecFrom/SpecExtend implementations delegate when
     // they have no further optimizations to apply
     #[cfg(not(no_global_oom_handling))]
-    fn extend_desugared<I: Iterator<Item = T>>(&mut self, mut iterator: I) {
+    fn extend_desugared<I: Iterator<Item = T>>(
+        &mut self,
+        mut iterator: I,
+    ) -> Result<(), TryReserveError> {
         // This is the case for a general iterator.
         //
         // This function should be the moral equivalent of:
@@ -2796,7 +2850,7 @@ impl<T, A: Allocator> Vec<T, A> {
             let len = self.len();
             if len == self.capacity() {
                 let (lower, _) = iterator.size_hint();
-                self.reserve(lower.saturating_add(1));
+                self.try_reserve(lower.saturating_add(1))?;
             }
             unsafe {
                 ptr::write(self.as_mut_ptr().add(len), element);
@@ -2806,12 +2860,16 @@ impl<T, A: Allocator> Vec<T, A> {
                 self.set_len(len + 1);
             }
         }
+
+        Ok(())
     }
 
     // specific extend for `TrustedLen` iterators, called both by the specializations
     // and internal places where resolving specialization makes compilation slower
-    #[cfg(not(no_global_oom_handling))]
-    fn extend_trusted(&mut self, iterator: impl iter::TrustedLen<Item = T>) {
+    fn extend_trusted(
+        &mut self,
+        iterator: impl iter::TrustedLen<Item = T>,
+    ) -> Result<(), TryReserveError> {
         let (low, high) = iterator.size_hint();
         if let Some(additional) = high {
             debug_assert_eq!(
@@ -2820,7 +2878,7 @@ impl<T, A: Allocator> Vec<T, A> {
                 "TrustedLen iterator's size hint is not exact: {:?}",
                 (low, high)
             );
-            self.reserve(additional);
+            self.try_reserve(additional)?;
             unsafe {
                 let ptr = self.as_mut_ptr();
                 let mut local_len = SetLenOnDrop::new(&mut self.len);
@@ -2832,13 +2890,15 @@ impl<T, A: Allocator> Vec<T, A> {
                     local_len.increment_len(1);
                 });
             }
+
+            Ok(())
         } else {
             // Per TrustedLen contract a `None` upper bound means that the iterator length
             // truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
             // Since the other branch already panics eagerly (via `reserve()`) we do the same here.
             // This avoids additional codegen for a fallback code path which would eventually
             // panic anyway.
-            panic!("capacity overflow");
+            Err(TryReserveErrorKind::CapacityOverflow.into())
         }
     }
 
@@ -2882,6 +2942,7 @@ impl<T, A: Allocator> Vec<T, A> {
     where
         R: RangeBounds<usize>,
         I: IntoIterator<Item = T>,
+        A: Allocator<ErrorHandling = Fatal>,
     {
         Splice { drain: self.drain(range), replace_with: replace_with.into_iter() }
     }
@@ -2952,11 +3013,11 @@ impl<T, A: Allocator> Vec<T, A> {
 /// append the entire slice at once.
 ///
 /// [`copy_from_slice`]: slice::copy_from_slice
-#[cfg(not(no_global_oom_handling))]
 #[stable(feature = "extend_ref", since = "1.2.0")]
-impl<'a, T: Copy + 'a, A: Allocator + 'a> Extend<&'a T> for Vec<T, A> {
+#[cfg(not(no_global_oom_handling))]
+impl<'a, T: Copy + 'a, A: Allocator<ErrorHandling = Fatal> + 'a> Extend<&'a T> for Vec<T, A> {
     fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
-        self.spec_extend(iter.into_iter())
+        A::ErrorHandling::map_result(self.spec_extend(iter.into_iter()))
     }
 
     #[inline]
@@ -3065,7 +3126,7 @@ impl<T: Clone> From<&[T]> for Vec<T> {
     }
     #[cfg(test)]
     fn from(s: &[T]) -> Vec<T> {
-        crate::slice::to_vec(s, Global)
+        <Global as Allocator>::ErrorHandling::map_result(crate::slice::to_vec(s, Global))
     }
 }
 
@@ -3085,7 +3146,7 @@ impl<T: Clone> From<&mut [T]> for Vec<T> {
     }
     #[cfg(test)]
     fn from(s: &mut [T]) -> Vec<T> {
-        crate::slice::to_vec(s, Global)
+        <Global as Allocator>::ErrorHandling::map_result(crate::slice::to_vec(s, Global))
     }
 }
 
@@ -3153,10 +3214,12 @@ impl<T, A: Allocator> From<Box<[T], A>> for Vec<T, A> {
 }
 
 // note: test pulls in std, which causes errors here
-#[cfg(not(no_global_oom_handling))]
-#[cfg(not(test))]
+#[cfg(all(not(test), not(no_global_oom_handling)))]
 #[stable(feature = "box_from_vec", since = "1.20.0")]
-impl<T, A: Allocator> From<Vec<T, A>> for Box<[T], A> {
+impl<T, A> From<Vec<T, A>> for Box<[T], A>
+where
+    A: Allocator<ErrorHandling = Fatal>,
+{
     /// Convert a vector into a boxed slice.
     ///
     /// If `v` has excess capacity, its items will be moved into a
diff --git a/library/alloc/src/vec/spec_extend.rs b/library/alloc/src/vec/spec_extend.rs
index 56065ce565bfc..6b976ba6df12b 100644
--- a/library/alloc/src/vec/spec_extend.rs
+++ b/library/alloc/src/vec/spec_extend.rs
@@ -1,4 +1,5 @@
 use crate::alloc::Allocator;
+use crate::collections::TryReserveError;
 use core::iter::TrustedLen;
 use core::slice::{self};
 
@@ -6,14 +7,14 @@ use super::{IntoIter, Vec};
 
 // Specialization trait used for Vec::extend
 pub(super) trait SpecExtend<T, I> {
-    fn spec_extend(&mut self, iter: I);
+    fn spec_extend(&mut self, iter: I) -> Result<(), TryReserveError>;
 }
 
 impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
 where
     I: Iterator<Item = T>,
 {
-    default fn spec_extend(&mut self, iter: I) {
+    default fn spec_extend(&mut self, iter: I) -> Result<(), TryReserveError> {
         self.extend_desugared(iter)
     }
 }
 
@@ -22,17 +23,19 @@ impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
 where
     I: TrustedLen<Item = T>,
 {
-    default fn spec_extend(&mut self, iterator: I) {
+    default fn spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> {
         self.extend_trusted(iterator)
     }
 }
 
 impl<T, A: Allocator> SpecExtend<T, IntoIter<T>> for Vec<T, A> {
-    fn spec_extend(&mut self, mut iterator: IntoIter<T>) {
+    fn spec_extend(&mut self, mut iterator: IntoIter<T>) -> Result<(), TryReserveError> {
         unsafe {
-            self.append_elements(iterator.as_slice() as _);
+            self.append_elements(iterator.as_slice() as _)?;
         }
         iterator.forget_remaining_elements();
+
+        Ok(())
     }
 }
 
@@ -41,7 +44,7 @@ where
     I: Iterator<Item = &'a T>,
     T: Clone,
 {
-    default fn spec_extend(&mut self, iterator: I) {
+    default fn spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> {
         self.spec_extend(iterator.cloned())
     }
 }
 
@@ -50,8 +53,8 @@ impl<'a, T: 'a, A: Allocator + 'a> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
 where
     T: Copy,
 {
-    fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) {
+    fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) -> Result<(), TryReserveError> {
         let slice = iterator.as_slice();
-        unsafe { self.append_elements(slice) };
+        unsafe { self.append_elements(slice) }
     }
 }
diff --git a/library/alloc/src/vec/spec_from_elem.rs b/library/alloc/src/vec/spec_from_elem.rs
index da43d17bf3624..096d4882942f5 100644
--- a/library/alloc/src/vec/spec_from_elem.rs
+++ b/library/alloc/src/vec/spec_from_elem.rs
@@ -1,61 +1,87 @@
 use core::ptr;
 
 use crate::alloc::Allocator;
-use crate::raw_vec::RawVec;
+use crate::collections::TryReserveError;
 
 use super::{IsZero, Vec};
 
 // Specialization trait used for Vec::from_elem
 pub(super) trait SpecFromElem: Sized {
-    fn from_elem<A: Allocator>(elem: Self, n: usize, alloc: A) -> Vec<Self, A>;
+    fn from_elem<A: Allocator>(
+        elem: Self,
+        n: usize,
+        alloc: A,
+    ) -> Result<Vec<Self, A>, TryReserveError>;
 }
 
 impl<T: Clone> SpecFromElem for T {
-    default fn from_elem<A: Allocator>(elem: Self, n: usize, alloc: A) -> Vec<Self, A> {
-        let mut v = Vec::with_capacity_in(n, alloc);
-        v.extend_with(n, elem);
-        v
+    default fn from_elem<A: Allocator>(
+        elem: Self,
+        n: usize,
+        alloc: A,
+    ) -> Result<Vec<Self, A>, TryReserveError> {
+        let mut v = Vec::try_with_capacity_in(n, alloc)?;
+        v.extend_with(n, elem)?;
+        Ok(v)
     }
 }
 
 impl<T: Clone + IsZero> SpecFromElem for T {
     #[inline]
-    default fn from_elem<A: Allocator>(elem: T, n: usize, alloc: A) -> Vec<T, A> {
+    default fn from_elem<A: Allocator>(
+        elem: T,
+        n: usize,
+        alloc: A,
+    ) -> Result<Vec<T, A>, TryReserveError> {
         if elem.is_zero() {
-            return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n };
+            let mut v = Vec::try_with_capacity_zeroed_in(n, alloc)?;
+            unsafe { v.set_len(n) };
+            return Ok(v);
        }
-        let mut v = Vec::with_capacity_in(n, alloc);
-        v.extend_with(n, elem);
-        v
+        let mut v = Vec::try_with_capacity_in(n, alloc)?;
+        v.extend_with(n, elem)?;
+        Ok(v)
     }
 }
 
 impl SpecFromElem for i8 {
     #[inline]
-    fn from_elem<A: Allocator>(elem: i8, n: usize, alloc: A) -> Vec<i8, A> {
+    fn from_elem<A: Allocator>(
+        elem: i8,
+        n: usize,
+        alloc: A,
+    ) -> Result<Vec<i8, A>, TryReserveError> {
         if elem == 0 {
-            return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n };
+            let mut v = Vec::try_with_capacity_zeroed_in(n, alloc)?;
+            unsafe { v.set_len(n) };
+            return Ok(v);
         }
         unsafe {
-            let mut v = Vec::with_capacity_in(n, alloc);
+            let mut v = Vec::try_with_capacity_in(n, alloc)?;
             ptr::write_bytes(v.as_mut_ptr(), elem as u8, n);
             v.set_len(n);
-            v
+            Ok(v)
         }
     }
 }
 
 impl SpecFromElem for u8 {
     #[inline]
-    fn from_elem<A: Allocator>(elem: u8, n: usize, alloc: A) -> Vec<u8, A> {
+    fn from_elem<A: Allocator>(
+        elem: u8,
+        n: usize,
+        alloc: A,
+    ) -> Result<Vec<u8, A>, TryReserveError> {
         if elem == 0 {
-            return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n };
+            let mut v = Vec::try_with_capacity_zeroed_in(n, alloc)?;
+            unsafe { v.set_len(n) };
+            return Ok(v);
         }
         unsafe {
-            let mut v = Vec::with_capacity_in(n, alloc);
+            let mut v = Vec::try_with_capacity_in(n, alloc)?;
             ptr::write_bytes(v.as_mut_ptr(), elem, n);
             v.set_len(n);
-            v
+            Ok(v)
         }
     }
 }
diff --git a/library/alloc/src/vec/spec_from_iter.rs b/library/alloc/src/vec/spec_from_iter.rs
index efa6868473e49..458190220e88f 100644
--- a/library/alloc/src/vec/spec_from_iter.rs
+++ b/library/alloc/src/vec/spec_from_iter.rs
@@ -1,5 +1,7 @@
 use core::mem::ManuallyDrop;
-use core::ptr::{self};
+use core::ptr;
+
+use crate::alloc::{Allocator, ErrorHandling, Global};
 
 use super::{IntoIter, SpecExtend, SpecFromIterNested, Vec};
 
@@ -58,7 +60,7 @@ impl<T> SpecFromIter<T, IntoIter<T>> for Vec<T> {
         let mut vec = Vec::new();
         // must delegate to spec_extend() since extend() itself delegates
         // to spec_from for empty Vecs
-        vec.spec_extend(iterator);
+        let () = <Global as Allocator>::ErrorHandling::map_result(vec.spec_extend(iterator));
         vec
     }
 }
diff --git a/library/alloc/src/vec/spec_from_iter_nested.rs b/library/alloc/src/vec/spec_from_iter_nested.rs
index f915ebb86e5a5..3ee29a14379cd 100644
--- a/library/alloc/src/vec/spec_from_iter_nested.rs
+++ b/library/alloc/src/vec/spec_from_iter_nested.rs
@@ -2,6 +2,7 @@ use core::cmp;
 use core::iter::TrustedLen;
 use core::ptr;
 
+use crate::alloc::{Allocator, ErrorHandling, Global};
 use crate::raw_vec::RawVec;
 
 use super::{SpecExtend, Vec};
@@ -40,7 +41,9 @@ where
         };
         // must delegate to spec_extend() since extend() itself delegates
         // to spec_from for empty Vecs
-        <Vec<T> as SpecExtend<T, I>>::spec_extend(&mut vector, iterator);
+        let () = <Global as Allocator>::ErrorHandling::map_result(
+            <Vec<T> as SpecExtend<T, I>>::spec_extend(&mut vector, iterator),
+        );
         vector
     }
 }
@@ -59,7 +62,7 @@ where
             _ => panic!("capacity overflow"),
         };
         // reuse extend specialization for TrustedLen
-        vector.spec_extend(iterator);
+        let () = <Global as Allocator>::ErrorHandling::map_result(vector.spec_extend(iterator));
         vector
     }
 }
diff --git a/library/alloc/src/vec/splice.rs b/library/alloc/src/vec/splice.rs
index 852fdcc3f5ce7..308bd992d24d8 100644
--- a/library/alloc/src/vec/splice.rs
+++ b/library/alloc/src/vec/splice.rs
@@ -1,6 +1,6 @@
-use crate::alloc::{Allocator, Global};
-use core::ptr::{self};
-use core::slice::{self};
+use crate::alloc::{Allocator, ErrorHandling, Fatal, Global};
+use core::ptr;
+use core::slice;
 
 use super::{Drain, Vec};
 
@@ -21,14 +21,14 @@ use super::{Drain, Vec};
 pub struct Splice<
     'a,
     I: Iterator + 'a,
-    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + 'a = Global,
+    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator<ErrorHandling = Fatal> + 'a = Global,
> {
     pub(super) drain: Drain<'a, I::Item, A>,
     pub(super) replace_with: I,
 }
 
 #[stable(feature = "vec_splice", since = "1.21.0")]
"vec_splice", since = "1.21.0")] -impl Iterator for Splice<'_, I, A> { +impl> Iterator for Splice<'_, I, A> { type Item = I::Item; fn next(&mut self) -> Option { @@ -41,17 +41,17 @@ impl Iterator for Splice<'_, I, A> { } #[stable(feature = "vec_splice", since = "1.21.0")] -impl DoubleEndedIterator for Splice<'_, I, A> { +impl> DoubleEndedIterator for Splice<'_, I, A> { fn next_back(&mut self) -> Option { self.drain.next_back() } } #[stable(feature = "vec_splice", since = "1.21.0")] -impl ExactSizeIterator for Splice<'_, I, A> {} +impl> ExactSizeIterator for Splice<'_, I, A> {} #[stable(feature = "vec_splice", since = "1.21.0")] -impl Drop for Splice<'_, I, A> { +impl> Drop for Splice<'_, I, A> { fn drop(&mut self) { self.drain.by_ref().for_each(drop); // At this point draining is done and the only remaining tasks are splicing @@ -98,7 +98,7 @@ impl Drop for Splice<'_, I, A> { } /// Private helper methods for `Splice::drop` -impl Drain<'_, T, A> { +impl> Drain<'_, T, A> { /// The range from `self.vec.len` to `self.tail_start` contains elements /// that have been moved out. /// Fill that range as much as possible with new elements from the `replace_with` iterator. @@ -126,7 +126,7 @@ impl Drain<'_, T, A> { unsafe fn move_tail(&mut self, additional: usize) { let vec = unsafe { self.vec.as_mut() }; let len = self.tail_start + self.tail_len; - vec.buf.reserve(len, additional); + let () = A::ErrorHandling::map_result(vec.buf.reserve(len, additional)); let new_tail_start = self.tail_start + additional; unsafe { diff --git a/library/alloc/tests/boxed.rs b/library/alloc/tests/boxed.rs index 4cacee0414d7d..47356d0020382 100644 --- a/library/alloc/tests/boxed.rs +++ b/library/alloc/tests/boxed.rs @@ -1,4 +1,4 @@ -use core::alloc::{AllocError, Allocator, Layout}; +use alloc::alloc::{AllocError, Allocator, Layout, Fatal}; use core::cell::Cell; use core::mem::MaybeUninit; use core::ptr::NonNull; @@ -178,4 +178,6 @@ unsafe impl Allocator for ConstAllocator { { self } + + type ErrorHandling = Fatal; } diff --git a/library/alloc/tests/vec.rs b/library/alloc/tests/vec.rs index cc4c1f1272865..7ad6c8f107599 100644 --- a/library/alloc/tests/vec.rs +++ b/library/alloc/tests/vec.rs @@ -1,9 +1,8 @@ -use core::alloc::{Allocator, Layout}; use core::assert_eq; use core::iter::IntoIterator; use core::num::NonZeroUsize; use core::ptr::NonNull; -use std::alloc::System; +use std::alloc::{Allocator, Layout, System, Fatal}; use std::assert_matches::assert_matches; use std::borrow::Cow; use std::cell::Cell; @@ -1096,6 +1095,8 @@ fn test_into_iter_drop_allocator() { // Safety: Invariants passed to caller. unsafe { System.deallocate(ptr, layout) } } + + type ErrorHandling = Fatal; } let mut drop_count = 0; diff --git a/library/core/src/alloc/mod.rs b/library/core/src/alloc/mod.rs index 78091c0172955..6a57c2b0251dc 100644 --- a/library/core/src/alloc/mod.rs +++ b/library/core/src/alloc/mod.rs @@ -363,6 +363,68 @@ pub unsafe trait Allocator { { self } + + /// The mode of error handling for types using this allocator. + /// + /// `Fatal` means that any allocation failures should be handled + /// globally, often by panicking or aborting. Functions performing + /// allocation will simply return the value or nothing. + /// + /// `Fallible` means that any allocation failures should be handled + /// at the point of use. Functions performing allocation will return + /// `Result`. 
+    type ErrorHandling: ErrorHandling;
 }
 
+#[unstable(feature = "alloc_internals", issue = "none")]
+#[doc(hidden)]
+pub mod error_handling_sealed {
+    pub trait Sealed {}
+}
+use error_handling_sealed::Sealed;
+
+/// The mode of error handling for types using an allocator.
+///
+/// `Fatal` means that any allocation failures should be handled
+/// globally, often by panicking or aborting. Functions performing
+/// allocation will simply return the value or nothing.
+///
+/// `Fallible` means that any allocation failures should be handled
+/// at the point of use. Functions performing allocation will return
+/// `Result<T, E>`.
+#[unstable(feature = "allocator_api", issue = "32838")]
+pub trait ErrorHandling: Sealed {
+    /// Result type returned by functions that are conditionally fallible.
+    ///
+    /// - `Fatal` allocators set `type Result<T, E> = T`
+    /// - `Fallible` allocators set `type Result<T, E> = Result<T, E>`
+    type Result<T, E>;
+
+    /// Function to map allocation results into `Self::Result`.
+    ///
+    /// - For `Fatal` allocators, this should unwrap the result
+    /// - For `Fallible` allocators, this is just the identity function
+    #[must_use]
+    fn map_result<T, E>(result: Result<T, E>) -> Self::Result<T, E>;
+}
+
+/// Error handling mode to use when the user of the type wants to handle
+/// allocation failures at the point of use. Functions performing
+/// allocation will return `Result<T, E>`.
+#[derive(Debug)]
+#[unstable(feature = "allocator_api", issue = "32838")]
+pub struct Fallible;
+
+#[unstable(feature = "allocator_api", issue = "32838")]
+impl Sealed for Fallible {}
+
+#[unstable(feature = "allocator_api", issue = "32838")]
+impl ErrorHandling for Fallible {
+    type Result<T, E> = Result<T, E>;
+
+    fn map_result<T, E>(result: Result<T, E>) -> Self::Result<T, E> {
+        result
+    }
+}
 
 #[unstable(feature = "allocator_api", issue = "32838")]
@@ -418,4 +480,6 @@ where
         // SAFETY: the safety contract must be upheld by the caller
         unsafe { (**self).shrink(ptr, old_layout, new_layout) }
     }
+
+    type ErrorHandling = A::ErrorHandling;
 }
diff --git a/library/std/src/alloc.rs b/library/std/src/alloc.rs
index ec774e62debbf..5c9c5ecac7f52 100644
--- a/library/std/src/alloc.rs
+++ b/library/std/src/alloc.rs
@@ -284,6 +284,8 @@ unsafe impl Allocator for System {
             },
         }
     }
+
+    type ErrorHandling = Fatal;
 }
 
 static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut());
diff --git a/src/tools/miri/tests/pass/box-custom-alloc.rs b/src/tools/miri/tests/pass/box-custom-alloc.rs
index 155e3d74ab9c1..65a2c0dfa347d 100644
--- a/src/tools/miri/tests/pass/box-custom-alloc.rs
+++ b/src/tools/miri/tests/pass/box-custom-alloc.rs
@@ -4,7 +4,7 @@
 #![feature(allocator_api, trait_upcasting)]
 
 use std::alloc::Layout;
-use std::alloc::{AllocError, Allocator};
+use std::alloc::{AllocError, Allocator, Fatal};
 use std::cell::Cell;
 use std::mem::MaybeUninit;
 use std::ptr::{self, NonNull};
@@ -28,6 +28,8 @@ unsafe impl<'shared, 'a: 'shared> Allocator for &'shared OnceAlloc<'a> {
     }
 
     unsafe fn deallocate(&self, _ptr: NonNull<u8>, _layout: Layout) {}
+
+    type ErrorHandling = Fatal;
 }
 
 trait MyTrait {
@@ -68,6 +70,8 @@ unsafe impl<'shared, 'a: 'shared> Allocator for OnceAllocRef<'shared, 'a> {
     unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
         self.0.deallocate(ptr, layout)
     }
+
+    type ErrorHandling = Fatal;
 }
 
 /// `Box` is an `Aggregate`.
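
NOTE (editor's sketch, not part of the patch): the hunks above add `type ErrorHandling = Fatal;` to existing allocators. For contrast, this is roughly what opting into call-site handling would look like. `FallibleSystem` is a hypothetical name, and this assumes `Fallible` is re-exported from `std::alloc` the same way `Fatal` is in the test hunks above; `Allocator`, `AllocError`, and `Layout` are the real items this patch builds on.

    #![feature(allocator_api)]

    use std::alloc::{AllocError, Allocator, Fallible, Layout, System};
    use std::ptr::NonNull;

    /// A `System`-backed allocator that reports failures at each call site
    /// instead of aborting the process.
    struct FallibleSystem;

    unsafe impl Allocator for FallibleSystem {
        fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
            // Defer the actual allocation to the system allocator.
            System.allocate(layout)
        }

        unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
            // SAFETY: the safety contract must be upheld by the caller.
            unsafe { System.deallocate(ptr, layout) }
        }

        // Opt into fallible mode: allocating container methods
        // parameterized by this allocator return `Result` instead of
        // invoking the global error handler.
        type ErrorHandling = Fallible;
    }
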
diff --git a/tests/codegen/vec-shrink-panik.rs b/tests/codegen/vec-shrink-panik.rs
index 606d68ff3ab38..5ffd79993d26c 100644
--- a/tests/codegen/vec-shrink-panik.rs
+++ b/tests/codegen/vec-shrink-panik.rs
@@ -22,14 +22,6 @@ pub fn shrink_to_fit(vec: &mut Vec<u32>) {
 // CHECK-LABEL: @issue71861
 #[no_mangle]
 pub fn issue71861(vec: Vec<u32>) -> Box<[u32]> {
-    // CHECK-NOT: panic
-
-    // Call to panic_cannot_unwind in case of double-panic is expected
-    // on LLVM 16 and older, but other panics are not.
-    // old: filter
-    // old-NEXT: ; call core::panicking::panic_cannot_unwind
-    // old-NEXT: panic_cannot_unwind
-    // CHECK-NOT: panic
     vec.into_boxed_slice()
 }
 
@@ -37,17 +29,6 @@ pub fn issue71861(vec: Vec<u32>) -> Box<[u32]> {
 // CHECK-LABEL: @issue75636
 #[no_mangle]
 pub fn issue75636<'a>(iter: &[&'a str]) -> Box<[&'a str]> {
-    // CHECK-NOT: panic
-
-    // Call to panic_cannot_unwind in case of double-panic is expected,
-    // on LLVM 16 and older, but other panics are not.
-    // old: filter
-    // old-NEXT: ; call core::panicking::panic_cannot_unwind
-    // old-NEXT: panic_cannot_unwind
-    // CHECK-NOT: panic
     iter.iter().copied().collect()
 }
-
-// old: ; core::panicking::panic_cannot_unwind
-// old: declare void @{{.*}}panic_cannot_unwind
diff --git a/tests/ui/allocator/object-safe.rs b/tests/ui/allocator/object-safe.rs
index fae7ab7fe3319..6bee8fc5d98f9 100644
--- a/tests/ui/allocator/object-safe.rs
+++ b/tests/ui/allocator/object-safe.rs
@@ -4,9 +4,9 @@
 
 #![feature(allocator_api)]
 
-use std::alloc::{Allocator, System};
+use std::alloc::{Allocator, System, Fatal};
 
-fn ensure_object_safe(_: &dyn Allocator) {}
+fn ensure_object_safe(_: &dyn Allocator<ErrorHandling = Fatal>) {}
 
 fn main() {
     ensure_object_safe(&System);
diff --git a/tests/ui/box/large-allocator-ice.rs b/tests/ui/box/large-allocator-ice.rs
index b3a882ff089b0..3e45d877d5a06 100644
--- a/tests/ui/box/large-allocator-ice.rs
+++ b/tests/ui/box/large-allocator-ice.rs
@@ -2,7 +2,7 @@
 #![feature(allocator_api)]
 #![allow(unused_must_use)]
 
-use std::alloc::Allocator;
+use std::alloc::{Allocator, Fatal};
 
 struct BigAllocator([usize; 2]);
 
@@ -16,6 +16,7 @@ unsafe impl Allocator for BigAllocator {
     unsafe fn deallocate(&self, _: std::ptr::NonNull<u8>, _: std::alloc::Layout) {
         todo!()
     }
+    type ErrorHandling = Fatal;
 }
 
 fn main() {
diff --git a/tests/ui/box/leak-alloc.rs b/tests/ui/box/leak-alloc.rs
index 3f0f39f448b91..c3fe1ff60362b 100644
--- a/tests/ui/box/leak-alloc.rs
+++ b/tests/ui/box/leak-alloc.rs
@@ -1,6 +1,6 @@
 #![feature(allocator_api)]
 
-use std::alloc::{AllocError, Allocator, Layout, System};
+use std::alloc::{AllocError, Allocator, Layout, System, Fatal};
 use std::ptr::NonNull;
 use std::boxed::Box;
 
@@ -15,6 +15,8 @@ unsafe impl Allocator for Alloc {
     unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
         System.deallocate(ptr, layout)
     }
+
+    type ErrorHandling = Fatal;
 }
 
 fn use_value(_: u32) {}
diff --git a/tests/ui/box/leak-alloc.stderr b/tests/ui/box/leak-alloc.stderr
index 5140b58934a5c..2f1e0d78bd3a8 100644
--- a/tests/ui/box/leak-alloc.stderr
+++ b/tests/ui/box/leak-alloc.stderr
@@ -1,5 +1,5 @@
 error[E0505]: cannot move out of `alloc` because it is borrowed
-  --> $DIR/leak-alloc.rs:26:10
+  --> $DIR/leak-alloc.rs:28:10
   |
 LL | let alloc = Alloc {};
   |     ----- binding `alloc` declared here
diff --git a/tests/ui/debuginfo/debuginfo-box-with-large-allocator.rs b/tests/ui/debuginfo/debuginfo-box-with-large-allocator.rs
index 761539227a79c..3ec87a48301f4 100644
--- a/tests/ui/debuginfo/debuginfo-box-with-large-allocator.rs
+++ b/tests/ui/debuginfo/debuginfo-box-with-large-allocator.rs
@@ -4,7 +4,7 @@
 #![feature(allocator_api)]
 
-use std::alloc::{AllocError, Allocator, Layout};
+use std::alloc::{AllocError, Allocator, Layout, Fatal};
 use std::ptr::NonNull;
 
 struct ZST;
 
@@ -16,6 +16,7 @@ unsafe impl Allocator for &ZST {
     unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
         todo!()
     }
+    type ErrorHandling = Fatal;
 }
 
 fn main() {
diff --git a/tests/ui/hygiene/panic-location.run.stderr b/tests/ui/hygiene/panic-location.run.stderr
index a7252a4002770..65180c8047143 100644
--- a/tests/ui/hygiene/panic-location.run.stderr
+++ b/tests/ui/hygiene/panic-location.run.stderr
@@ -1,2 +1,2 @@
-thread 'main' panicked at 'capacity overflow', library/alloc/src/raw_vec.rs:524:5
+thread 'main' panicked at 'capacity overflow', library/alloc/src/alloc.rs:650:5
 note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
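
NOTE (editor's sketch, not part of the patch): call-site behavior under the two modes, assuming the hypothetical `FallibleSystem` allocator from the earlier note. With `Global` (`ErrorHandling = Fatal`), `map_result` unwraps, so existing code keeps compiling unchanged; with a `Fallible` allocator the same `Vec` methods return `Result<_, TryReserveError>` and compose with `?`.

    #![feature(allocator_api)]

    use std::collections::TryReserveError;

    fn demo() -> Result<(), TryReserveError> {
        // Fatal mode: `push` still effectively returns `()`; an allocation
        // failure is routed to the global handler and aborts/panics.
        let mut v = Vec::new();
        v.push(1);

        // Fallible mode: the same `push`/`reserve` calls surface failures
        // as `TryReserveError` at the call site instead.
        let mut w = Vec::new_in(FallibleSystem);
        w.push(2)?;
        w.reserve(100)?;
        Ok(())
    }

The design point worth noting: the caller never chooses per call; the allocator type picks the mode once, and `ErrorHandling::map_result` erases or preserves the `Result` accordingly.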