// allocator_api2/stable/raw_vec.rs

1use core::alloc::LayoutError;
2use core::mem::{self, ManuallyDrop, MaybeUninit};
3use core::ops::Drop;
4use core::ptr::{self, NonNull};
5use core::slice;
6use core::{cmp, fmt};
7
8use super::{
9    alloc::{Allocator, Global, Layout},
10    assume,
11    boxed::Box,
12};
13
14#[cfg(not(no_global_oom_handling))]
15use super::alloc::handle_alloc_error;
16
/// The error type for `try_reserve` methods.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct TryReserveError {
    // The underlying cause; exposed read-only through `kind()`.
    kind: TryReserveErrorKind,
}

impl TryReserveError {
    /// Details about the allocation that caused the error
    pub fn kind(&self) -> TryReserveErrorKind {
        self.kind.clone()
    }
}
29
/// Details of the allocation that caused a `TryReserveError`
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum TryReserveErrorKind {
    /// Error due to the computed capacity exceeding the collection's maximum
    /// (usually `isize::MAX` bytes).
    CapacityOverflow,

    /// The memory allocator returned an error
    AllocError {
        /// The layout of allocation request that failed
        layout: Layout,

        // Private zero-sized field so callers cannot construct or exhaustively
        // match this variant; keeps the variant extensible.
        #[doc(hidden)]
        non_exhaustive: (),
    },
}
46
// Bring `CapacityOverflow` / `AllocError` into scope unqualified for the
// grow/shrink code below.
use TryReserveErrorKind::*;

impl From<TryReserveErrorKind> for TryReserveError {
    #[inline(always)]
    fn from(kind: TryReserveErrorKind) -> Self {
        Self { kind }
    }
}

impl From<LayoutError> for TryReserveErrorKind {
    /// Always evaluates to [`TryReserveErrorKind::CapacityOverflow`].
    #[inline(always)]
    fn from(_: LayoutError) -> Self {
        TryReserveErrorKind::CapacityOverflow
    }
}
63
64impl fmt::Display for TryReserveError {
65    fn fmt(
66        &self,
67        fmt: &mut core::fmt::Formatter<'_>,
68    ) -> core::result::Result<(), core::fmt::Error> {
69        fmt.write_str("memory allocation failed")?;
70        let reason = match self.kind {
71            TryReserveErrorKind::CapacityOverflow => {
72                " because the computed capacity exceeded the collection's maximum"
73            }
74            TryReserveErrorKind::AllocError { .. } => {
75                " because the memory allocator returned an error"
76            }
77        };
78        fmt.write_str(reason)
79    }
80}
81
#[cfg(feature = "std")]
impl std::error::Error for TryReserveError {}

// How freshly allocated memory should be initialized; only needed when the
// infallible (OOM-aborting) allocation paths are compiled in.
#[cfg(not(no_global_oom_handling))]
enum AllocInit {
    /// The contents of the new memory are uninitialized.
    Uninitialized,
    /// The new memory is guaranteed to be zeroed.
    Zeroed,
}
92
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
/// In particular:
///
/// * Produces `NonNull::dangling()` on zero-sized types.
/// * Produces `NonNull::dangling()` on zero-length allocations.
/// * Avoids freeing `NonNull::dangling()`.
/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
/// * Guards against 32-bit systems allocating more than isize::MAX bytes.
/// * Guards against overflowing your length.
/// * Calls `handle_alloc_error` for fallible allocations.
/// * Contains a `ptr::NonNull` and thus endows the user with all related benefits.
/// * Uses the excess returned from the allocator to use the largest available capacity.
///
/// This type does not in any way inspect the memory that it manages. When dropped it *will*
/// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec`
/// to handle the actual things *stored* inside of a `RawVec`.
///
/// Note that the excess of zero-sized types is always infinite, so `capacity()` always returns
/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a
/// `Box<[T]>`, since `capacity()` won't yield the length.
#[allow(missing_debug_implementations)]
pub(crate) struct RawVec<T, A: Allocator = Global> {
    // Start of the allocation, or `NonNull::dangling()` when unallocated.
    ptr: NonNull<T>,
    // Allocated capacity in elements; `0` means "unallocated" (see `new_in`).
    cap: usize,
    // The allocator every (de/re)allocation of this buffer goes through.
    alloc: A,
}
121
// Safety: RawVec owns both T and A, so sending is safe if
// sending is safe for T and A.
unsafe impl<T, A: Allocator> Send for RawVec<T, A>
where
    T: Send,
    A: Send,
{
}

// Safety: RawVec owns both T and A, so sharing is safe if
// sharing is safe for T and A.
unsafe impl<T, A: Allocator> Sync for RawVec<T, A>
where
    T: Sync,
    A: Sync,
{
}
139
impl<T> RawVec<T, Global> {
    /// Creates the biggest possible `RawVec` (on the system heap)
    /// without allocating. If `T` has positive size, then this makes a
    /// `RawVec` with capacity `0`. If `T` is zero-sized, then it makes a
    /// `RawVec` with capacity `usize::MAX`. Useful for implementing
    /// delayed allocation.
    #[must_use]
    pub const fn new() -> Self {
        Self::new_in(Global)
    }

    /// Creates a `RawVec` (on the system heap) with exactly the
    /// capacity and alignment requirements for a `[T; capacity]`. This is
    /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is
    /// zero-sized. Note that if `T` is zero-sized this means you will
    /// *not* get a `RawVec` with the requested capacity.
    ///
    /// # Panics
    ///
    /// Panics if the requested capacity exceeds `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    #[must_use]
    #[inline(always)]
    pub fn with_capacity(capacity: usize) -> Self {
        Self::with_capacity_in(capacity, Global)
    }

    /// Like `with_capacity`, but guarantees the buffer is zeroed.
    #[cfg(not(no_global_oom_handling))]
    #[must_use]
    #[inline(always)]
    pub fn with_capacity_zeroed(capacity: usize) -> Self {
        Self::with_capacity_zeroed_in(capacity, Global)
    }
}
179
impl<T, A: Allocator> RawVec<T, A> {
    // Tiny Vecs are dumb. Skip to:
    // - 8 if the element size is 1, because any heap allocator is likely
    //   to round up a request of less than 8 bytes to at least 8 bytes.
    // - 4 if elements are moderate-sized (<= 1 KiB).
    // - 1 otherwise, to avoid wasting too much space for very short Vecs.
    pub(crate) const MIN_NON_ZERO_CAP: usize = if mem::size_of::<T>() == 1 {
        8
    } else if mem::size_of::<T>() <= 1024 {
        4
    } else {
        1
    };

    /// Like `new`, but parameterized over the choice of allocator for
    /// the returned `RawVec`.
    #[inline(always)]
    pub const fn new_in(alloc: A) -> Self {
        // `cap: 0` means "unallocated". zero-sized types are ignored.
        Self {
            ptr: NonNull::dangling(),
            cap: 0,
            alloc,
        }
    }

    /// Like `with_capacity`, but parameterized over the choice of
    /// allocator for the returned `RawVec`.
    #[cfg(not(no_global_oom_handling))]
    #[inline(always)]
    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
        Self::allocate_in(capacity, AllocInit::Uninitialized, alloc)
    }

    /// Like `with_capacity_zeroed`, but parameterized over the choice
    /// of allocator for the returned `RawVec`.
    #[cfg(not(no_global_oom_handling))]
    #[inline(always)]
    pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
        Self::allocate_in(capacity, AllocInit::Zeroed, alloc)
    }

    /// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
    ///
    /// Note that this will correctly reconstitute any `cap` changes
    /// that may have been performed. (See description of type for details.)
    ///
    /// # Safety
    ///
    /// * `len` must be greater than or equal to the most recently requested capacity, and
    /// * `len` must be less than or equal to `self.capacity()`.
    ///
    /// Note, that the requested capacity and `self.capacity()` could differ, as
    /// an allocator could overallocate and return a greater memory block than requested.
    #[inline(always)]
    pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>], A> {
        // Sanity-check one half of the safety requirement (we cannot check the other half).
        debug_assert!(
            len <= self.capacity(),
            "`len` must be smaller than or equal to `self.capacity()`"
        );

        // `ManuallyDrop` keeps our own `Drop` from freeing the buffer whose
        // ownership is being handed to the returned `Box`.
        let me = ManuallyDrop::new(self);
        unsafe {
            // SAFETY: the caller guarantees `len <= self.capacity()`, so the
            // slice stays within the allocation; the allocator is moved out
            // with `ptr::read` and `me` is never used afterwards.
            let slice = slice::from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
            Box::<[MaybeUninit<T>], A>::from_raw_in(slice, ptr::read(&me.alloc))
        }
    }

    #[cfg(not(no_global_oom_handling))]
    #[inline(always)]
    fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
        // Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
        if mem::size_of::<T>() == 0 || capacity == 0 {
            Self::new_in(alloc)
        } else {
            // We avoid `unwrap_or_else` here because it bloats the amount of
            // LLVM IR generated.
            let layout = match Layout::array::<T>(capacity) {
                Ok(layout) => layout,
                Err(_) => capacity_overflow(),
            };
            match alloc_guard(layout.size()) {
                Ok(_) => {}
                Err(_) => capacity_overflow(),
            }
            let result = match init {
                AllocInit::Uninitialized => alloc.allocate(layout),
                AllocInit::Zeroed => alloc.allocate_zeroed(layout),
            };
            let ptr = match result {
                Ok(ptr) => ptr,
                Err(_) => handle_alloc_error(layout),
            };

            // Allocators currently return a `NonNull<[u8]>` whose length
            // matches the size requested. If that ever changes, the capacity
            // here should change to `ptr.len() / mem::size_of::<T>()`.
            Self {
                // SAFETY: a successful `allocate` returned `ptr`, so it is non-null.
                ptr: unsafe { NonNull::new_unchecked(ptr.cast().as_ptr()) },
                cap: capacity,
                alloc,
            }
        }
    }

    /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
    ///
    /// # Safety
    ///
    /// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
    /// `capacity`.
    /// The `capacity` cannot exceed `isize::MAX` for sized types. (only a concern on 32-bit
    /// systems). ZST vectors may have a capacity up to `usize::MAX`.
    /// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
    /// guaranteed.
    #[inline(always)]
    pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
        Self {
            // SAFETY: the caller guarantees `ptr` came from an allocation via
            // `alloc`, so it is non-null.
            ptr: unsafe { NonNull::new_unchecked(ptr) },
            cap: capacity,
            alloc,
        }
    }

    /// Gets a raw pointer to the start of the allocation. Note that this is
    /// `NonNull::dangling()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
    /// be careful.
    #[inline(always)]
    pub fn ptr(&self) -> *mut T {
        self.ptr.as_ptr()
    }

    /// Gets the capacity of the allocation.
    ///
    /// This will always be `usize::MAX` if `T` is zero-sized.
    #[inline(always)]
    pub fn capacity(&self) -> usize {
        if mem::size_of::<T>() == 0 {
            usize::MAX
        } else {
            self.cap
        }
    }

    /// Returns a shared reference to the allocator backing this `RawVec`.
    #[inline(always)]
    pub fn allocator(&self) -> &A {
        &self.alloc
    }

    // Returns the pointer and layout of the current allocation, or `None` if
    // nothing was ever allocated (ZST element type or zero capacity).
    #[inline(always)]
    fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
        if mem::size_of::<T>() == 0 || self.cap == 0 {
            None
        } else {
            // We have an allocated chunk of memory, so we can bypass runtime
            // checks to get our current layout.
            unsafe {
                // SAFETY: this same `Layout::array` computation succeeded when
                // the buffer was allocated with capacity `self.cap`.
                let layout = Layout::array::<T>(self.cap).unwrap_unchecked();
                Some((self.ptr.cast(), layout))
            }
        }
    }

    /// Ensures that the buffer contains at least enough space to hold `len +
    /// additional` elements. If it doesn't already have enough capacity, will
    /// reallocate enough space plus comfortable slack space to get amortized
    /// *O*(1) behavior. Will limit this behavior if it would needlessly cause
    /// itself to panic.
    ///
    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// This is ideal for implementing a bulk-push operation like `extend`.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    #[inline(always)]
    pub fn reserve(&mut self, len: usize, additional: usize) {
        // Callers expect this function to be very cheap when there is already sufficient capacity.
        // Therefore, we move all the resizing and error-handling logic from grow_amortized and
        // handle_reserve behind a call, while making sure that this function is likely to be
        // inlined as just a comparison and a call if the comparison fails.
        //
        // NOTE(review): `#[inline(always)]` on this helper works against the
        // `#[cold]` hint (the slow path gets inlined anyway) — confirm this
        // combination is deliberate rather than a copy-paste of the crate-wide
        // `#[inline(always)]` convention.
        #[cold]
        #[inline(always)]
        fn do_reserve_and_handle<T, A: Allocator>(
            slf: &mut RawVec<T, A>,
            len: usize,
            additional: usize,
        ) {
            handle_reserve(slf.grow_amortized(len, additional));
        }

        if self.needs_to_grow(len, additional) {
            do_reserve_and_handle(self, len, additional);
        }
    }

    /// A specialized version of `reserve()` used only by the hot and
    /// oft-instantiated `Vec::push()`, which does its own capacity check.
    #[cfg(not(no_global_oom_handling))]
    #[inline(always)]
    pub fn reserve_for_push(&mut self, len: usize) {
        handle_reserve(self.grow_amortized(len, 1));
    }

    /// The same as `reserve`, but returns on errors instead of panicking or aborting.
    #[inline(always)]
    pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
        if self.needs_to_grow(len, additional) {
            self.grow_amortized(len, additional)
        } else {
            Ok(())
        }
    }

    /// Ensures that the buffer contains at least enough space to hold `len +
    /// additional` elements. If it doesn't already, will reallocate the
    /// minimum possible amount of memory necessary. Generally this will be
    /// exactly the amount of memory necessary, but in principle the allocator
    /// is free to give back more than we asked for.
    ///
    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe code
    /// *you* write that relies on the behavior of this function may break.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    #[inline(always)]
    pub fn reserve_exact(&mut self, len: usize, additional: usize) {
        handle_reserve(self.try_reserve_exact(len, additional));
    }

    /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
    #[inline(always)]
    pub fn try_reserve_exact(
        &mut self,
        len: usize,
        additional: usize,
    ) -> Result<(), TryReserveError> {
        if self.needs_to_grow(len, additional) {
            self.grow_exact(len, additional)
        } else {
            Ok(())
        }
    }

    /// Shrinks the buffer down to the specified capacity. If the given amount
    /// is 0, actually completely deallocates.
    ///
    /// # Panics
    ///
    /// Panics if the given amount is *larger* than the current capacity.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    #[inline(always)]
    pub fn shrink_to_fit(&mut self, cap: usize) {
        handle_reserve(self.shrink(cap));
    }
}
457
impl<T, A: Allocator> RawVec<T, A> {
    /// Returns if the buffer needs to grow to fulfill the needed extra capacity.
    /// Mainly used to make inlining reserve-calls possible without inlining `grow`.
    #[inline(always)]
    fn needs_to_grow(&self, len: usize, additional: usize) -> bool {
        // `wrapping_sub` tolerates `len > capacity()`: the huge wrapped value
        // then still reports "needs to grow".
        additional > self.capacity().wrapping_sub(len)
    }

    // Records a (re)allocation result. `ptr` must come from `self.alloc` and
    // hold at least `cap` elements of `T`.
    #[inline(always)]
    fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
        // Allocators currently return a `NonNull<[u8]>` whose length matches
        // the size requested. If that ever changes, the capacity here should
        // change to `ptr.len() / mem::size_of::<T>()`.
        //
        // SAFETY: `ptr` originates from a successful allocator call, so it is non-null.
        self.ptr = unsafe { NonNull::new_unchecked(ptr.cast().as_ptr()) };
        self.cap = cap;
    }

    // This method is usually instantiated many times. So we want it to be as
    // small as possible, to improve compile times. But we also want as much of
    // its contents to be statically computable as possible, to make the
    // generated code run faster. Therefore, this method is carefully written
    // so that all of the code that depends on `T` is within it, while as much
    // of the code that doesn't depend on `T` as possible is in functions that
    // are non-generic over `T`.
    #[inline(always)]
    fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
        // This is ensured by the calling contexts.
        debug_assert!(additional > 0);

        if mem::size_of::<T>() == 0 {
            // Since we return a capacity of `usize::MAX` when `elem_size` is
            // 0, getting to here necessarily means the `RawVec` is overfull.
            return Err(CapacityOverflow.into());
        }

        // Nothing we can really do about these checks, sadly.
        let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;

        // This guarantees exponential growth. The doubling cannot overflow
        // because `cap <= isize::MAX` and the type of `cap` is `usize`.
        let cap = cmp::max(self.cap * 2, required_cap);
        let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);

        let new_layout = Layout::array::<T>(cap);

        // `finish_grow` is non-generic over `T`.
        let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
        self.set_ptr_and_cap(ptr, cap);
        Ok(())
    }

    // The constraints on this method are much the same as those on
    // `grow_amortized`, but this method is usually instantiated less often so
    // it's less critical.
    #[inline(always)]
    fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
        if mem::size_of::<T>() == 0 {
            // Since we return a capacity of `usize::MAX` when the type size is
            // 0, getting to here necessarily means the `RawVec` is overfull.
            return Err(CapacityOverflow.into());
        }

        let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
        let new_layout = Layout::array::<T>(cap);

        // `finish_grow` is non-generic over `T`.
        let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
        self.set_ptr_and_cap(ptr, cap);
        Ok(())
    }

    #[cfg(not(no_global_oom_handling))]
    #[inline(always)]
    fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> {
        assert!(
            cap <= self.capacity(),
            "Tried to shrink to a larger capacity"
        );

        // Nothing was ever allocated (ZST or zero capacity): nothing to shrink.
        let (ptr, layout) = if let Some(mem) = self.current_memory() {
            mem
        } else {
            return Ok(());
        };

        let ptr = unsafe {
            // `Layout::array` cannot overflow here because it would have
            // overflowed earlier when capacity was larger.
            let new_layout = Layout::array::<T>(cap).unwrap_unchecked();
            self.alloc
                .shrink(ptr, layout, new_layout)
                .map_err(|_| AllocError {
                    layout: new_layout,
                    non_exhaustive: (),
                })?
        };
        self.set_ptr_and_cap(ptr, cap);
        Ok(())
    }
}
558
// This function is outside `RawVec` to minimize compile times. See the comment
// above `RawVec::grow_amortized` for details. (The `A` parameter isn't
// significant, because the number of different `A` types seen in practice is
// much smaller than the number of `T` types.)
#[inline(always)]
fn finish_grow<A>(
    new_layout: Result<Layout, LayoutError>,
    current_memory: Option<(NonNull<u8>, Layout)>,
    alloc: &mut A,
) -> Result<NonNull<[u8]>, TryReserveError>
where
    A: Allocator,
{
    // Check for the error here to minimize the size of `RawVec::grow_*`.
    let new_layout = new_layout.map_err(|_| CapacityOverflow)?;

    // Reject requests over `isize::MAX` bytes before touching the allocator.
    alloc_guard(new_layout.size())?;

    let memory = if let Some((ptr, old_layout)) = current_memory {
        debug_assert_eq!(old_layout.align(), new_layout.align());
        unsafe {
            // The allocator checks for alignment equality
            assume(old_layout.align() == new_layout.align());
            alloc.grow(ptr, old_layout, new_layout)
        }
    } else {
        // No existing allocation to extend — make a fresh one.
        alloc.allocate(new_layout)
    };

    // Map allocator failure to a `TryReserveError` carrying the failed layout.
    memory.map_err(|_| {
        AllocError {
            layout: new_layout,
            non_exhaustive: (),
        }
        .into()
    })
}
596
597impl<T, A: Allocator> Drop for RawVec<T, A> {
598    /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
599    #[inline(always)]
600    fn drop(&mut self) {
601        if let Some((ptr, layout)) = self.current_memory() {
602            unsafe { self.alloc.deallocate(ptr, layout) }
603        }
604    }
605}
606
607// Central function for reserve error handling.
608#[cfg(not(no_global_oom_handling))]
609#[inline(always)]
610fn handle_reserve(result: Result<(), TryReserveError>) {
611    match result.map_err(|e| e.kind()) {
612        Err(CapacityOverflow) => capacity_overflow(),
613        Err(AllocError { layout, .. }) => handle_alloc_error(layout),
614        Ok(()) => { /* yay */ }
615    }
616}
617
618// We need to guarantee the following:
619// * We don't ever allocate `> isize::MAX` byte-size objects.
620// * We don't overflow `usize::MAX` and actually allocate too little.
621//
622// On 64-bit we just need to check for overflow since trying to allocate
623// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
624// an extra guard for this in case we're running on a platform which can use
625// all 4GB in user-space, e.g., PAE or x32.
626
627#[inline(always)]
628fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
629    if usize::BITS < 64 && alloc_size > isize::MAX as usize {
630        Err(CapacityOverflow.into())
631    } else {
632        Ok(())
633    }
634}
635
// One central function responsible for reporting capacity overflows. This'll
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
#[cfg(not(no_global_oom_handling))]
fn capacity_overflow() -> ! {
    panic!("capacity overflow");
}