Add RawVec to unify raw Vecish code

Alexis Beingessner
2015-07-09 21:57:21 -07:00
parent b5dad7dcb2
commit bfa0e1f58a
9 changed files with 630 additions and 474 deletions


@@ -62,7 +62,7 @@ use core::hash::{self, Hash};
use core::marker::Unsize;
use core::mem;
use core::ops::{CoerceUnsized, Deref, DerefMut};
use core::ptr::{Unique};
use core::ptr::Unique;
use core::raw::{TraitObject};
/// A value that represents the heap. This is the default place that the `box`


@@ -88,6 +88,7 @@
#![feature(unique)]
#![feature(unsafe_no_drop_flag, filling_drop)]
#![feature(unsize)]
#![feature(core_slice_ext)]
#![cfg_attr(test, feature(test, alloc, rustc_private, box_raw))]
#![cfg_attr(all(not(feature = "external_funcs"), not(feature = "external_crate")),
@@ -122,6 +123,7 @@ mod boxed { pub use std::boxed::{Box, HEAP}; }
mod boxed_test;
pub mod arc;
pub mod rc;
pub mod raw_vec;
/// Common out-of-memory routine
#[cold]

src/liballoc/raw_vec.rs (new file, 453 lines)

@@ -0,0 +1,453 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::ptr::Unique;
use core::mem;
use core::slice::{self, SliceExt};
use heap;
use super::oom;
use super::boxed::Box;
use core::ops::Drop;
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
/// In particular:
///
/// * Produces heap::EMPTY on zero-sized types
/// * Produces heap::EMPTY on zero-length allocations
/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics)
/// * Guards against 32-bit systems allocating more than isize::MAX bytes
/// * Guards against overflowing your length
/// * Aborts on OOM
/// * Avoids freeing heap::EMPTY
/// * Contains a ptr::Unique and thus endows the user with all related benefits
///
/// This type does not in any way inspect the memory that it manages. When dropped it *will*
/// free its memory, but it *won't* try to Drop its contents. It is up to the user of RawVec
/// to handle the actual things *stored* inside of a RawVec.
///
/// Note that a RawVec always forces its capacity to be usize::MAX for zero-sized types.
/// This enables you to use capacity-growing logic to catch the overflows in your length
/// that might occur with zero-sized types.
///
/// However this means that you need to be careful when roundtripping this type
/// with a `Box<[T]>`: `cap()` won't yield the len. However `with_capacity`,
/// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity
/// field. This allows zero-sized types to not be special-cased by consumers of
/// this type.
#[unsafe_no_drop_flag]
pub struct RawVec<T> {
ptr: Unique<T>,
cap: usize,
}
impl<T> RawVec<T> {
/// Creates the biggest possible RawVec without allocating. If T has positive
/// size, then this makes a RawVec with capacity 0. If T has 0 size, then it
/// makes a RawVec with capacity `usize::MAX`. Useful for implementing
/// delayed allocation.
pub fn new() -> Self {
unsafe {
// !0 is usize::MAX. This branch should be stripped at compile time.
let cap = if mem::size_of::<T>() == 0 { !0 } else { 0 };
// heap::EMPTY doubles as "unallocated" and "zero-sized allocation"
RawVec { ptr: Unique::new(heap::EMPTY as *mut T), cap: cap }
}
}
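
A quick sketch (not part of the commit) of the two cases this doc comment describes, written against the API defined in this file:

```rust
// Sized T: no allocation yet; capacity stays 0 until something grows it.
let ints: RawVec<u64> = RawVec::new();
assert_eq!(ints.cap(), 0);

// Zero-sized T: capacity is reported as usize::MAX, so the usual
// "grow when len == cap" check doubles as a length-overflow guard.
let units: RawVec<()> = RawVec::new();
assert_eq!(units.cap(), !0);
```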
/// Creates a RawVec with exactly the capacity and alignment requirements
/// for a `[T; cap]`. This is equivalent to calling RawVec::new when `cap` is 0
/// or T is zero-sized. Note that if `T` is zero-sized this means you will *not*
/// get a RawVec with the requested capacity!
///
/// # Panics
///
/// * Panics if the requested capacity exceeds `usize::MAX` bytes.
/// * Panics on 32-bit platforms if the requested capacity exceeds
/// `isize::MAX` bytes.
///
/// # Aborts
///
/// Aborts on OOM
pub fn with_capacity(cap: usize) -> Self {
unsafe {
let elem_size = mem::size_of::<T>();
let alloc_size = cap.checked_mul(elem_size).expect("capacity overflow");
alloc_guard(alloc_size);
// handles ZSTs and `cap = 0` alike
let ptr = if alloc_size == 0 {
heap::EMPTY as *mut u8
} else {
let align = mem::align_of::<T>();
let ptr = heap::allocate(alloc_size, align);
if ptr.is_null() { oom() }
ptr
};
RawVec { ptr: Unique::new(ptr as *mut _), cap: cap }
}
}
/// Reconstitutes a RawVec from a pointer and capacity.
///
/// # Undefined Behaviour
///
/// The ptr must be allocated, and with the given capacity. The
/// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
/// If the ptr and capacity come from a RawVec, then this is guaranteed.
pub unsafe fn from_raw_parts(ptr: *mut T, cap: usize) -> Self {
RawVec { ptr: Unique::new(ptr), cap: cap }
}
/// Converts a `Box<[T]>` into a `RawVec<T>`.
pub fn from_box(mut slice: Box<[T]>) -> Self {
unsafe {
let result = RawVec::from_raw_parts(slice.as_mut_ptr(), slice.len());
mem::forget(slice);
result
}
}
}
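
To make the `Box<[T]>` roundtrip from the type-level docs concrete, here is a hedged sketch (not from the commit) pairing `from_box` with the `into_box` defined further down:

```rust
// from_box records the box's length in the private `cap` field, so a
// later into_box() reconstitutes a slice of the same length.
let boxed: Box<[u8]> = vec![1u8, 2, 3].into_boxed_slice();
let raw = RawVec::from_box(boxed);
assert_eq!(raw.cap(), 3); // u8 is not zero-sized, so cap() is exact here
// Sound only because every slot is still initialized:
let back: Box<[u8]> = unsafe { raw.into_box() };
assert_eq!(back.len(), 3);
```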
impl<T> RawVec<T> {
/// Gets a raw pointer to the start of the allocation. Note that this is
/// heap::EMPTY if `cap = 0` or T is zero-sized. In the former case, you must
/// be careful.
pub fn ptr(&self) -> *mut T {
*self.ptr
}
/// Gets the capacity of the allocation.
///
/// This will always be `usize::MAX` if `T` is zero-sized.
pub fn cap(&self) -> usize {
if mem::size_of::<T>() == 0 { !0 } else { self.cap }
}
/// Doubles the size of the type's backing allocation. This is common enough
/// to want to do that it's easiest to just have a dedicated method. Slightly
/// more efficient logic can be provided for this than the general case.
///
/// This function is ideal when pushing elements one at a time because
/// you don't need to incur the costs of the more general computations
/// reserve needs to do to guard against overflow. You do, however, need to
/// manually check whether `len == cap`.
///
/// # Panics
///
/// * Panics if T is zero-sized on the assumption that you managed to exhaust
/// all `usize::MAX` slots in your imaginary buffer.
/// * Panics on 32-bit platforms if the requested capacity exceeds
/// `isize::MAX` bytes.
///
/// # Aborts
///
/// Aborts on OOM
///
/// # Examples
///
/// ```ignore
/// struct MyVec<T> {
/// buf: RawVec<T>,
/// len: usize,
/// }
///
/// impl<T> MyVec<T> {
/// pub fn push(&mut self, elem: T) {
/// if self.len == self.buf.cap() { self.buf.double(); }
/// // double would have aborted or panicked if the len exceeded
/// // `isize::MAX` so this is safe to do unchecked now.
/// unsafe {
/// ptr::write(self.buf.ptr().offset(self.len as isize), elem);
/// }
/// self.len += 1;
/// }
/// }
/// ```
#[inline(never)]
#[cold]
pub fn double(&mut self) {
unsafe {
let elem_size = mem::size_of::<T>();
// since we set the capacity to usize::MAX when elem_size is
// 0, getting to here necessarily means the RawVec is overfull.
assert!(elem_size != 0, "capacity overflow");
let align = mem::align_of::<T>();
let (new_cap, ptr) = if self.cap == 0 {
// skip to 4 because tiny Vecs are dumb; but not if that would cause overflow
let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
let ptr = heap::allocate(new_cap * elem_size, align);
(new_cap, ptr)
} else {
// Since we guarantee that we never allocate more than isize::MAX bytes,
// `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow
let new_cap = 2 * self.cap;
let new_alloc_size = new_cap * elem_size;
alloc_guard(new_alloc_size);
let ptr = heap::reallocate(self.ptr() as *mut _,
self.cap * elem_size,
new_alloc_size,
align);
(new_cap, ptr)
};
// If allocate or reallocate fail, we'll get `null` back
if ptr.is_null() { oom() }
self.ptr = Unique::new(ptr as *mut _);
self.cap = new_cap;
}
}
/// Ensures that the buffer contains at least enough space to hold
/// `used_cap + needed_extra_cap` elements. If it doesn't already,
/// will reallocate the minimum possible amount of memory necessary.
/// Generally this will be exactly the amount of memory necessary,
/// but in principle the allocator is free to give back more than
/// we asked for.
///
/// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
/// the requested space. This is not really unsafe, but the unsafe
/// code *you* write that relies on the behaviour of this function may break.
///
/// # Panics
///
/// * Panics if the requested capacity exceeds `usize::MAX` bytes.
/// * Panics on 32-bit platforms if the requested capacity exceeds
/// `isize::MAX` bytes.
///
/// # Aborts
///
/// Aborts on OOM
pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
unsafe {
let elem_size = mem::size_of::<T>();
let align = mem::align_of::<T>();
// NOTE: we don't early branch on ZSTs here because we want this
// to actually catch "asking for more than usize::MAX" in that case.
// If we make it past the first branch then we are guaranteed to
// panic.
// Don't actually need any more capacity.
// Wrapping in case they gave a bad `used_cap`.
if self.cap().wrapping_sub(used_cap) >= needed_extra_cap { return; }
// Nothing we can really do about these checks :(
let new_cap = used_cap.checked_add(needed_extra_cap).expect("capacity overflow");
let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow");
alloc_guard(new_alloc_size);
let ptr = if self.cap == 0 {
heap::allocate(new_alloc_size, align)
} else {
heap::reallocate(self.ptr() as *mut _,
self.cap * elem_size,
new_alloc_size,
align)
};
// If allocate or reallocate fail, we'll get `null` back
if ptr.is_null() { oom() }
self.ptr = Unique::new(ptr as *mut _);
self.cap = new_cap;
}
}
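
For symmetry with the `reserve` example below, a sketch of a bulk insert that wants no amortized slack; `MyVec`, `extend_exact`, and the `core::ptr` import are assumptions, not part of the commit:

```rust
struct MyVec<T> { buf: RawVec<T>, len: usize }

impl<T: Clone> MyVec<T> {
    pub fn extend_exact(&mut self, elems: &[T]) {
        // Grows to exactly len + elems.len() (modulo allocator slack).
        self.buf.reserve_exact(self.len, elems.len());
        for x in elems {
            unsafe {
                ptr::write(self.buf.ptr().offset(self.len as isize), x.clone());
            }
            self.len += 1;
        }
    }
}
```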
/// Ensures that the buffer contains at least enough space to hold
/// `used_cap + needed_extra_cap` elements. If it doesn't already have
/// enough capacity, will reallocate enough space plus comfortable slack
/// space to get amortized `O(1)` behaviour. Will limit this behaviour
/// if it would needlessly cause itself to panic.
///
/// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
/// the requested space. This is not really unsafe, but the unsafe
/// code *you* write that relies on the behaviour of this function may break.
///
/// This is ideal for implementing a bulk-push operation like `extend`.
///
/// # Panics
///
/// * Panics if the requested capacity exceeds `usize::MAX` bytes.
/// * Panics on 32-bit platforms if the requested capacity exceeds
/// `isize::MAX` bytes.
///
/// # Aborts
///
/// Aborts on OOM
///
/// # Examples
///
/// ```ignore
/// struct MyVec<T> {
/// buf: RawVec<T>,
/// len: usize,
/// }
///
/// impl<T> MyVec<T> {
/// pub fn push_all(&mut self, elems: &[T]) {
/// self.buf.reserve(self.len, elems.len());
/// // reserve would have aborted or panicked if the len exceeded
/// // `isize::MAX` so this is safe to do unchecked now.
/// for x in elems {
/// unsafe {
/// ptr::write(self.buf.ptr().offset(self.len as isize), x.clone());
/// }
/// self.len += 1;
/// }
/// }
/// }
/// ```
pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
unsafe {
let elem_size = mem::size_of::<T>();
let align = mem::align_of::<T>();
// NOTE: we don't early branch on ZSTs here because we want this
// to actually catch "asking for more than usize::MAX" in that case.
// If we make it past the first branch then we are guaranteed to
// panic.
// Don't actually need any more capacity.
// Wrapping in case they gave a bad `used_cap`.
if self.cap().wrapping_sub(used_cap) >= needed_extra_cap { return; }
// Nothing we can really do about these checks :(
let new_cap = used_cap.checked_add(needed_extra_cap)
.and_then(|cap| cap.checked_mul(2))
.expect("capacity overflow");
let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow");
// FIXME: may crash and burn on over-reserve
alloc_guard(new_alloc_size);
let ptr = if self.cap == 0 {
heap::allocate(new_alloc_size, align)
} else {
heap::reallocate(self.ptr() as *mut _,
self.cap * elem_size,
new_alloc_size,
align)
};
// If allocate or reallocate fail, we'll get `null` back
if ptr.is_null() { oom() }
self.ptr = Unique::new(ptr as *mut _);
self.cap = new_cap;
}
}
/// Shrinks the allocation down to the specified amount. If the given amount
/// is 0, actually completely deallocates.
///
/// # Panics
///
/// Panics if the given amount is *larger* than the current capacity.
///
/// # Aborts
///
/// Aborts on OOM.
pub fn shrink_to_fit(&mut self, amount: usize) {
let elem_size = mem::size_of::<T>();
let align = mem::align_of::<T>();
// Set the `cap` because they might be about to promote to a `Box<[T]>`
if elem_size == 0 {
self.cap = amount;
return;
}
// This check is my waterloo; it's the only thing Vec wouldn't have to do.
assert!(self.cap >= amount, "Tried to shrink to a larger capacity");
if amount == 0 {
mem::replace(self, RawVec::new());
} else if self.cap != amount {
unsafe {
// Overflow check is unnecessary as the vector is already at
// least this large.
let ptr = heap::reallocate(self.ptr() as *mut _,
self.cap * elem_size,
amount * elem_size,
align);
if ptr.is_null() { oom() }
self.ptr = Unique::new(ptr as *mut _);
}
self.cap = amount;
}
}
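
A small sketch (hypothetical values, not from the commit) of the two interesting branches: shrinking to 0 deallocates entirely, while shrinking a zero-sized buffer only records the private `cap` for a later `into_box`:

```rust
let mut buf: RawVec<u32> = RawVec::with_capacity(16);
buf.shrink_to_fit(0);       // full deallocation...
assert_eq!(buf.cap(), 0);   // ...back to the RawVec::new() state

let mut zsts: RawVec<()> = RawVec::new();
zsts.shrink_to_fit(7);      // no memory moves; private cap becomes 7
assert_eq!(zsts.cap(), !0); // but the public cap() still reports usize::MAX
```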
/// Converts the entire buffer into `Box<[T]>`.
///
/// While it is not *strictly* Undefined Behaviour to call
/// this procedure while some of the RawVec is uninitialized,
/// it certainly makes it trivial to trigger it.
///
/// Note that this will correctly reconstitute any `cap` changes
/// that may have been performed. (see description of type for details)
pub unsafe fn into_box(self) -> Box<[T]> {
// NOTE: not calling `cap()` here, actually using the real `cap` field!
let slice = slice::from_raw_parts_mut(self.ptr(), self.cap);
let output: Box<[T]> = Box::from_raw(slice);
mem::forget(self);
output
}
/// This is a stupid name in the hopes that someone will find this in the
/// not too distant future and remove it with the rest of
/// #[unsafe_no_drop_flag]
pub fn unsafe_no_drop_flag_needs_drop(&self) -> bool {
self.cap != mem::POST_DROP_USIZE
}
}
impl<T> Drop for RawVec<T> {
/// Frees the memory owned by the RawVec *without* trying to Drop its contents.
fn drop(&mut self) {
let elem_size = mem::size_of::<T>();
if elem_size != 0 && self.cap != 0 && self.unsafe_no_drop_flag_needs_drop() {
let align = mem::align_of::<T>();
let num_bytes = elem_size * self.cap;
unsafe {
heap::deallocate(*self.ptr as *mut _, num_bytes, align);
}
}
}
}
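
The drop-flag interplay deserves a sketch. Under the 2015 `#[unsafe_no_drop_flag]`/`filling_drop` semantics (an assumption about the compiler, not something this commit shows), a move overwrites the source with `mem::POST_DROP_USIZE` bytes, and the unconditionally emitted drop of the source must then do nothing:

```rust
fn consume<T>(_buf: RawVec<T>) { /* buffer freed here, exactly once */ }

let buf: RawVec<u32> = RawVec::with_capacity(4);
consume(buf);
// A drop call is still emitted for the local `buf`, but its cap was
// overwritten with POST_DROP_USIZE during the move, so
// unsafe_no_drop_flag_needs_drop() is false and drop() frees nothing.
```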
// We need to guarantee the following:
// * We don't ever allocate `> isize::MAX` byte-size objects
// * We don't overflow `usize::MAX` and actually allocate too little
//
// On 64-bit we just need to check for overflow since trying to allocate
// `> isize::MAX` bytes will surely fail. On 32-bit we need to add an extra
// guard for this in case we're running on a platform which can use all 4GB in
// user-space. e.g. PAE or x32
#[inline]
#[cfg(target_pointer_width = "64")]
fn alloc_guard(_alloc_size: usize) { }
#[inline]
#[cfg(target_pointer_width = "32")]
fn alloc_guard(alloc_size: usize) {
assert!(alloc_size <= ::core::isize::MAX as usize, "capacity overflow");
}
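
A worked instance of the 32-bit case, with hypothetical numbers: `usize::MAX` is 2^32 - 1 but `isize::MAX` is 2^31 - 1, so a size that fits in `usize` can still be rejected:

```rust
#[cfg(target_pointer_width = "32")]
fn demo() {
    let three_gib: usize = 3 << 30; // 3 GiB: representable in usize...
    assert!(three_gib > ::core::isize::MAX as usize);
    // ...so alloc_guard(three_gib) panics with "capacity overflow"
    // instead of letting pointer offsets overflow isize later on.
}
```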


@@ -32,7 +32,6 @@
#![feature(alloc)]
#![feature(box_patterns)]
#![feature(box_raw)]
#![feature(box_syntax)]
#![feature(core)]
#![feature(core_intrinsics)]


@@ -28,7 +28,7 @@ use rustc_unicode::str::Utf16Item;
use borrow::{Cow, IntoCow};
use range::RangeArgument;
use str::{self, FromStr, Utf8Error, Chars};
use vec::{DerefVec, Vec, as_vec};
use vec::Vec;
use boxed::Box;
/// A growable string stored as a UTF-8 encoded buffer.
@@ -1029,49 +1029,6 @@ impl ops::DerefMut for String {
}
}
/// Wrapper type providing a `&String` reference via `Deref`.
#[unstable(feature = "collections")]
#[deprecated(since = "1.2.0",
reason = "replaced with deref coercions or Borrow")]
#[allow(deprecated)]
pub struct DerefString<'a> {
x: DerefVec<'a, u8>
}
#[allow(deprecated)]
impl<'a> Deref for DerefString<'a> {
type Target = String;
#[inline]
fn deref<'b>(&'b self) -> &'b String {
unsafe { mem::transmute(&*self.x) }
}
}
/// Converts a string slice to a wrapper type providing a `&String` reference.
///
/// # Examples
///
/// ```
/// # #![feature(collections)]
/// use std::string::as_string;
///
/// // Let's pretend we have a function that requires `&String`
/// fn string_consumer(s: &String) {
/// assert_eq!(s, "foo");
/// }
///
/// // Provide a `&String` from a `&str` without allocating
/// string_consumer(&as_string("foo"));
/// ```
#[unstable(feature = "collections")]
#[deprecated(since = "1.2.0",
reason = "replaced with deref coercions or Borrow")]
#[allow(deprecated)]
pub fn as_string<'a>(x: &'a str) -> DerefString<'a> {
DerefString { x: as_vec(x.as_bytes()) }
}
/// Error returned from `String::from`
#[unstable(feature = "str_parse_error", reason = "may want to be replaced with \
Void if it ever exists")]


@@ -59,32 +59,25 @@
#![stable(feature = "rust1", since = "1.0.0")]
use core::prelude::*;
use alloc::raw_vec::RawVec;
use alloc::boxed::Box;
use alloc::heap::{EMPTY, allocate, reallocate, deallocate};
use core::cmp::max;
use alloc::heap::EMPTY;
use core::cmp::Ordering;
use core::fmt;
use core::hash::{self, Hash};
use core::intrinsics::{arith_offset, assume};
use core::intrinsics::{arith_offset, assume, drop_in_place};
use core::iter::FromIterator;
use core::marker::PhantomData;
use core::mem;
use core::ops::{Index, IndexMut, Deref};
use core::ops;
use core::ptr;
use core::ptr::Unique;
use core::slice;
use core::isize;
use core::usize;
use borrow::{Cow, IntoCow};
use super::range::RangeArgument;
// FIXME- fix places which assume the max vector allowed has memory usize::MAX.
const MAX_MEMORY_SIZE: usize = isize::MAX as usize;
/// A growable list type, written `Vec<T>` but pronounced 'vector.'
///
/// # Examples
@@ -152,9 +145,8 @@ const MAX_MEMORY_SIZE: usize = isize::MAX as usize;
#[unsafe_no_drop_flag]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Vec<T> {
ptr: Unique<T>,
buf: RawVec<T>,
len: usize,
cap: usize,
}
////////////////////////////////////////////////////////////////////////////////
@@ -174,11 +166,7 @@ impl<T> Vec<T> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new() -> Vec<T> {
// We want ptr to never be NULL so instead we set it to some arbitrary
// non-null value which is fine since we never call deallocate on the ptr
// if cap is 0. The reason for this is because the pointer of a slice
// being NULL would break the null pointer optimization for enums.
unsafe { Vec::from_raw_parts(EMPTY as *mut T, 0, 0) }
Vec { buf: RawVec::new(), len: 0 }
}
/// Constructs a new, empty `Vec<T>` with the specified capacity.
@@ -209,17 +197,7 @@ impl<T> Vec<T> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn with_capacity(capacity: usize) -> Vec<T> {
if mem::size_of::<T>() == 0 {
unsafe { Vec::from_raw_parts(EMPTY as *mut T, 0, usize::MAX) }
} else if capacity == 0 {
Vec::new()
} else {
let size = capacity.checked_mul(mem::size_of::<T>())
.expect("capacity overflow");
let ptr = unsafe { allocate(size, mem::align_of::<T>()) };
if ptr.is_null() { ::alloc::oom() }
unsafe { Vec::from_raw_parts(ptr as *mut T, 0, capacity) }
}
Vec { buf: RawVec::with_capacity(capacity), len: 0 }
}
/// Creates a `Vec<T>` directly from the raw components of another vector.
@@ -270,9 +248,8 @@ impl<T> Vec<T> {
pub unsafe fn from_raw_parts(ptr: *mut T, length: usize,
capacity: usize) -> Vec<T> {
Vec {
ptr: Unique::new(ptr),
buf: RawVec::from_raw_parts(ptr, capacity),
len: length,
cap: capacity,
}
}
@@ -306,7 +283,7 @@ impl<T> Vec<T> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn capacity(&self) -> usize {
self.cap
self.buf.cap()
}
/// Reserves capacity for at least `additional` more elements to be inserted
@@ -326,17 +303,7 @@ impl<T> Vec<T> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve(&mut self, additional: usize) {
if self.cap - self.len < additional {
const ERR_MSG: &'static str = "Vec::reserve: `isize` overflow";
let new_min_cap = self.len.checked_add(additional).expect(ERR_MSG);
if new_min_cap > MAX_MEMORY_SIZE { panic!(ERR_MSG) }
self.grow_capacity(match new_min_cap.checked_next_power_of_two() {
Some(x) if x > MAX_MEMORY_SIZE => MAX_MEMORY_SIZE,
None => MAX_MEMORY_SIZE,
Some(x) => x,
});
}
self.buf.reserve(self.len, additional);
}
/// Reserves the minimum capacity for exactly `additional` more elements to
@@ -360,12 +327,7 @@ impl<T> Vec<T> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve_exact(&mut self, additional: usize) {
if self.cap - self.len < additional {
match self.len.checked_add(additional) {
None => panic!("Vec::reserve: `usize` overflow"),
Some(new_cap) => self.grow_capacity(new_cap)
}
}
self.buf.reserve_exact(self.len, additional);
}
/// Shrinks the capacity of the vector as much as possible.
@@ -384,28 +346,7 @@ impl<T> Vec<T> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn shrink_to_fit(&mut self) {
if mem::size_of::<T>() == 0 { return }
if self.len == 0 {
if self.cap != 0 {
unsafe {
dealloc(*self.ptr, self.cap)
}
self.cap = 0;
}
} else if self.cap != self.len {
unsafe {
// Overflow check is unnecessary as the vector is already at
// least this large.
let ptr = reallocate(*self.ptr as *mut u8,
self.cap * mem::size_of::<T>(),
self.len * mem::size_of::<T>(),
mem::align_of::<T>()) as *mut T;
if ptr.is_null() { ::alloc::oom() }
self.ptr = Unique::new(ptr);
}
self.cap = self.len;
}
self.buf.shrink_to_fit(self.len);
}
/// Converts the vector into Box<[T]>.
@@ -415,11 +356,11 @@ impl<T> Vec<T> {
/// `shrink_to_fit()`.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn into_boxed_slice(mut self) -> Box<[T]> {
self.shrink_to_fit();
unsafe {
let xs: Box<[T]> = Box::from_raw(&mut *self);
self.shrink_to_fit();
let buf = ptr::read(&self.buf);
mem::forget(self);
xs
buf.into_box()
}
}
@@ -536,8 +477,9 @@ impl<T> Vec<T> {
pub fn insert(&mut self, index: usize, element: T) {
let len = self.len();
assert!(index <= len);
// space for the new element
self.reserve(1);
if len == self.buf.cap() { self.buf.double(); }
unsafe { // infallible
// The spot to put the new value
@@ -545,10 +487,10 @@ impl<T> Vec<T> {
let p = self.as_mut_ptr().offset(index as isize);
// Shift everything over to make space. (Duplicating the
// `index`th element into two consecutive places.)
ptr::copy(&*p, p.offset(1), len - index);
ptr::copy(p, p.offset(1), len - index);
// Write it in, overwriting the first copy of the `index`th
// element.
ptr::write(&mut *p, element);
ptr::write(p, element);
}
self.set_len(len + 1);
}
@@ -582,7 +524,7 @@ impl<T> Vec<T> {
ret = ptr::read(ptr);
// Shift everything down to fill in that spot.
ptr::copy(&*ptr.offset(1), ptr, len - index - 1);
ptr::copy(ptr.offset(1), ptr, len - index - 1);
}
self.set_len(len - 1);
ret
@@ -638,38 +580,12 @@ impl<T> Vec<T> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push(&mut self, value: T) {
#[cold]
#[inline(never)]
fn resize<T>(vec: &mut Vec<T>) {
let old_size = vec.cap * mem::size_of::<T>();
if old_size >= MAX_MEMORY_SIZE { panic!("capacity overflow") }
let mut size = max(old_size, 2 * mem::size_of::<T>()) * 2;
if old_size > size || size > MAX_MEMORY_SIZE {
size = MAX_MEMORY_SIZE;
}
unsafe {
let ptr = alloc_or_realloc(*vec.ptr, old_size, size);
if ptr.is_null() { ::alloc::oom() }
vec.ptr = Unique::new(ptr);
}
vec.cap = max(vec.cap, 2) * 2;
}
if mem::size_of::<T>() == 0 {
// zero-size types consume no memory, so we can't rely on the
// address space running out
self.len = self.len.checked_add(1).expect("length overflow");
mem::forget(value);
return
}
if self.len == self.cap {
resize(self);
}
// This will panic or abort if we would allocate > isize::MAX bytes
// or if the length increment would overflow for zero-sized types.
if self.len == self.buf.cap() { self.buf.double(); }
unsafe {
let end = (*self.ptr).offset(self.len as isize);
ptr::write(&mut *end, value);
let end = self.as_mut_ptr().offset(self.len as isize);
ptr::write(end, value);
self.len += 1;
}
}
@@ -716,13 +632,6 @@ impl<T> Vec<T> {
#[unstable(feature = "append",
reason = "new API, waiting for dust to settle")]
pub fn append(&mut self, other: &mut Self) {
if mem::size_of::<T>() == 0 {
// zero-size types consume no memory, so we can't rely on the
// address space running out
self.len = self.len.checked_add(other.len()).expect("length overflow");
unsafe { other.set_len(0) }
return;
}
self.reserve(other.len());
let len = self.len();
unsafe {
@@ -1274,46 +1183,6 @@ impl<T: PartialEq> Vec<T> {
// Internal methods and functions
////////////////////////////////////////////////////////////////////////////////
impl<T> Vec<T> {
/// Reserves capacity for exactly `capacity` elements in the given vector.
///
/// If the capacity for `self` is already equal to or greater than the
/// requested capacity, then no action is taken.
fn grow_capacity(&mut self, capacity: usize) {
if mem::size_of::<T>() == 0 { return }
if capacity > self.cap {
let size = capacity.checked_mul(mem::size_of::<T>())
.expect("capacity overflow");
unsafe {
let ptr = alloc_or_realloc(*self.ptr, self.cap * mem::size_of::<T>(), size);
if ptr.is_null() { ::alloc::oom() }
self.ptr = Unique::new(ptr);
}
self.cap = capacity;
}
}
}
// FIXME: #13996: need a way to mark the return value as `noalias`
#[inline(never)]
unsafe fn alloc_or_realloc<T>(ptr: *mut T, old_size: usize, size: usize) -> *mut T {
if old_size == 0 {
allocate(size, mem::align_of::<T>()) as *mut T
} else {
reallocate(ptr as *mut u8, old_size, size, mem::align_of::<T>()) as *mut T
}
}
#[inline]
unsafe fn dealloc<T>(ptr: *mut T, len: usize) {
if mem::size_of::<T>() != 0 {
deallocate(ptr as *mut u8,
len * mem::size_of::<T>(),
mem::align_of::<T>())
}
}
#[doc(hidden)]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn from_elem<T: Clone>(elem: T, n: usize) -> Vec<T> {
@@ -1463,7 +1332,7 @@ impl<T> ops::Deref for Vec<T> {
fn deref(&self) -> &[T] {
unsafe {
let p = *self.ptr;
let p = self.buf.ptr();
assume(p != 0 as *mut T);
slice::from_raw_parts(p, self.len)
}
@@ -1474,7 +1343,7 @@ impl<T> ops::Deref for Vec<T> {
impl<T> ops::DerefMut for Vec<T> {
fn deref_mut(&mut self) -> &mut [T] {
unsafe {
let ptr = *self.ptr;
let ptr = self.buf.ptr();
assume(!ptr.is_null());
slice::from_raw_parts_mut(ptr, self.len)
}
@@ -1528,19 +1397,19 @@ impl<T> IntoIterator for Vec<T> {
/// }
/// ```
#[inline]
fn into_iter(self) -> IntoIter<T> {
fn into_iter(mut self) -> IntoIter<T> {
unsafe {
let ptr = *self.ptr;
let ptr = self.as_mut_ptr();
assume(!ptr.is_null());
let cap = self.cap;
let begin = ptr as *const T;
let end = if mem::size_of::<T>() == 0 {
arith_offset(ptr as *const i8, self.len() as isize) as *const T
} else {
ptr.offset(self.len() as isize) as *const T
};
let buf = ptr::read(&self.buf);
mem::forget(self);
IntoIter { allocation: ptr, cap: cap, ptr: begin, end: end }
IntoIter { buf: buf, ptr: begin, end: end }
}
}
}
@@ -1652,16 +1521,16 @@ impl<T: Ord> Ord for Vec<T> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Drop for Vec<T> {
fn drop(&mut self) {
// This is (and should always remain) a no-op if the fields are
// zeroed (when moving out, because of #[unsafe_no_drop_flag]).
if self.cap != 0 && self.cap != mem::POST_DROP_USIZE {
unsafe {
for x in self.iter() {
ptr::read(x);
}
dealloc(*self.ptr, self.cap)
// NOTE: this is currently abusing the fact that ZSTs can't impl Drop.
// Or rather, that impl'ing Drop makes them not zero-sized. This is
// OK because exactly when this stops being a valid assumption, we
// don't need unsafe_no_drop_flag shenanigans anymore.
if self.buf.unsafe_no_drop_flag_needs_drop() {
for x in self.iter_mut() {
unsafe { drop_in_place(x); }
}
}
// RawVec handles deallocation
}
}
@@ -1745,8 +1614,7 @@ impl<'a, T> IntoCow<'a, [T]> for &'a [T] where T: Clone {
/// An iterator that moves out of a vector.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<T> {
allocation: *mut T, // the block of memory allocated for the vector
cap: usize, // the capacity of the vector
buf: RawVec<T>,
ptr: *const T,
end: *const T
}
@@ -1761,9 +1629,9 @@ impl<T> IntoIter<T> {
pub fn into_inner(mut self) -> Vec<T> {
unsafe {
for _x in self.by_ref() { }
let IntoIter { allocation, cap, ptr: _ptr, end: _end } = self;
let buf = ptr::read(&self.buf);
mem::forget(self);
Vec::from_raw_parts(allocation, 0, cap)
Vec { buf: buf, len: 0 }
}
}
}
@@ -1841,12 +1709,9 @@ impl<T> ExactSizeIterator for IntoIter<T> {}
impl<T> Drop for IntoIter<T> {
fn drop(&mut self) {
// destroy the remaining elements
if self.cap != 0 {
for _x in self.by_ref() {}
unsafe {
dealloc(self.allocation, self.cap);
}
}
for _x in self.by_ref() {}
// RawVec handles deallocation
}
}
@@ -1920,73 +1785,6 @@ impl<'a, T> Drop for Drain<'a, T> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for Drain<'a, T> {}
////////////////////////////////////////////////////////////////////////////////
// Conversion from &[T] to &Vec<T>
////////////////////////////////////////////////////////////////////////////////
/// Wrapper type providing a `&Vec<T>` reference via `Deref`.
#[unstable(feature = "collections")]
#[deprecated(since = "1.2.0",
reason = "replaced with deref coercions or Borrow")]
pub struct DerefVec<'a, T:'a> {
x: Vec<T>,
l: PhantomData<&'a T>,
}
#[unstable(feature = "collections")]
#[deprecated(since = "1.2.0",
reason = "replaced with deref coercions or Borrow")]
#[allow(deprecated)]
impl<'a, T> Deref for DerefVec<'a, T> {
type Target = Vec<T>;
fn deref<'b>(&'b self) -> &'b Vec<T> {
&self.x
}
}
// Prevent the inner `Vec<T>` from attempting to deallocate memory.
#[stable(feature = "rust1", since = "1.0.0")]
#[deprecated(since = "1.2.0",
reason = "replaced with deref coercions or Borrow")]
#[allow(deprecated)]
impl<'a, T> Drop for DerefVec<'a, T> {
fn drop(&mut self) {
self.x.len = 0;
self.x.cap = 0;
}
}
/// Converts a slice to a wrapper type providing a `&Vec<T>` reference.
///
/// # Examples
///
/// ```
/// # #![feature(collections)]
/// use std::vec::as_vec;
///
/// // Let's pretend we have a function that requires `&Vec<i32>`
/// fn vec_consumer(s: &Vec<i32>) {
/// assert_eq!(s, &[1, 2, 3]);
/// }
///
/// // Provide a `&Vec<i32>` from a `&[i32]` without allocating
/// let values = [1, 2, 3];
/// vec_consumer(&as_vec(&values));
/// ```
#[unstable(feature = "collections")]
#[deprecated(since = "1.2.0",
reason = "replaced with deref coercions or Borrow")]
#[allow(deprecated)]
pub fn as_vec<'a, T>(x: &'a [T]) -> DerefVec<'a, T> {
unsafe {
DerefVec {
x: Vec::from_raw_parts(x.as_ptr() as *mut T, x.len(), x.len()),
l: PhantomData,
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Partial vec, used for map_in_place
////////////////////////////////////////////////////////////////////////////////


@@ -23,15 +23,14 @@ use core::prelude::*;
use core::cmp::Ordering;
use core::fmt;
use core::iter::{self, repeat, FromIterator, RandomAccessIterator};
use core::mem;
use core::ops::{Index, IndexMut};
use core::ptr::{self, Unique};
use core::ptr;
use core::slice;
use core::hash::{Hash, Hasher};
use core::cmp;
use alloc::heap;
use alloc::raw_vec::RawVec;
const INITIAL_CAPACITY: usize = 7; // 2^3 - 1
const MINIMUM_CAPACITY: usize = 1; // 2 - 1
@@ -52,8 +51,7 @@ pub struct VecDeque<T> {
tail: usize,
head: usize,
cap: usize,
ptr: Unique<T>,
buf: RawVec<T>,
}
#[stable(feature = "rust1", since = "1.0.0")]
@@ -67,13 +65,7 @@ impl<T: Clone> Clone for VecDeque<T> {
impl<T> Drop for VecDeque<T> {
fn drop(&mut self) {
self.clear();
unsafe {
if mem::size_of::<T>() != 0 {
heap::deallocate(*self.ptr as *mut u8,
self.cap * mem::size_of::<T>(),
mem::align_of::<T>())
}
}
// RawVec handles deallocation
}
}
@@ -84,78 +76,127 @@ impl<T> Default for VecDeque<T> {
}
impl<T> VecDeque<T> {
/// Marginally more convenient
#[inline]
fn ptr(&self) -> *mut T {
self.buf.ptr()
}
/// Marginally more convenient
#[inline]
fn cap(&self) -> usize {
self.buf.cap()
}
/// Turn ptr into a slice
#[inline]
unsafe fn buffer_as_slice(&self) -> &[T] {
slice::from_raw_parts(*self.ptr, self.cap)
slice::from_raw_parts(self.ptr(), self.cap())
}
/// Turn ptr into a mut slice
#[inline]
unsafe fn buffer_as_mut_slice(&mut self) -> &mut [T] {
slice::from_raw_parts_mut(*self.ptr, self.cap)
slice::from_raw_parts_mut(self.ptr(), self.cap())
}
/// Moves an element out of the buffer
#[inline]
unsafe fn buffer_read(&mut self, off: usize) -> T {
ptr::read(self.ptr.offset(off as isize))
ptr::read(self.ptr().offset(off as isize))
}
/// Writes an element into the buffer, moving it.
#[inline]
unsafe fn buffer_write(&mut self, off: usize, t: T) {
ptr::write(self.ptr.offset(off as isize), t);
ptr::write(self.ptr().offset(off as isize), t);
}
/// Returns true if and only if the buffer is at capacity
#[inline]
fn is_full(&self) -> bool { self.cap - self.len() == 1 }
fn is_full(&self) -> bool { self.cap() - self.len() == 1 }
/// Returns the index in the underlying buffer for a given logical element
/// index.
#[inline]
fn wrap_index(&self, idx: usize) -> usize { wrap_index(idx, self.cap) }
fn wrap_index(&self, idx: usize) -> usize { wrap_index(idx, self.cap()) }
/// Returns the index in the underlying buffer for a given logical element
/// index + addend.
#[inline]
fn wrap_add(&self, idx: usize, addend: usize) -> usize {
wrap_index(idx.wrapping_add(addend), self.cap)
wrap_index(idx.wrapping_add(addend), self.cap())
}
/// Returns the index in the underlying buffer for a given logical element
/// index - subtrahend.
#[inline]
fn wrap_sub(&self, idx: usize, subtrahend: usize) -> usize {
wrap_index(idx.wrapping_sub(subtrahend), self.cap)
wrap_index(idx.wrapping_sub(subtrahend), self.cap())
}
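
Since capacities are kept at powers of two (the debug asserts in `handle_cap_increase` below check `count_ones() == 1`), the underlying `wrap_index` helper can reduce to a mask. A sketch consistent with how these wrappers behave:

```rust
#[inline]
fn wrap_index(index: usize, size: usize) -> usize {
    // size is always a power of two, so modulo is a bit-mask
    debug_assert!(size.count_ones() == 1);
    index & (size - 1)
}
// e.g. with cap() == 8: wrap_index(9, 8) == 1 and wrap_index(7 + 3, 8) == 2
```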
/// Copies a contiguous block of memory len long from src to dst
#[inline]
unsafe fn copy(&self, dst: usize, src: usize, len: usize) {
debug_assert!(dst + len <= self.cap, "dst={} src={} len={} cap={}", dst, src, len,
self.cap);
debug_assert!(src + len <= self.cap, "dst={} src={} len={} cap={}", dst, src, len,
self.cap);
debug_assert!(dst + len <= self.cap(), "dst={} src={} len={} cap={}", dst, src, len,
self.cap());
debug_assert!(src + len <= self.cap(), "dst={} src={} len={} cap={}", dst, src, len,
self.cap());
ptr::copy(
self.ptr.offset(src as isize),
self.ptr.offset(dst as isize),
self.ptr().offset(src as isize),
self.ptr().offset(dst as isize),
len);
}
/// Copies a contiguous block of memory len long from src to dst; the two regions must not overlap
#[inline]
unsafe fn copy_nonoverlapping(&self, dst: usize, src: usize, len: usize) {
debug_assert!(dst + len <= self.cap, "dst={} src={} len={} cap={}", dst, src, len,
self.cap);
debug_assert!(src + len <= self.cap, "dst={} src={} len={} cap={}", dst, src, len,
self.cap);
debug_assert!(dst + len <= self.cap(), "dst={} src={} len={} cap={}", dst, src, len,
self.cap());
debug_assert!(src + len <= self.cap(), "dst={} src={} len={} cap={}", dst, src, len,
self.cap());
ptr::copy_nonoverlapping(
self.ptr.offset(src as isize),
self.ptr.offset(dst as isize),
self.ptr().offset(src as isize),
self.ptr().offset(dst as isize),
len);
}
/// Frobs the head and tail sections around to handle the fact that we
/// just reallocated. Unsafe because it trusts old_cap.
#[inline]
unsafe fn handle_cap_increase(&mut self, old_cap: usize) {
let new_cap = self.cap();
// Move the shortest contiguous section of the ring buffer
// T H
// [o o o o o o o . ]
// T H
// A [o o o o o o o . . . . . . . . . ]
// H T
// [o o . o o o o o ]
// T H
// B [. . . o o o o o o o . . . . . . ]
// H T
// [o o o o o . o o ]
// H T
// C [o o o o o . . . . . . . . . o o ]
if self.tail <= self.head { // A
// Nop
} else if self.head < old_cap - self.tail { // B
self.copy_nonoverlapping(old_cap, 0, self.head);
self.head += old_cap;
debug_assert!(self.head > self.tail);
} else { // C
let new_tail = new_cap - (old_cap - self.tail);
self.copy_nonoverlapping(new_tail, self.tail, old_cap - self.tail);
self.tail = new_tail;
debug_assert!(self.head < self.tail);
}
debug_assert!(self.head < self.cap());
debug_assert!(self.tail < self.cap());
debug_assert!(self.cap().count_ones() == 1);
}
}
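
A worked example of case C above, with hypothetical numbers:

```rust
// old_cap = 8 doubled to new_cap = 16; tail = 6, head = 2, so two
// elements (slots 6 and 7) wrap past the end of the old buffer.
// Not case A (tail > head); case B's test fails: head (2) is not
// less than old_cap - tail (2). So case C runs:
//   new_tail = 16 - (8 - 6) = 14
//   copy_nonoverlapping(14, 6, 2)  // move the wrapped run to the new end
//   tail = 14, head = 2            // logical order: slots 14, 15, 0, 1
```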
impl<T> VecDeque<T> {
@@ -171,24 +212,11 @@ impl<T> VecDeque<T> {
// +1 since the ringbuffer always leaves one space empty
let cap = cmp::max(n + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
assert!(cap > n, "capacity overflow");
let size = cap.checked_mul(mem::size_of::<T>())
.expect("capacity overflow");
let ptr = unsafe {
if mem::size_of::<T>() != 0 {
let ptr = heap::allocate(size, mem::align_of::<T>()) as *mut T;
if ptr.is_null() { ::alloc::oom() }
Unique::new(ptr)
} else {
Unique::new(heap::EMPTY as *mut T)
}
};
VecDeque {
tail: 0,
head: 0,
cap: cap,
ptr: ptr,
buf: RawVec::with_capacity(cap),
}
}
@@ -209,7 +237,7 @@ impl<T> VecDeque<T> {
pub fn get(&self, i: usize) -> Option<&T> {
if i < self.len() {
let idx = self.wrap_add(self.tail, i);
unsafe { Some(&*self.ptr.offset(idx as isize)) }
unsafe { Some(&*self.ptr().offset(idx as isize)) }
} else {
None
}
@@ -236,7 +264,7 @@ impl<T> VecDeque<T> {
pub fn get_mut(&mut self, i: usize) -> Option<&mut T> {
if i < self.len() {
let idx = self.wrap_add(self.tail, i);
unsafe { Some(&mut *self.ptr.offset(idx as isize)) }
unsafe { Some(&mut *self.ptr().offset(idx as isize)) }
} else {
None
}
@@ -268,7 +296,7 @@ impl<T> VecDeque<T> {
let ri = self.wrap_add(self.tail, i);
let rj = self.wrap_add(self.tail, j);
unsafe {
ptr::swap(self.ptr.offset(ri as isize), self.ptr.offset(rj as isize))
ptr::swap(self.ptr().offset(ri as isize), self.ptr().offset(rj as isize))
}
}
@@ -285,7 +313,7 @@ impl<T> VecDeque<T> {
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn capacity(&self) -> usize { self.cap - 1 }
pub fn capacity(&self) -> usize { self.cap() - 1 }
/// Reserves the minimum capacity for exactly `additional` more elements to be inserted in the
/// given `VecDeque`. Does nothing if the capacity is already sufficient.
@@ -330,62 +358,16 @@ impl<T> VecDeque<T> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve(&mut self, additional: usize) {
let new_len = self.len() + additional;
assert!(new_len + 1 > self.len(), "capacity overflow");
if new_len > self.capacity() {
let count = (new_len + 1).next_power_of_two();
assert!(count >= new_len + 1);
let old_cap = self.cap();
let used_cap = self.len() + 1;
let new_cap = used_cap
.checked_add(additional)
.and_then(|needed_cap| needed_cap.checked_next_power_of_two())
.expect("capacity overflow");
if mem::size_of::<T>() != 0 {
let old = self.cap * mem::size_of::<T>();
let new = count.checked_mul(mem::size_of::<T>())
.expect("capacity overflow");
unsafe {
let ptr = heap::reallocate(*self.ptr as *mut u8,
old,
new,
mem::align_of::<T>()) as *mut T;
if ptr.is_null() { ::alloc::oom() }
self.ptr = Unique::new(ptr);
}
}
// Move the shortest contiguous section of the ring buffer
// T H
// [o o o o o o o . ]
// T H
// A [o o o o o o o . . . . . . . . . ]
// H T
// [o o . o o o o o ]
// T H
// B [. . . o o o o o o o . . . . . . ]
// H T
// [o o o o o . o o ]
// H T
// C [o o o o o . . . . . . . . . o o ]
let oldcap = self.cap;
self.cap = count;
if self.tail <= self.head { // A
// Nop
} else if self.head < oldcap - self.tail { // B
unsafe {
self.copy_nonoverlapping(oldcap, 0, self.head);
}
self.head += oldcap;
debug_assert!(self.head > self.tail);
} else { // C
let new_tail = count - (oldcap - self.tail);
unsafe {
self.copy_nonoverlapping(new_tail, self.tail, oldcap - self.tail);
}
self.tail = new_tail;
debug_assert!(self.head < self.tail);
}
debug_assert!(self.head < self.cap);
debug_assert!(self.tail < self.cap);
debug_assert!(self.cap.count_ones() == 1);
if new_cap > self.capacity() {
self.buf.reserve_exact(used_cap, new_cap - used_cap);
unsafe { self.handle_cap_increase(old_cap); }
}
}
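
A worked instance of the new arithmetic, with hypothetical numbers:

```rust
// len() = 5, additional = 4:
//   used_cap = 5 + 1 = 6                         // +1 for the empty slot
//   new_cap  = (6 + 4).next_power_of_two() = 16
// With the old cap() = 8, capacity() = 7 < 16, so the buffer grows:
//   buf.reserve_exact(6, 10);            // allocates exactly 16 slots
//   handle_cap_increase(8);              // fixes up head/tail
```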
@@ -410,7 +392,7 @@ impl<T> VecDeque<T> {
// +1 since the ringbuffer always leaves one space empty
// len + 1 can't overflow for an existing, well-formed ringbuffer.
let target_cap = cmp::max(self.len() + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
if target_cap < self.cap {
if target_cap < self.cap() {
// There are three cases of interest:
// All elements are out of desired bounds
// Elements are contiguous, and head is out of desired bounds
@@ -448,7 +430,7 @@ impl<T> VecDeque<T> {
// H T
// [o o o o o . o o ]
debug_assert!(self.wrap_sub(self.head, 1) < target_cap);
let len = self.cap - self.tail;
let len = self.cap() - self.tail;
let new_tail = target_cap - len;
unsafe {
self.copy_nonoverlapping(new_tail, self.tail, len);
@@ -457,22 +439,11 @@ impl<T> VecDeque<T> {
debug_assert!(self.head < self.tail);
}
if mem::size_of::<T>() != 0 {
let old = self.cap * mem::size_of::<T>();
let new_size = target_cap * mem::size_of::<T>();
unsafe {
let ptr = heap::reallocate(*self.ptr as *mut u8,
old,
new_size,
mem::align_of::<T>()) as *mut T;
if ptr.is_null() { ::alloc::oom() }
self.ptr = Unique::new(ptr);
}
}
self.cap = target_cap;
debug_assert!(self.head < self.cap);
debug_assert!(self.tail < self.cap);
debug_assert!(self.cap.count_ones() == 1);
self.buf.shrink_to_fit(target_cap);
debug_assert!(self.head < self.cap());
debug_assert!(self.tail < self.cap());
debug_assert!(self.cap().count_ones() == 1);
}
}
@@ -610,7 +581,7 @@ impl<T> VecDeque<T> {
/// assert_eq!(v.len(), 1);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn len(&self) -> usize { count(self.tail, self.head, self.cap) }
pub fn len(&self) -> usize { count(self.tail, self.head, self.cap()) }
/// Returns true if the buffer contains no elements
///
@@ -799,7 +770,9 @@ impl<T> VecDeque<T> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push_front(&mut self, t: T) {
if self.is_full() {
self.reserve(1);
let old_cap = self.cap();
self.buf.double();
unsafe { self.handle_cap_increase(old_cap); }
debug_assert!(!self.is_full());
}
@@ -823,7 +796,9 @@ impl<T> VecDeque<T> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push_back(&mut self, t: T) {
if self.is_full() {
self.reserve(1);
let old_cap = self.cap();
self.buf.double();
unsafe { self.handle_cap_increase(old_cap); }
debug_assert!(!self.is_full());
}
@@ -952,7 +927,9 @@ impl<T> VecDeque<T> {
pub fn insert(&mut self, i: usize, t: T) {
assert!(i <= self.len(), "index out of bounds");
if self.is_full() {
self.reserve(1);
let old_cap = self.cap();
self.buf.double();
unsafe { self.handle_cap_increase(old_cap); }
debug_assert!(!self.is_full());
}
@@ -1067,10 +1044,10 @@ impl<T> VecDeque<T> {
self.copy(1, 0, self.head);
// copy last element into empty spot at bottom of buffer
self.copy(0, self.cap - 1, 1);
self.copy(0, self.cap() - 1, 1);
// move elements from idx to end forward not including ^ element
self.copy(idx + 1, idx, self.cap - 1 - idx);
self.copy(idx + 1, idx, self.cap() - 1 - idx);
self.head += 1;
},
@@ -1086,10 +1063,10 @@ impl<T> VecDeque<T> {
// M M M
// copy elements up to new tail
self.copy(self.tail - 1, self.tail, self.cap - self.tail);
self.copy(self.tail - 1, self.tail, self.cap() - self.tail);
// copy last element into empty spot at bottom of buffer
self.copy(self.cap - 1, 0, 1);
self.copy(self.cap() - 1, 0, 1);
self.tail -= 1;
},
@@ -1104,10 +1081,10 @@ impl<T> VecDeque<T> {
// M M M M M M
// copy elements up to new tail
self.copy(self.tail - 1, self.tail, self.cap - self.tail);
self.copy(self.tail - 1, self.tail, self.cap() - self.tail);
// copy last element into empty spot at bottom of buffer
self.copy(self.cap - 1, 0, 1);
self.copy(self.cap() - 1, 0, 1);
// move elements from idx-1 to end forward not including ^ element
self.copy(0, 1, idx - 1);
@@ -1261,12 +1238,12 @@ impl<T> VecDeque<T> {
// M
// draw in elements in the tail section
self.copy(idx, idx + 1, self.cap - idx - 1);
self.copy(idx, idx + 1, self.cap() - idx - 1);
// Prevents underflow.
if self.head != 0 {
// copy first element into empty spot
self.copy(self.cap - 1, 0, 1);
self.copy(self.cap() - 1, 0, 1);
// move elements in the head section backwards
self.copy(0, 1, self.head - 1);
@@ -1288,10 +1265,10 @@ impl<T> VecDeque<T> {
self.copy(1, 0, idx);
// copy last element into empty spot
self.copy(0, self.cap - 1, 1);
self.copy(0, self.cap() - 1, 1);
// move elements from tail to end forward, excluding the last one
self.copy(self.tail + 1, self.tail, self.cap - self.tail - 1);
self.copy(self.tail + 1, self.tail, self.cap() - self.tail - 1);
self.tail = self.wrap_add(self.tail, 1);
}
@@ -1343,12 +1320,12 @@ impl<T> VecDeque<T> {
let amount_in_first = first_len - at;
ptr::copy_nonoverlapping(first_half.as_ptr().offset(at as isize),
*other.ptr,
other.ptr(),
amount_in_first);
// just take all of the second half.
ptr::copy_nonoverlapping(second_half.as_ptr(),
other.ptr.offset(amount_in_first as isize),
other.ptr().offset(amount_in_first as isize),
second_len);
} else {
// `at` lies in the second half, need to factor in the elements we skipped
@@ -1356,7 +1333,7 @@ impl<T> VecDeque<T> {
let offset = at - first_len;
let amount_in_second = second_len - offset;
ptr::copy_nonoverlapping(second_half.as_ptr().offset(offset as isize),
*other.ptr,
other.ptr(),
amount_in_second);
}
}
@@ -1904,8 +1881,8 @@ mod tests {
assert_eq!(tester.swap_front_remove(idx), Some(len * 2 - 1 - i));
}
}
assert!(tester.tail < tester.cap);
assert!(tester.head < tester.cap);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert_eq!(tester, expected);
}
}
@@ -1940,8 +1917,8 @@ mod tests {
}
}
tester.insert(to_insert, to_insert);
assert!(tester.tail < tester.cap);
assert!(tester.head < tester.cap);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert_eq!(tester, expected);
}
}
@@ -1977,8 +1954,8 @@ mod tests {
tester.push_back(1234);
}
tester.remove(to_remove);
assert!(tester.tail < tester.cap);
assert!(tester.head < tester.cap);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert_eq!(tester, expected);
}
}
@@ -2010,8 +1987,8 @@ mod tests {
}
tester.shrink_to_fit();
assert!(tester.capacity() <= cap);
assert!(tester.tail < tester.cap);
assert!(tester.head < tester.cap);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert_eq!(tester, expected);
}
}
@@ -2044,10 +2021,10 @@ mod tests {
tester.push_back(i);
}
let result = tester.split_off(at);
assert!(tester.tail < tester.cap);
assert!(tester.head < tester.cap);
assert!(result.tail < result.cap);
assert!(result.head < result.cap);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert!(result.tail < result.cap());
assert!(result.head < result.cap());
assert_eq!(tester, expected_self);
assert_eq!(result, expected_other);
}


@@ -10,18 +10,9 @@
use std::borrow::{IntoCow, Cow};
use std::iter::repeat;
#[allow(deprecated)]
use std::string::as_string;
use test::Bencher;
#[test]
#[allow(deprecated)]
fn test_as_string() {
let x = "foo";
assert_eq!(x, &**as_string(x));
}
#[test]
fn test_from_str() {
let owned: Option<::std::string::String> = "string".parse().ok();


@@ -10,8 +10,6 @@
use std::iter::{FromIterator, repeat};
use std::mem::size_of;
#[allow(deprecated)]
use std::vec::as_vec;
use test::Bencher;
@@ -25,25 +23,6 @@ impl<'a> Drop for DropCounter<'a> {
}
}
#[test]
#[allow(deprecated)]
fn test_as_vec() {
let xs = [1u8, 2u8, 3u8];
assert_eq!(&**as_vec(&xs), xs);
}
#[test]
#[allow(deprecated)]
fn test_as_vec_dtor() {
let (mut count_x, mut count_y) = (0, 0);
{
let xs = &[DropCounter { count: &mut count_x }, DropCounter { count: &mut count_y }];
assert_eq!(as_vec(xs).len(), 2);
}
assert_eq!(count_x, 1);
assert_eq!(count_y, 1);
}
#[test]
fn test_small_vec_struct() {
assert!(size_of::<Vec<u8>>() == size_of::<usize>() * 3);