//! The virtual memory representation of the MIR interpreter.

use super::{
    Pointer, InterpResult, AllocId, ScalarMaybeUndef, write_target_uint, read_target_uint, Scalar,
};

use crate::ty::layout::{Size, Align};
use syntax::ast::Mutability;
use std::iter;
use crate::mir;
use std::ops::{Range, Deref, DerefMut};
use rustc_data_structures::sorted_map::SortedMap;
use rustc_target::abi::HasDataLayout;
use std::borrow::Cow;

#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
pub struct Allocation<Tag = (), Extra = ()> {
    /// The actual bytes of the allocation.
    /// Note that the bytes of a pointer represent the offset of the pointer.
    pub bytes: Vec<u8>,
    /// Maps from byte addresses to extra data for each pointer.
    /// Only the first byte of a pointer is inserted into the map; i.e.,
    /// every entry in this map applies to `pointer_size` consecutive bytes starting
    /// at the given offset.
    pub relocations: Relocations<Tag>,
    /// Denotes undefined memory. Reading from undefined memory is forbidden in Miri.
    pub undef_mask: UndefMask,
    /// The alignment of the allocation to detect unaligned reads.
    pub align: Align,
    /// Whether the allocation is mutable.
    /// Also used by codegen to determine if a static should be put into mutable memory,
    /// which happens for `static mut` and `static` with interior mutability.
    pub mutability: Mutability,
    /// Extra state for the machine.
    pub extra: Extra,
}

pub trait AllocationExtra<Tag>: ::std::fmt::Debug + Clone {
    // There is no constructor in here because the constructor's type depends
    // on `MemoryKind`, and making things sufficiently generic leads to painful
    // inference failure.

    /// Hook for performing extra checks on a memory read access.
    ///
    /// Takes read-only access to the allocation so that all memory read
    /// operations can take `&self`. Use a `RefCell` in `AllocExtra` if you
    /// need to mutate.
    #[inline(always)]
    fn memory_read(
        _alloc: &Allocation<Tag, Self>,
        _ptr: Pointer<Tag>,
        _size: Size,
    ) -> InterpResult<'tcx> {
        Ok(())
    }

    /// Hook for performing extra checks on a memory write access.
    #[inline(always)]
    fn memory_written(
        _alloc: &mut Allocation<Tag, Self>,
        _ptr: Pointer<Tag>,
        _size: Size,
    ) -> InterpResult<'tcx> {
        Ok(())
    }

    /// Hook for performing extra checks on a memory deallocation.
    /// `size` will be the size of the allocation.
    #[inline(always)]
    fn memory_deallocated(
        _alloc: &mut Allocation<Tag, Self>,
        _ptr: Pointer<Tag>,
        _size: Size,
    ) -> InterpResult<'tcx> {
        Ok(())
    }
}
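
// Illustrative sketch (not part of this module): a machine could use interior
// mutability in its `Extra` state to record accesses, since `memory_read` only
// receives `&Allocation`. The `ReadCounter` type below is hypothetical.
//
//     #[derive(Clone, Debug)]
//     struct ReadCounter(std::cell::RefCell<u64>);
//
//     impl<Tag> AllocationExtra<Tag> for ReadCounter {
//         fn memory_read(
//             alloc: &Allocation<Tag, Self>,
//             _ptr: Pointer<Tag>,
//             _size: Size,
//         ) -> InterpResult<'tcx> {
//             // Mutate through the `RefCell` despite only having `&Allocation`.
//             *alloc.extra.0.borrow_mut() += 1;
//             Ok(())
//         }
//     }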

// For `Tag = ()` and no extra state, we have a trivial implementation.
impl AllocationExtra<()> for () { }

// The constructors are all without extra; the extra gets added by a machine hook later.
impl<Tag> Allocation<Tag> {
    /// Creates a read-only allocation initialized by the given bytes.
    pub fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, align: Align) -> Self {
        let bytes = slice.into().into_owned();
        let undef_mask = UndefMask::new(Size::from_bytes(bytes.len() as u64), true);
        Self {
            bytes,
            relocations: Relocations::new(),
            undef_mask,
            align,
            mutability: Mutability::Immutable,
            extra: (),
        }
    }

    pub fn from_byte_aligned_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>) -> Self {
        Allocation::from_bytes(slice, Align::from_bytes(1).unwrap())
    }

    pub fn undef(size: Size, align: Align) -> Self {
        assert_eq!(size.bytes() as usize as u64, size.bytes());
        Allocation {
            bytes: vec![0; size.bytes() as usize],
            relocations: Relocations::new(),
            undef_mask: UndefMask::new(size, false),
            align,
            mutability: Mutability::Mutable,
            extra: (),
        }
    }
}
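
// Illustrative sketch: how the constructors relate. `from_bytes` produces an
// immutable, fully defined allocation; `undef` produces a mutable, zeroed one
// whose bytes are all marked undefined. The concrete values are assumptions
// for the example only.
//
//     let a = Allocation::<()>::from_bytes(&b"abc"[..], Align::from_bytes(1).unwrap());
//     assert_eq!(a.bytes.len(), 3);
//     assert!(a.undef_mask.is_range_defined(Size::ZERO, Size::from_bytes(3)).is_ok());
//
//     let u = Allocation::<()>::undef(Size::from_bytes(3), Align::from_bytes(1).unwrap());
//     assert!(u.undef_mask.is_range_defined(Size::ZERO, Size::from_bytes(3)).is_err());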

impl<'tcx> rustc_serialize::UseSpecializedDecodable for &'tcx Allocation {}

/// Byte accessors
impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
    /// Just a small local helper function to avoid a bit of code repetition.
    /// Returns the range of this allocation that corresponds to the given offset and size.
    #[inline]
    fn check_bounds(
        &self,
        offset: Size,
        size: Size
    ) -> Range<usize> {
        let end = offset + size; // this does overflow checking
        assert_eq!(
            end.bytes() as usize as u64, end.bytes(),
            "cannot handle this access on this host architecture"
        );
        let end = end.bytes() as usize;
        assert!(
            end <= self.bytes.len(),
            "Out-of-bounds access at offset {}, size {} in allocation of size {}",
            offset.bytes(), size.bytes(), self.bytes.len()
        );
        (offset.bytes() as usize)..end
    }

    /// The last argument controls whether we error out when there are undefined
    /// or pointer bytes. You should never call this directly; call `get_bytes` or
    /// `get_bytes_with_undef_and_ptr` instead.
    ///
    /// This function also guarantees that the resulting pointer will remain stable
    /// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies
    /// on that.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    fn get_bytes_internal(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
        check_defined_and_ptr: bool,
    ) -> InterpResult<'tcx, &[u8]>
    {
        let range = self.check_bounds(ptr.offset, size);

        if check_defined_and_ptr {
            self.check_defined(ptr, size)?;
            self.check_relocations(cx, ptr, size)?;
        } else {
            // We still don't want relocations on the *edges*.
            self.check_relocation_edges(cx, ptr, size)?;
        }

        AllocationExtra::memory_read(self, ptr, size)?;

        Ok(&self.bytes[range])
    }

    /// Check that these bytes are initialized and not pointer bytes, and then return them
    /// as a slice.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    #[inline]
    pub fn get_bytes(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx, &[u8]>
    {
        self.get_bytes_internal(cx, ptr, size, true)
    }

    /// It is the caller's responsibility to handle undefined and pointer bytes.
    /// However, this still checks that there are no relocations on the *edges*.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    #[inline]
    pub fn get_bytes_with_undef_and_ptr(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx, &[u8]>
    {
        self.get_bytes_internal(cx, ptr, size, false)
    }

    /// Just calling this already marks everything as defined and removes relocations,
    /// so be sure to actually put data there!
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    pub fn get_bytes_mut(
        &mut self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx, &mut [u8]>
    {
        let range = self.check_bounds(ptr.offset, size);

        self.mark_definedness(ptr, size, true);
        self.clear_relocations(cx, ptr, size)?;

        AllocationExtra::memory_written(self, ptr, size)?;

        Ok(&mut self.bytes[range])
    }
}
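
// Illustrative sketch: the intended call pattern for the byte accessors. The
// `alloc`, `cx`, and `ptr` values are assumed to come from the interpreter's
// memory layer and are hypothetical here.
//
//     // Read two defined, pointer-free bytes starting at `ptr`.
//     let bytes = alloc.get_bytes(cx, ptr, Size::from_bytes(2))?;
//
//     // Write access: this alone marks the range as defined and drops
//     // relocations, so the caller must overwrite the returned slice.
//     let dst = alloc.get_bytes_mut(cx, ptr, Size::from_bytes(2))?;
//     dst.copy_from_slice(&[0xAB, 0xCD]);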

/// Reading and writing
impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
    /// Reads bytes until a `0` is encountered. Will error if the end of the allocation is reached
    /// before a `0` is found.
    pub fn read_c_str(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
    ) -> InterpResult<'tcx, &[u8]>
    {
        assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
        let offset = ptr.offset.bytes() as usize;
        match self.bytes[offset..].iter().position(|&c| c == 0) {
            Some(size) => {
                let size_with_null = Size::from_bytes((size + 1) as u64);
                // Go through `get_bytes` for checks and AllocationExtra hooks.
                // We read the null, so we include it in the request, but we want it removed
                // from the result, so we do subslicing.
                Ok(&self.get_bytes(cx, ptr, size_with_null)?[..size])
            }
            // This includes the case where `offset` is out-of-bounds to begin with.
            None => throw_err_unsup!(UnterminatedCString(ptr.erase_tag())),
        }
    }
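
    // Illustrative sketch: for an allocation whose bytes are b"hi\0rest", reading
    // a C string at offset 0 requests 3 bytes (including the null) through
    // `get_bytes` and returns b"hi". The `cx` and `ptr_to_offset_0` values are
    // hypothetical.
    //
    //     let alloc = Allocation::<()>::from_byte_aligned_bytes(&b"hi\0rest"[..]);
    //     assert_eq!(alloc.read_c_str(cx, ptr_to_offset_0)?, b"hi");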

    /// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
    /// relocation. If `allow_ptr_and_undef` is `false`, also enforces that the memory in the
    /// given range contains neither relocations nor undef bytes.
    pub fn check_bytes(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
        allow_ptr_and_undef: bool,
    ) -> InterpResult<'tcx>
    {
        // Check bounds and relocations on the edges
        self.get_bytes_with_undef_and_ptr(cx, ptr, size)?;
        // Check undef and ptr
        if !allow_ptr_and_undef {
            self.check_defined(ptr, size)?;
            self.check_relocations(cx, ptr, size)?;
        }
        Ok(())
    }

    /// Writes `src` to the memory starting at `ptr.offset`.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    pub fn write_bytes(
        &mut self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        src: &[u8],
    ) -> InterpResult<'tcx>
    {
        let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(src.len() as u64))?;
        bytes.clone_from_slice(src);
        Ok(())
    }

    /// Sets `count` bytes starting at `ptr.offset` with `val`. Basically `memset`.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    pub fn write_repeat(
        &mut self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        val: u8,
        count: Size
    ) -> InterpResult<'tcx>
    {
        let bytes = self.get_bytes_mut(cx, ptr, count)?;
        for b in bytes {
            *b = val;
        }
        Ok(())
    }

    /// Read a *non-ZST* scalar.
    ///
    /// ZSTs can't be read, for two reasons:
    /// * byteorder cannot work with zero-element buffers;
    /// * in order to obtain a `Pointer`, we need to check for ZSTness anyway due to integer
    ///   pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    pub fn read_scalar(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size
    ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>>
    {
        // `get_bytes_with_undef_and_ptr` tests relocation edges.
        let bytes = self.get_bytes_with_undef_and_ptr(cx, ptr, size)?;
        // Undef check happens *after* we established that the alignment is correct.
        // We must not return Ok() for unaligned pointers!
        if self.check_defined(ptr, size).is_err() {
            // This inflates undefined bytes to the entire scalar, even if only a few
            // bytes are undefined.
            return Ok(ScalarMaybeUndef::Undef);
        }
        // Now we do the actual reading.
        let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
        // See if we got a pointer.
        if size != cx.data_layout().pointer_size {
            // *Now* better make sure that the inside also is free of relocations.
            self.check_relocations(cx, ptr, size)?;
        } else {
            match self.relocations.get(&ptr.offset) {
                Some(&(tag, alloc_id)) => {
                    let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits as u64), tag);
                    return Ok(ScalarMaybeUndef::Scalar(ptr.into()))
                }
                None => {},
            }
        }
        // We don't. Just return the bits.
        Ok(ScalarMaybeUndef::Scalar(Scalar::from_uint(bits, size)))
    }

    /// Read a pointer-sized scalar.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    pub fn read_ptr_sized(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
    ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>>
    {
        self.read_scalar(cx, ptr, cx.data_layout().pointer_size)
    }

    /// Write a *non-ZST* scalar.
    ///
    /// ZSTs can't be written, for two reasons:
    /// * byteorder cannot work with zero-element buffers;
    /// * in order to obtain a `Pointer`, we need to check for ZSTness anyway due to integer
    ///   pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    pub fn write_scalar(
        &mut self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        val: ScalarMaybeUndef<Tag>,
        type_size: Size,
    ) -> InterpResult<'tcx>
    {
        let val = match val {
            ScalarMaybeUndef::Scalar(scalar) => scalar,
            ScalarMaybeUndef::Undef => {
                self.mark_definedness(ptr, type_size, false);
                return Ok(());
            },
        };

        let bytes = match val.to_bits_or_ptr(type_size, cx) {
            Err(val) => val.offset.bytes() as u128,
            Ok(data) => data,
        };

        let endian = cx.data_layout().endian;
        let dst = self.get_bytes_mut(cx, ptr, type_size)?;
        write_target_uint(endian, dst, bytes).unwrap();

        // See if we have to also write a relocation.
        match val {
            Scalar::Ptr(val) => {
                self.relocations.insert(
                    ptr.offset,
                    (val.tag, val.alloc_id),
                );
            }
            _ => {}
        }

        Ok(())
    }

    /// Write a pointer-sized scalar.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    pub fn write_ptr_sized(
        &mut self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        val: ScalarMaybeUndef<Tag>
    ) -> InterpResult<'tcx>
    {
        let ptr_size = cx.data_layout().pointer_size;
        self.write_scalar(cx, ptr.into(), val, ptr_size)
    }
}
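
// Illustrative sketch: writing a scalar and reading it back. Writing an
// integer stores its bytes and marks them defined; writing a pointer
// additionally records a relocation at `ptr.offset`, which `read_scalar`
// turns back into a `Pointer`. The `alloc`, `cx`, and `ptr` values are
// hypothetical.
//
//     let val = ScalarMaybeUndef::Scalar(Scalar::from_uint(42u128, Size::from_bytes(4)));
//     alloc.write_scalar(cx, ptr, val, Size::from_bytes(4))?;
//     let back = alloc.read_scalar(cx, ptr, Size::from_bytes(4))?;
//     // `back` again holds the integer 42 as a 4-byte scalar.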

/// Relocations
impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// Returns all relocations overlapping with the given ptr-offset pair.
    pub fn relocations(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> &[(Size, (Tag, AllocId))] {
        // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
        // the beginning of this range.
        let start = ptr.offset.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
        let end = ptr.offset + size; // this does overflow checking
        self.relocations.range(Size::from_bytes(start)..end)
    }

    /// Checks that there are no relocations overlapping with the given range.
    #[inline(always)]
    fn check_relocations(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx> {
        if self.relocations(cx, ptr, size).is_empty() {
            Ok(())
        } else {
            throw_err_unsup!(ReadPointerAsBytes)
        }
    }

    /// Removes all relocations inside the given range.
    /// If there are relocations overlapping with the edges, they
    /// are removed as well *and* the bytes they cover are marked as
    /// uninitialized. This is a somewhat odd "spooky action at a distance",
    /// but it allows strictly more code to run than if we would just error
    /// immediately in that case.
    fn clear_relocations(
        &mut self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx> {
        // Find the start and end of the given range and its outermost relocations.
        let (first, last) = {
            // Find all relocations overlapping the given range.
            let relocations = self.relocations(cx, ptr, size);
            if relocations.is_empty() {
                return Ok(());
            }

            (relocations.first().unwrap().0,
             relocations.last().unwrap().0 + cx.data_layout().pointer_size)
        };
        let start = ptr.offset;
        let end = start + size;

        // Mark parts of the outermost relocations as undefined if they partially fall outside the
        // given range.
        if first < start {
            self.undef_mask.set_range(first, start, false);
        }
        if last > end {
            self.undef_mask.set_range(end, last, false);
        }

        // Forget all the relocations.
        self.relocations.remove_range(first..last);

        Ok(())
    }

    /// Error if there are relocations overlapping with the edges of the
    /// given memory range.
    #[inline]
    fn check_relocation_edges(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx> {
        self.check_relocations(cx, ptr, Size::ZERO)?;
        self.check_relocations(cx, ptr.offset(size, cx)?, Size::ZERO)?;
        Ok(())
    }
}

/// Undefined bytes
impl<'tcx, Tag, Extra> Allocation<Tag, Extra> {
    /// Checks that a range of bytes is defined. If not, returns the `ReadUndefBytes`
    /// error which will report the first byte which is undefined.
    #[inline]
    fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
        self.undef_mask.is_range_defined(
            ptr.offset,
            ptr.offset + size,
        ).or_else(|idx| throw_err_unsup!(ReadUndefBytes(idx)))
    }

    pub fn mark_definedness(
        &mut self,
        ptr: Pointer<Tag>,
        size: Size,
        new_state: bool,
    ) {
        if size.bytes() == 0 {
            return;
        }
        self.undef_mask.set_range(
            ptr.offset,
            ptr.offset + size,
            new_state,
        );
    }
}
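
// Illustrative sketch: `mark_definedness` is the write-side twin of
// `check_defined`. Marking a sub-range undefined makes later defined-checks of
// any range containing it fail. The `alloc` and `ptr` values are hypothetical.
//
//     alloc.mark_definedness(ptr, Size::from_bytes(4), true);
//     assert!(alloc.undef_mask
//         .is_range_defined(ptr.offset, ptr.offset + Size::from_bytes(4)).is_ok());
//     alloc.mark_definedness(ptr, Size::from_bytes(1), false);
//     assert!(alloc.undef_mask
//         .is_range_defined(ptr.offset, ptr.offset + Size::from_bytes(4)).is_err());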

/// Relocations
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
pub struct Relocations<Tag = (), Id = AllocId>(SortedMap<Size, (Tag, Id)>);

impl<Tag, Id> Relocations<Tag, Id> {
    pub fn new() -> Self {
        Relocations(SortedMap::new())
    }

    // The caller must guarantee that the given relocations are already sorted
    // by address and contain no duplicates.
    pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self {
        Relocations(SortedMap::from_presorted_elements(r))
    }
}

impl<Tag> Deref for Relocations<Tag> {
    type Target = SortedMap<Size, (Tag, AllocId)>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<Tag> DerefMut for Relocations<Tag> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

////////////////////////////////////////////////////////////////////////////////
// Undefined byte tracking
////////////////////////////////////////////////////////////////////////////////

type Block = u64;

/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
/// is defined. If it is `false`, the byte is undefined.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
pub struct UndefMask {
    blocks: Vec<Block>,
    len: Size,
}

impl_stable_hash_for!(struct mir::interpret::UndefMask{blocks, len});

impl UndefMask {
    pub const BLOCK_SIZE: u64 = 64;

    pub fn new(size: Size, state: bool) -> Self {
        let mut m = UndefMask {
            blocks: vec![],
            len: Size::ZERO,
        };
        m.grow(size, state);
        m
    }

    /// Checks whether the range `start..end` (end-exclusive) is entirely defined.
    ///
    /// Returns `Ok(())` if it's defined. Otherwise returns the index of the byte
    /// at which the first undefined access begins.
    #[inline]
    pub fn is_range_defined(&self, start: Size, end: Size) -> Result<(), Size> {
        if end > self.len {
            return Err(self.len);
        }

        // FIXME(oli-obk): optimize this for allocations larger than a block.
        let idx = (start.bytes()..end.bytes())
            .map(|i| Size::from_bytes(i))
            .find(|&i| !self.get(i));

        match idx {
            Some(idx) => Err(idx),
            None => Ok(())
        }
    }

    pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
        let len = self.len;
        if end > len {
            self.grow(end - len, new_state);
        }
        self.set_range_inbounds(start, end, new_state);
    }

    pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
        let (blocka, bita) = bit_index(start);
        let (blockb, bitb) = bit_index(end);
        if blocka == blockb {
            // first set all bits but the first `bita`,
            // then unset the last `64 - bitb` bits
            let range = if bitb == 0 {
                u64::max_value() << bita
            } else {
                (u64::max_value() << bita) & (u64::max_value() >> (64 - bitb))
            };
            if new_state {
                self.blocks[blocka] |= range;
            } else {
                self.blocks[blocka] &= !range;
            }
            return;
        }
        // across block boundaries
        if new_state {
            // set bita..64 to 1
            self.blocks[blocka] |= u64::max_value() << bita;
            // set 0..bitb to 1
            if bitb != 0 {
                self.blocks[blockb] |= u64::max_value() >> (64 - bitb);
            }
            // fill in all the other blocks (much faster than one bit at a time)
            for block in (blocka + 1) .. blockb {
                self.blocks[block] = u64::max_value();
            }
        } else {
            // set bita..64 to 0
            self.blocks[blocka] &= !(u64::max_value() << bita);
            // set 0..bitb to 0
            if bitb != 0 {
                self.blocks[blockb] &= !(u64::max_value() >> (64 - bitb));
            }
            // fill in all the other blocks (much faster than one bit at a time)
            for block in (blocka + 1) .. blockb {
                self.blocks[block] = 0;
            }
        }
    }
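
    // Worked example of the bit arithmetic above: for `start = 2`, `end = 7`
    // (same block), `bit_index` gives `(0, 2)` and `(0, 7)`, so `range` is
    // `(!0 << 2) & (!0 >> 57)`, i.e. bits 2..7 of block 0. For `start = 60`,
    // `end = 70`, the cross-block path touches bits 60..64 of block 0 and
    // bits 0..6 of block 1.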

    #[inline]
    pub fn get(&self, i: Size) -> bool {
        let (block, bit) = bit_index(i);
        (self.blocks[block] & (1 << bit)) != 0
    }

    #[inline]
    pub fn set(&mut self, i: Size, new_state: bool) {
        let (block, bit) = bit_index(i);
        self.set_bit(block, bit, new_state);
    }

    #[inline]
    fn set_bit(&mut self, block: usize, bit: usize, new_state: bool) {
        if new_state {
            self.blocks[block] |= 1 << bit;
        } else {
            self.blocks[block] &= !(1 << bit);
        }
    }

    pub fn grow(&mut self, amount: Size, new_state: bool) {
        if amount.bytes() == 0 {
            return;
        }
        let unused_trailing_bits = self.blocks.len() as u64 * Self::BLOCK_SIZE - self.len.bytes();
        if amount.bytes() > unused_trailing_bits {
            let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
            assert_eq!(additional_blocks as usize as u64, additional_blocks);
            self.blocks.extend(
                // FIXME(oli-obk): optimize this by repeating `new_state as Block`
                iter::repeat(0).take(additional_blocks as usize),
            );
        }
        let start = self.len;
        self.len += amount;
        self.set_range_inbounds(start, start + amount, new_state);
    }
}
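
// Illustrative sketch of the `UndefMask` API on its own: a fresh mask starts
// fully defined or fully undefined depending on `state`, `set_range` grows it
// on demand, and `is_range_defined` reports the first undefined byte.
//
//     let mut mask = UndefMask::new(Size::from_bytes(10), false);
//     assert_eq!(mask.is_range_defined(Size::ZERO, Size::from_bytes(10)), Err(Size::ZERO));
//     mask.set_range(Size::from_bytes(2), Size::from_bytes(6), true);
//     assert!(mask.is_range_defined(Size::from_bytes(2), Size::from_bytes(6)).is_ok());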

#[inline]
fn bit_index(bits: Size) -> (usize, usize) {
    let bits = bits.bytes();
    let a = bits / UndefMask::BLOCK_SIZE;
    let b = bits % UndefMask::BLOCK_SIZE;
    assert_eq!(a as usize as u64, a);
    assert_eq!(b as usize as u64, b);
    (a as usize, b as usize)
}
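
// Worked example: with `BLOCK_SIZE = 64`, byte index 70 lives in block 1 at
// bit 6, so `bit_index(Size::from_bytes(70)) == (1, 6)`; byte 63 is `(0, 63)`.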