//! The virtual memory representation of the MIR interpreter.

use std::borrow::Cow;
use std::convert::TryFrom;
use std::iter;
use std::ops::{Deref, DerefMut, Range};
use std::ptr;

use rustc_ast::Mutability;
use rustc_data_structures::sorted_map::SortedMap;
use rustc_target::abi::{Align, HasDataLayout, Size};

use super::{
    read_target_uint, write_target_uint, AllocId, InterpError, InterpResult, Pointer,
    ResourceExhaustionInfo, Scalar, ScalarMaybeUninit, UndefinedBehaviorInfo, UninitBytesAccess,
    UnsupportedOpInfo,
};
/// This type represents an Allocation in the Miri/CTFE core engine.
///
/// Its public API is rather low-level, working directly with allocation offsets and a custom error
/// type to account for the lack of an AllocId on this level. The Miri/CTFE core engine `memory`
/// module provides higher-level access.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct Allocation<Tag = (), Extra = ()> {
    /// The actual bytes of the allocation.
    /// Note that the bytes of a pointer represent the offset of the pointer.
    bytes: Vec<u8>,
    /// Maps from byte addresses to extra data for each pointer.
    /// Only the first byte of a pointer is inserted into the map; i.e.,
    /// every entry in this map applies to `pointer_size` consecutive bytes starting
    /// at the given offset.
    relocations: Relocations<Tag>,
    /// Denotes which part of this allocation is initialized.
    init_mask: InitMask,
    /// The alignment of the allocation to detect unaligned reads.
    /// (`Align` guarantees that this is a power of two.)
    pub align: Align,
    /// `true` if the allocation is mutable.
    /// Also used by codegen to determine if a static should be put into mutable memory,
    /// which happens for `static mut` and `static` with interior mutability.
    pub mutability: Mutability,
    /// Extra state for the machine.
    pub extra: Extra,
}

/// We have our own error type that does not know about the `AllocId`; that information
/// is added when converting to `InterpError`.
#[derive(Debug)]
pub enum AllocError {
    /// Encountered a pointer where we needed raw bytes.
    ReadPointerAsBytes,
    /// Using uninitialized data where it is not allowed.
    InvalidUninitBytes(Option<UninitBytesAccess>),
}

pub type AllocResult<T = ()> = Result<T, AllocError>;

impl AllocError {
    pub fn to_interp_error<'tcx>(self, alloc_id: AllocId) -> InterpError<'tcx> {
        match self {
            AllocError::ReadPointerAsBytes => {
                InterpError::Unsupported(UnsupportedOpInfo::ReadPointerAsBytes)
            }
            AllocError::InvalidUninitBytes(info) => InterpError::UndefinedBehavior(
                UndefinedBehaviorInfo::InvalidUninitBytes(info.map(|b| (alloc_id, b))),
            ),
        }
    }
}

/// The information that makes up a memory access: offset and size.
#[derive(Copy, Clone, Debug)]
pub struct AllocRange {
    pub start: Size,
    pub size: Size,
}

/// Free-standing constructor for less syntactic overhead.
#[inline(always)]
pub fn alloc_range(start: Size, size: Size) -> AllocRange {
    AllocRange { start, size }
}

impl AllocRange {
    #[inline(always)]
    pub fn end(self) -> Size {
        self.start + self.size // This does overflow checking.
    }

    /// Returns the `subrange` within this range; panics if it is not a subrange.
    #[inline]
    pub fn subrange(self, subrange: AllocRange) -> AllocRange {
        let sub_start = self.start + subrange.start;
        let range = alloc_range(sub_start, subrange.size);
        assert!(range.end() <= self.end(), "access outside the bounds for given AllocRange");
        range
    }
}
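// Illustrative sketch of `AllocRange` arithmetic (not exercised here): for a 16-byte access at
// offset 8, `alloc_range(Size::from_bytes(8), Size::from_bytes(16))` has `end() == 24`, and taking
// `.subrange(alloc_range(Size::from_bytes(4), Size::from_bytes(4)))` yields the absolute offsets
// 12..16; a subrange reaching past offset 24 would trip the assertion and panic.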
// The constructors are all without extra; the extra gets added by a machine hook later.
impl<Tag> Allocation<Tag> {
    /// Creates an allocation initialized by the given bytes.
    pub fn from_bytes<'a>(
        slice: impl Into<Cow<'a, [u8]>>,
        align: Align,
        mutability: Mutability,
    ) -> Self {
        let bytes = slice.into().into_owned();
        let size = Size::from_bytes(bytes.len());
        Self {
            bytes,
            relocations: Relocations::new(),
            init_mask: InitMask::new(size, true),
            align,
            mutability,
            extra: (),
        }
    }

    pub fn from_bytes_byte_aligned_immutable<'a>(slice: impl Into<Cow<'a, [u8]>>) -> Self {
        Allocation::from_bytes(slice, Align::ONE, Mutability::Not)
    }

    /// Try to create an Allocation of `size` bytes, failing if there is not enough memory
    /// available to the compiler to do so.
    pub fn uninit(size: Size, align: Align) -> InterpResult<'static, Self> {
        let mut bytes = Vec::new();
        bytes.try_reserve(size.bytes_usize()).map_err(|_| {
            InterpError::ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted)
        })?;
        bytes.resize(size.bytes_usize(), 0);
        Ok(Allocation {
            bytes,
            relocations: Relocations::new(),
            init_mask: InitMask::new(size, false),
            align,
            mutability: Mutability::Mut,
            extra: (),
        })
    }
}
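// Illustrative sketch of the constructors above (the variable names, sizes, and alignment are made
// up for this example, and the `?` assumes a caller that returns `InterpResult`; not exercised here):
//
//     let data: Allocation = Allocation::from_bytes_byte_aligned_immutable(&b"hello"[..]);
//     assert_eq!(data.len(), 5);
//     // `uninit` can fail with `MemoryExhausted` if the host cannot provide the memory:
//     let zeroed: Allocation = Allocation::uninit(Size::from_bytes(32), Align::from_bytes(8).unwrap())?;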
impl Allocation<()> {
    /// Adds `Tag` and `Extra` fields.
    pub fn with_tags_and_extra<T, E>(
        self,
        mut tagger: impl FnMut(AllocId) -> T,
        extra: E,
    ) -> Allocation<T, E> {
        Allocation {
            bytes: self.bytes,
            relocations: Relocations::from_presorted(
                self.relocations
                    .iter()
                    // The allocations in the relocations (pointers stored *inside* this allocation)
                    // all get the base pointer tag.
                    .map(|&(offset, ((), alloc))| {
                        let tag = tagger(alloc);
                        (offset, (tag, alloc))
                    })
                    .collect(),
            ),
            init_mask: self.init_mask,
            align: self.align,
            mutability: self.mutability,
            extra,
        }
    }
}
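// Illustrative sketch (`MyTag`, `MyExtra`, and `machine_extra` are hypothetical stand-ins for
// whatever the machine hook supplies; not exercised here):
//
//     let tagged: Allocation<MyTag, MyExtra> =
//         untagged.with_tags_and_extra(|alloc_id| MyTag::base(alloc_id), machine_extra);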
/// Raw accessors. Provide access to otherwise private bytes.
impl<Tag, Extra> Allocation<Tag, Extra> {
    pub fn len(&self) -> usize {
        self.bytes.len()
    }

    pub fn size(&self) -> Size {
        Size::from_bytes(self.len())
    }

    /// Looks at a slice which may describe uninitialized bytes or describe a relocation. This differs
    /// from `get_bytes_with_uninit_and_ptr` in that it does no relocation checks (even on the
    /// edges) at all.
    /// This must not be used for reads affecting the interpreter execution.
    pub fn inspect_with_uninit_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
        &self.bytes[range]
    }

    /// Returns the mask indicating which bytes are initialized.
    pub fn init_mask(&self) -> &InitMask {
        &self.init_mask
    }

    /// Returns the relocation list.
    pub fn relocations(&self) -> &Relocations<Tag> {
        &self.relocations
    }
}

/// Byte accessors.
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// The last argument controls whether we error out when there are uninitialized
    /// or pointer bytes. You should never call this directly; call `get_bytes` or
    /// `get_bytes_with_uninit_and_ptr` instead.
    ///
    /// This function also guarantees that the resulting pointer will remain stable
    /// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies
    /// on that.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    fn get_bytes_internal(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        check_init_and_ptr: bool,
    ) -> AllocResult<&[u8]> {
        if check_init_and_ptr {
            self.check_init(range)?;
            self.check_relocations(cx, range)?;
        } else {
            // We still don't want relocations on the *edges*.
            self.check_relocation_edges(cx, range)?;
        }

        Ok(&self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
    }

    /// Checks that these bytes are initialized and not pointer bytes, and then returns them
    /// as a slice.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    #[inline]
    pub fn get_bytes(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult<&[u8]> {
        self.get_bytes_internal(cx, range, true)
    }

    /// It is the caller's responsibility to handle uninitialized and pointer bytes.
    /// However, this still checks that there are no relocations on the *edges*.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    #[inline]
    pub fn get_bytes_with_uninit_and_ptr(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<&[u8]> {
        self.get_bytes_internal(cx, range, false)
    }

    /// Just calling this already marks everything as defined and removes relocations,
    /// so be sure to actually put data there!
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    pub fn get_bytes_mut(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> &mut [u8] {
        self.mark_init(range, true);
        self.clear_relocations(cx, range);

        &mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]
    }

    /// A raw pointer variant of `get_bytes_mut` that avoids invalidating existing aliases into this memory.
    pub fn get_bytes_mut_ptr(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> *mut [u8] {
        self.mark_init(range, true);
        self.clear_relocations(cx, range);

        assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
        let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
        let len = range.end().bytes_usize() - range.start.bytes_usize();
        ptr::slice_from_raw_parts_mut(begin_ptr, len)
    }
}
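// Illustrative sketch of the accessors above (`alloc` and `cx` are assumed to exist, and bounds and
// alignment are assumed to have been checked already, as the doc comments require; not exercised here):
//
//     let range = alloc_range(Size::ZERO, Size::from_bytes(4));
//     let checked = alloc.get_bytes(&cx, range)?;                  // errors on uninit or pointer bytes
//     let raw = alloc.get_bytes_with_uninit_and_ptr(&cx, range)?;  // only rejects relocations on the edges
//     alloc.get_bytes_mut(&cx, range).copy_from_slice(&[1, 2, 3, 4]); // marks 0..4 as initialized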
/// Reading and writing.
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
    /// relocation. If `allow_uninit_and_ptr` is `false`, also enforces that the memory in the
    /// given range contains neither relocations nor uninitialized bytes.
    pub fn check_bytes(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        allow_uninit_and_ptr: bool,
    ) -> AllocResult {
        // Check bounds and relocations on the edges.
        self.get_bytes_with_uninit_and_ptr(cx, range)?;
        // Check uninit and ptr.
        if !allow_uninit_and_ptr {
            self.check_init(range)?;
            self.check_relocations(cx, range)?;
        }
        Ok(())
    }

    /// Reads a *non-ZST* scalar.
    ///
    /// ZSTs can't be read because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
    pub fn read_scalar(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<ScalarMaybeUninit<Tag>> {
        // `get_bytes_with_uninit_and_ptr` tests relocation edges.
        let bytes = self.get_bytes_with_uninit_and_ptr(cx, range)?;
        // Uninit check happens *after* we established that the alignment is correct.
        // We must not return `Ok()` for unaligned pointers!
        if self.is_init(range).is_err() {
            // This inflates uninitialized bytes to the entire scalar, even if only a few
            // bytes are uninitialized.
            return Ok(ScalarMaybeUninit::Uninit);
        }
        // Now we do the actual reading.
        let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
        // See if we got a pointer.
        if range.size != cx.data_layout().pointer_size {
            // Not a pointer.
            // *Now*, we better make sure that the inside is free of relocations too.
            self.check_relocations(cx, range)?;
        } else {
            // Maybe a pointer.
            if let Some(&(tag, alloc_id)) = self.relocations.get(&range.start) {
                let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits), tag);
                return Ok(ScalarMaybeUninit::Scalar(ptr.into()));
            }
        }
        // We don't. Just return the bits.
        Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, range.size)))
    }

    /// Writes a *non-ZST* scalar.
    ///
    /// ZSTs can't be written because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
    pub fn write_scalar(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        val: ScalarMaybeUninit<Tag>,
    ) -> AllocResult {
        let val = match val {
            ScalarMaybeUninit::Scalar(scalar) => scalar,
            ScalarMaybeUninit::Uninit => {
                self.mark_init(range, false);
                return Ok(());
            }
        };

        let bytes = match val.to_bits_or_ptr(range.size, cx) {
            Err(val) => u128::from(val.offset.bytes()),
            Ok(data) => data,
        };

        let endian = cx.data_layout().endian;
        let dst = self.get_bytes_mut(cx, range);
        write_target_uint(endian, dst, bytes).unwrap();

        // See if we have to also write a relocation.
        if let Scalar::Ptr(val) = val {
            self.relocations.insert(range.start, (val.tag, val.alloc_id));
        }

        Ok(())
    }
}
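// Illustrative sketch of a round trip through the methods above (`alloc` and `cx` are assumed, and
// the 4-byte range is assumed to be in bounds and suitably aligned; not exercised here):
//
//     let range = alloc_range(Size::ZERO, Size::from_bytes(4));
//     alloc.write_scalar(&cx, range, ScalarMaybeUninit::Scalar(Scalar::from_u32(42)))?;
//     let read_back = alloc.read_scalar(&cx, range)?; // ScalarMaybeUninit::Scalar(42_u32)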
/// Relocations.
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// Returns all relocations overlapping with the given pointer-offset pair.
    pub fn get_relocations(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> &[(Size, (Tag, AllocId))] {
        // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
        // the beginning of this range.
        let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
        self.relocations.range(Size::from_bytes(start)..range.end())
    }
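    // Worked example for `get_relocations` (assuming an 8-byte pointer size): a relocation stored at
    // offset 16 covers bytes 16..24, so a query for the range 20..28 must start scanning at offset
    // 20 - (8 - 1) = 13 in order to find it; `saturating_sub` keeps the scan start at 0 near the
    // beginning of the allocation.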
    /// Checks that there are no relocations overlapping with the given range.
    #[inline(always)]
    fn check_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
        if self.get_relocations(cx, range).is_empty() {
            Ok(())
        } else {
            Err(AllocError::ReadPointerAsBytes)
        }
    }

    /// Removes all relocations inside the given range.
    /// If there are relocations overlapping with the edges, they
    /// are removed as well *and* the bytes they cover are marked as
    /// uninitialized. This is a somewhat odd "spooky action at a distance",
    /// but it allows strictly more code to run than if we would just error
    /// immediately in that case.
    fn clear_relocations(&mut self, cx: &impl HasDataLayout, range: AllocRange) {
        // Find the start and end of the given range and its outermost relocations.
        let (first, last) = {
            // Find all relocations overlapping the given range.
            let relocations = self.get_relocations(cx, range);
            if relocations.is_empty() {
                return;
            }

            (
                relocations.first().unwrap().0,
                relocations.last().unwrap().0 + cx.data_layout().pointer_size,
            )
        };
        let start = range.start;
        let end = range.end();

        // Mark parts of the outermost relocations as uninitialized if they partially fall outside the
        // given range.
        if first < start {
            self.init_mask.set_range(first, start, false);
        }
        if last > end {
            self.init_mask.set_range(end, last, false);
        }

        // Forget all the relocations.
        self.relocations.remove_range(first..last);
    }

    /// Errors if there are relocations overlapping with the edges of the
    /// given memory range.
    #[inline]
    fn check_relocation_edges(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
        self.check_relocations(cx, alloc_range(range.start, Size::ZERO))?;
        self.check_relocations(cx, alloc_range(range.end(), Size::ZERO))?;
        Ok(())
    }
}

/// Uninitialized bytes.
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// Checks whether the given range is entirely initialized.
    ///
    /// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
    /// indexes of the first contiguous uninitialized access.
    fn is_init(&self, range: AllocRange) -> Result<(), Range<Size>> {
        self.init_mask.is_range_initialized(range.start, range.end()) // `Size` addition
    }

    /// Checks that a range of bytes is initialized. If not, returns the `InvalidUninitBytes`
    /// error which will report the first range of bytes which is uninitialized.
    fn check_init(&self, range: AllocRange) -> AllocResult {
        self.is_init(range).or_else(|idx_range| {
            Err(AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
                access_offset: range.start,
                access_size: range.size,
                uninit_offset: idx_range.start,
                uninit_size: idx_range.end - idx_range.start, // `Size` subtraction
            })))
        })
    }

    pub fn mark_init(&mut self, range: AllocRange, is_init: bool) {
        if range.size.bytes() == 0 {
            return;
        }
        self.init_mask.set_range(range.start, range.end(), is_init);
    }
}
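// Illustrative sketch of the initialization tracking above (`alloc` is assumed to have been freshly
// created with `uninit`; not exercised here):
//
//     alloc.mark_init(alloc_range(Size::ZERO, Size::from_bytes(4)), true);
//     // `check_init` now succeeds for any subrange of 0..4 and reports the uninitialized span
//     // starting at offset 4 for any access that reaches past it.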
/// Run-length encoding of the uninit mask.
/// Used to copy parts of a mask multiple times to another allocation.
pub struct InitMaskCompressed {
    /// Whether the first range is initialized.
    initial: bool,
    /// The lengths of ranges that are run-length encoded.
    /// The initialization state of the ranges alternates, starting with `initial`.
    ranges: smallvec::SmallVec<[u64; 1]>,
}

impl InitMaskCompressed {
    pub fn no_bytes_init(&self) -> bool {
        // The `ranges` are run-length encoded and of alternating initialization state.
        // So if `ranges.len() > 1` then the second block is an initialized range.
        !self.initial && self.ranges.len() == 1
    }
}
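// Worked example for `no_bytes_init`: a 16-byte mask that is entirely uninitialized compresses to
// `initial == false` and `ranges == [16]`, so `no_bytes_init()` is `true`; any second run can only
// appear if some bytes were initialized.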
/// Transferring the initialization mask to other allocations.
impl<Tag, Extra> Allocation<Tag, Extra> {
    /// Creates a run-length encoding of the initialization mask.
    pub fn compress_uninit_range(&self, src: Pointer<Tag>, size: Size) -> InitMaskCompressed {
        // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
        // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from
        // the source and write it to the destination. Even if we optimized the memory accesses,
        // we'd be doing all of this `repeat` times.
        // Therefore we precompute a compressed version of the initialization mask of the source value and
        // then write it back `repeat` times without computing any more information from the source.

        // A precomputed cache for ranges of initialized / uninitialized bits:
        // 0000010010001110 will become
        // `[5, 1, 2, 1, 3, 3, 1]`,
        // where each element toggles the state.

        let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();
        let initial = self.init_mask.get(src.offset);
        let mut cur_len = 1;
        let mut cur = initial;

        for i in 1..size.bytes() {
            // FIXME: optimize to bitshift the current uninitialized block's bits and read the top bit.
            if self.init_mask.get(src.offset + Size::from_bytes(i)) == cur {
                cur_len += 1;
            } else {
                ranges.push(cur_len);
                cur_len = 1;
                cur = !cur;
            }
        }

        ranges.push(cur_len);

        InitMaskCompressed { ranges, initial }
    }

    /// Applies multiple instances of the run-length encoding to the initialization mask.
    pub fn mark_compressed_init_range(
        &mut self,
        defined: &InitMaskCompressed,
        dest: Pointer<Tag>,
        size: Size,
        repeat: u64,
    ) {
        // An optimization where we can just overwrite an entire range of initialization
        // bits if they are going to be uniformly `1` or `0`.
        if defined.ranges.len() <= 1 {
            self.init_mask.set_range_inbounds(
                dest.offset,
                dest.offset + size * repeat, // `Size` operations
                defined.initial,
            );
            return;
        }

        for mut j in 0..repeat {
            j *= size.bytes();
            j += dest.offset.bytes();
            let mut cur = defined.initial;
            for range in &defined.ranges {
                let old_j = j;
                j += range;
                self.init_mask.set_range_inbounds(
                    Size::from_bytes(old_j),
                    Size::from_bytes(j),
                    cur,
                );
                cur = !cur;
            }
        }
    }
}
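// Worked example for `mark_compressed_init_range`: with `ranges == [5, 1, 2]`, `initial == false`,
// `size == 8` bytes, and `dest.offset == 0`, repetition 0 marks bytes 0..5 uninit, 5..6 init, and
// 6..8 uninit; repetition 1 replays the same pattern over bytes 8..16, and so on, without ever
// re-reading the source mask.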
/// Relocations.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
pub struct Relocations<Tag = (), Id = AllocId>(SortedMap<Size, (Tag, Id)>);

impl<Tag, Id> Relocations<Tag, Id> {
    pub fn new() -> Self {
        Relocations(SortedMap::new())
    }

    // The caller must guarantee that the given relocations are already sorted
    // by address and contain no duplicates.
    pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self {
        Relocations(SortedMap::from_presorted_elements(r))
    }
}

impl<Tag> Deref for Relocations<Tag> {
    type Target = SortedMap<Size, (Tag, AllocId)>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<Tag> DerefMut for Relocations<Tag> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

/// A partial, owned list of relocations to transfer into another allocation.
pub struct AllocationRelocations<Tag> {
    relative_relocations: Vec<(Size, (Tag, AllocId))>,
}

impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    pub fn prepare_relocation_copy(
        &self,
        cx: &impl HasDataLayout,
        src: AllocRange,
        dest: Size,
        count: u64,
    ) -> AllocationRelocations<Tag> {
        let relocations = self.get_relocations(cx, src);
        if relocations.is_empty() {
            return AllocationRelocations { relative_relocations: Vec::new() };
        }

        let size = src.size;
        let mut new_relocations = Vec::with_capacity(relocations.len() * (count as usize));

        for i in 0..count {
            new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
                // Compute the offset for the current repetition.
                let dest_offset = dest + size * i; // `Size` operations
                (
                    // Shift offsets from the source allocation to the destination allocation.
                    (offset + dest_offset) - src.start, // `Size` operations
                    reloc,
                )
            }));
        }

        AllocationRelocations { relative_relocations: new_relocations }
    }

    /// Applies a relocation copy.
    /// The affected range, as defined in the parameters to `prepare_relocation_copy`, is expected
    /// to be clear of relocations.
    pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Tag>) {
        self.relocations.insert_presorted(relocations.relative_relocations);
    }
}
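// Worked example for `prepare_relocation_copy`: copying the source range 8..16 to destination
// offset 32 with `count == 2` moves a relocation at source offset 10 to (10 + 32) - 8 == 34 for the
// first repetition and to (10 + 32 + 8) - 8 == 42 for the second.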
////////////////////////////////////////////////////////////////////////////////
// Uninitialized byte tracking
////////////////////////////////////////////////////////////////////////////////

type Block = u64;

/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
/// is initialized. If it is `false`, the byte is uninitialized.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct InitMask {
    blocks: Vec<Block>,
    len: Size,
}

impl InitMask {
    pub const BLOCK_SIZE: u64 = 64;

    pub fn new(size: Size, state: bool) -> Self {
        let mut m = InitMask { blocks: vec![], len: Size::ZERO };
        m.grow(size, state);
        m
    }

    /// Checks whether the range `start..end` (end-exclusive) is entirely initialized.
    ///
    /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
    /// indexes for the first contiguous span of the uninitialized access.
    #[inline]
    pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), Range<Size>> {
        if end > self.len {
            return Err(self.len..end);
        }

        // FIXME(oli-obk): optimize this for allocations larger than a block.
        let idx = (start.bytes()..end.bytes()).map(Size::from_bytes).find(|&i| !self.get(i));

        match idx {
            Some(idx) => {
                let uninit_end = (idx.bytes()..end.bytes())
                    .map(Size::from_bytes)
                    .find(|&i| self.get(i))
                    .unwrap_or(end);
                Err(idx..uninit_end)
            }
            None => Ok(()),
        }
    }

    pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
        let len = self.len;
        if end > len {
            self.grow(end - len, new_state);
        }
        self.set_range_inbounds(start, end, new_state);
    }

    pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
        let (blocka, bita) = bit_index(start);
        let (blockb, bitb) = bit_index(end);
        if blocka == blockb {
            // First set all bits except the first `bita`,
            // then unset the last `64 - bitb` bits.
            let range = if bitb == 0 {
                u64::MAX << bita
            } else {
                (u64::MAX << bita) & (u64::MAX >> (64 - bitb))
            };
            if new_state {
                self.blocks[blocka] |= range;
            } else {
                self.blocks[blocka] &= !range;
            }
            return;
        }
        // across block boundaries
        if new_state {
            // Set `bita..64` to `1`.
            self.blocks[blocka] |= u64::MAX << bita;
            // Set `0..bitb` to `1`.
            if bitb != 0 {
                self.blocks[blockb] |= u64::MAX >> (64 - bitb);
            }
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1)..blockb {
                self.blocks[block] = u64::MAX;
            }
        } else {
            // Set `bita..64` to `0`.
            self.blocks[blocka] &= !(u64::MAX << bita);
            // Set `0..bitb` to `0`.
            if bitb != 0 {
                self.blocks[blockb] &= !(u64::MAX >> (64 - bitb));
            }
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1)..blockb {
                self.blocks[block] = 0;
            }
        }
    }
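    // Worked example for `set_range_inbounds` within a single block: setting bytes 4..12 builds the
    // mask `(u64::MAX << 4) & (u64::MAX >> (64 - 12))`, i.e. bits 4 through 11, and ORs it in (for
    // `new_state == true`) or ANDs its negation out (for `false`); across blocks, only the first and
    // last block need such partial masks.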
    #[inline]
    pub fn get(&self, i: Size) -> bool {
        let (block, bit) = bit_index(i);
        (self.blocks[block] & (1 << bit)) != 0
    }

    #[inline]
    pub fn set(&mut self, i: Size, new_state: bool) {
        let (block, bit) = bit_index(i);
        self.set_bit(block, bit, new_state);
    }

    #[inline]
    fn set_bit(&mut self, block: usize, bit: usize, new_state: bool) {
        if new_state {
            self.blocks[block] |= 1 << bit;
        } else {
            self.blocks[block] &= !(1 << bit);
        }
    }

    pub fn grow(&mut self, amount: Size, new_state: bool) {
        if amount.bytes() == 0 {
            return;
        }
        let unused_trailing_bits =
            u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
        if amount.bytes() > unused_trailing_bits {
            let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
            self.blocks.extend(
                // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
                iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
            );
        }
        let start = self.len;
        self.len += amount;
        self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
    }
}

#[inline]
fn bit_index(bits: Size) -> (usize, usize) {
    let bits = bits.bytes();
    let a = bits / InitMask::BLOCK_SIZE;
    let b = bits % InitMask::BLOCK_SIZE;
    (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
}
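// Worked example for `bit_index`: `bit_index(Size::from_bytes(70))` returns `(1, 6)`, i.e. byte 70
// is tracked by bit 6 of the second 64-bit block.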