//! The virtual memory representation of the MIR interpreter.

mod init_mask;
mod provenance_map;

#[cfg(test)]
mod tests;

use std::borrow::Cow;
use std::fmt;
use std::hash;
use std::ops::Range;
use std::ptr;

use rustc_ast::Mutability;
use rustc_data_structures::intern::Interned;
use rustc_span::DUMMY_SP;
use rustc_target::abi::{Align, HasDataLayout, Size};

use super::{
    read_target_uint, write_target_uint, AllocId, InterpError, InterpResult, Pointer, Provenance,
    ResourceExhaustionInfo, Scalar, ScalarSizeMismatch, UndefinedBehaviorInfo, UninitBytesAccess,
    UnsupportedOpInfo,
};
use crate::ty;
use init_mask::*;
use provenance_map::*;

pub use init_mask::{InitChunk, InitChunkIter};

/// This type represents an Allocation in the Miri/CTFE core engine.
///
/// Its public API is rather low-level, working directly with allocation offsets and a custom error
/// type to account for the lack of an AllocId on this level. The Miri/CTFE core engine `memory`
/// module provides higher-level access.
// Note: for performance reasons when interning, some of the `Allocation` fields can be partially
// hashed (see the `Hash` impl below for more details), so the impl is not derived.
#[derive(Clone, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct Allocation<Prov: Provenance = AllocId, Extra = ()> {
    /// The actual bytes of the allocation.
    /// Note that the bytes of a pointer represent the offset of the pointer.
    bytes: Box<[u8]>,
    /// Maps from byte addresses to extra provenance data for each pointer.
    /// Only the first byte of a pointer is inserted into the map; i.e.,
    /// every entry in this map applies to `pointer_size` consecutive bytes starting
    /// at the given offset.
    provenance: ProvenanceMap<Prov>,
    /// Denotes which part of this allocation is initialized.
    init_mask: InitMask,
    /// The alignment of the allocation to detect unaligned reads.
    /// (`Align` guarantees that this is a power of two.)
    pub align: Align,
    /// Whether the allocation is mutable.
    /// Also used by codegen to determine if a static should be put into mutable memory,
    /// which happens for `static mut` and `static` with interior mutability.
    pub mutability: Mutability,
    /// Extra state for the machine.
    pub extra: Extra,
}

/// This is the maximum size we will hash at a time, when interning an `Allocation` and its
/// `InitMask`. Note that we hash that amount of bytes twice: at the start, and at the end of a buffer.
/// Used when these two structures are large: we only partially hash the larger fields in that
/// situation. See the comment at the top of their respective `Hash` impl for more details.
const MAX_BYTES_TO_HASH: usize = 64;

/// This is the maximum size (in bytes) for which a buffer will be fully hashed, when interning.
/// Otherwise, it will be partially hashed in 2 slices, requiring at least 2 `MAX_BYTES_TO_HASH`
/// bytes.
const MAX_HASHED_BUFFER_LEN: usize = 2 * MAX_BYTES_TO_HASH;

// Const allocations are only hashed for interning. However, they can be large, making the hashing
// expensive especially since it uses `FxHash`: it's better suited to short keys, not potentially
// big buffers like the actual bytes of allocation. We can partially hash some fields when they're
// large.
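//
// A worked example of the scheme above (illustrative numbers, not from the original source):
// for a 1000-byte buffer, `1000 > MAX_HASHED_BUFFER_LEN`, so instead of feeding all 1000 bytes
// to the hasher we hash the length `1000`, the first `MAX_BYTES_TO_HASH` bytes (`bytes[..64]`),
// and the last `MAX_BYTES_TO_HASH` bytes (`bytes[936..]`).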
impl hash::Hash for Allocation {
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        // Partially hash the `bytes` buffer when it is large. To limit collisions with common
        // prefixes and suffixes, we hash the length and some slices of the buffer.
        let byte_count = self.bytes.len();
        if byte_count > MAX_HASHED_BUFFER_LEN {
            // Hash the buffer's length.
            byte_count.hash(state);

            // And its head and tail.
            self.bytes[..MAX_BYTES_TO_HASH].hash(state);
            self.bytes[byte_count - MAX_BYTES_TO_HASH..].hash(state);
        } else {
            self.bytes.hash(state);
        }

        // Hash the other fields as usual.
        self.provenance.hash(state);
        self.init_mask.hash(state);
        self.align.hash(state);
        self.mutability.hash(state);
        self.extra.hash(state);
    }
}

/// Interned types generally have an `Outer` type and an `Inner` type, where
/// `Outer` is a newtype around `Interned<Inner>`, and all the operations are
/// done on `Outer`, because all occurrences are interned. E.g. `Ty` is an
/// outer type and `TyS` is its inner type.
///
/// Here things are different because only const allocations are interned. This
/// means that both the inner type (`Allocation`) and the outer type
/// (`ConstAllocation`) are used quite a bit.
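///
/// A minimal sketch of going from the interned wrapper back to the allocation (illustrative;
/// `const_alloc` is a placeholder name for a `ConstAllocation` obtained from the interner):
/// ```ignore (illustrative)
/// let alloc: &Allocation = const_alloc.inner();
/// let _size = alloc.size();
/// ```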
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, HashStable)]
#[rustc_pass_by_value]
pub struct ConstAllocation<'tcx>(pub Interned<'tcx, Allocation>);

impl<'tcx> fmt::Debug for ConstAllocation<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The debug representation of this is very verbose and basically useless,
        // so don't print it.
        write!(f, "ConstAllocation {{ .. }}")
    }
}

impl<'tcx> ConstAllocation<'tcx> {
    pub fn inner(self) -> &'tcx Allocation {
        self.0.0
    }
}

/// We have our own error type that does not know about the `AllocId`; that information
/// is added when converting to `InterpError`.
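///
/// For example (an illustrative sketch; `alloc_err` and `alloc_id` are placeholder names):
/// ```ignore (illustrative)
/// let interp_err = alloc_err.to_interp_error(alloc_id);
/// ```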
#[derive(Debug)]
pub enum AllocError {
    /// A scalar had the wrong size.
    ScalarSizeMismatch(ScalarSizeMismatch),
    /// Encountered a pointer where we needed raw bytes.
    ReadPointerAsBytes,
    /// Partially overwriting a pointer.
    PartialPointerOverwrite(Size),
    /// Partially copying a pointer.
    PartialPointerCopy(Size),
    /// Using uninitialized data where it is not allowed.
    InvalidUninitBytes(Option<UninitBytesAccess>),
}

pub type AllocResult<T = ()> = Result<T, AllocError>;

impl From<ScalarSizeMismatch> for AllocError {
    fn from(s: ScalarSizeMismatch) -> Self {
        AllocError::ScalarSizeMismatch(s)
    }
}

impl AllocError {
    pub fn to_interp_error<'tcx>(self, alloc_id: AllocId) -> InterpError<'tcx> {
        use AllocError::*;
        match self {
            ScalarSizeMismatch(s) => {
                InterpError::UndefinedBehavior(UndefinedBehaviorInfo::ScalarSizeMismatch(s))
            }
            ReadPointerAsBytes => InterpError::Unsupported(UnsupportedOpInfo::ReadPointerAsBytes),
            PartialPointerOverwrite(offset) => InterpError::Unsupported(
                UnsupportedOpInfo::PartialPointerOverwrite(Pointer::new(alloc_id, offset)),
            ),
            PartialPointerCopy(offset) => InterpError::Unsupported(
                UnsupportedOpInfo::PartialPointerCopy(Pointer::new(alloc_id, offset)),
            ),
            InvalidUninitBytes(info) => InterpError::UndefinedBehavior(
                UndefinedBehaviorInfo::InvalidUninitBytes(info.map(|b| (alloc_id, b))),
            ),
        }
    }
}

/// The information that makes up a memory access: offset and size.
#[derive(Copy, Clone)]
pub struct AllocRange {
    pub start: Size,
    pub size: Size,
}

impl fmt::Debug for AllocRange {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "[{:#x}..{:#x}]", self.start.bytes(), self.end().bytes())
    }
}

/// Free-standing constructor for less syntactic overhead.
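///
/// For example (an illustrative sketch): a 4-byte access starting at offset 8.
/// ```ignore (illustrative)
/// let r = alloc_range(Size::from_bytes(8), Size::from_bytes(4));
/// assert_eq!(r.end(), Size::from_bytes(12));
/// ```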
#[inline(always)]
pub fn alloc_range(start: Size, size: Size) -> AllocRange {
    AllocRange { start, size }
}

impl From<Range<Size>> for AllocRange {
    #[inline]
    fn from(r: Range<Size>) -> Self {
        alloc_range(r.start, r.end - r.start) // `Size` subtraction (overflow-checked)
    }
}

impl From<Range<usize>> for AllocRange {
    #[inline]
    fn from(r: Range<usize>) -> Self {
        AllocRange::from(Size::from_bytes(r.start)..Size::from_bytes(r.end))
    }
}

impl AllocRange {
    #[inline(always)]
    pub fn end(self) -> Size {
        self.start + self.size // This does overflow checking.
    }

    /// Returns the `subrange` within this range; panics if it is not a subrange.
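    ///
    /// For example (an illustrative sketch): the subrange `[2..6]` of the range `[8..16]` is
    /// `[10..14]`.
    /// ```ignore (illustrative)
    /// let outer = alloc_range(Size::from_bytes(8), Size::from_bytes(8));
    /// let inner = outer.subrange(alloc_range(Size::from_bytes(2), Size::from_bytes(4)));
    /// assert_eq!((inner.start.bytes(), inner.end().bytes()), (10, 14));
    /// ```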
    #[inline]
    pub fn subrange(self, subrange: AllocRange) -> AllocRange {
        let sub_start = self.start + subrange.start;
        let range = alloc_range(sub_start, subrange.size);
        assert!(range.end() <= self.end(), "access outside the bounds for given AllocRange");
        range
    }
}

// The constructors are all without extra; the extra gets added by a machine hook later.
impl<Prov: Provenance> Allocation<Prov> {
    /// Creates an allocation initialized by the given bytes.
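    ///
    /// A minimal sketch of constructing one (illustrative, not from the original source):
    /// ```ignore (illustrative)
    /// let alloc = Allocation::<AllocId>::from_bytes(&b"hello"[..], Align::ONE, Mutability::Not);
    /// assert_eq!(alloc.len(), 5);
    /// ```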
    pub fn from_bytes<'a>(
        slice: impl Into<Cow<'a, [u8]>>,
        align: Align,
        mutability: Mutability,
    ) -> Self {
        let bytes = Box::<[u8]>::from(slice.into());
        let size = Size::from_bytes(bytes.len());
        Self {
            bytes,
            provenance: ProvenanceMap::new(),
            init_mask: InitMask::new(size, true),
            align,
            mutability,
            extra: (),
        }
    }

    pub fn from_bytes_byte_aligned_immutable<'a>(slice: impl Into<Cow<'a, [u8]>>) -> Self {
        Allocation::from_bytes(slice, Align::ONE, Mutability::Not)
    }

    /// Try to create an Allocation of `size` bytes, failing if there is not enough memory
    /// available to the compiler to do so.
    ///
    /// If `panic_on_fail` is true, this will never return `Err`.
    pub fn uninit<'tcx>(size: Size, align: Align, panic_on_fail: bool) -> InterpResult<'tcx, Self> {
        let bytes = Box::<[u8]>::try_new_zeroed_slice(size.bytes_usize()).map_err(|_| {
            // This results in an error that can happen non-deterministically, since the memory
            // available to the compiler can change between runs. Normally queries are always
            // deterministic. However, we can be non-deterministic here because all uses of const
            // evaluation (including ConstProp!) will make compilation fail (via hard error
            // or ICE) upon encountering a `MemoryExhausted` error.
            if panic_on_fail {
                panic!("Allocation::uninit called with panic_on_fail had allocation failure")
            }
            ty::tls::with(|tcx| {
                tcx.sess.delay_span_bug(DUMMY_SP, "exhausted memory during interpretation")
            });
            InterpError::ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted)
        })?;
        // SAFETY: the box was zero-allocated, which is a valid initial value for Box<[u8]>
        let bytes = unsafe { bytes.assume_init() };
        Ok(Allocation {
            bytes,
            provenance: ProvenanceMap::new(),
            init_mask: InitMask::new(size, false),
            align,
            mutability: Mutability::Mut,
            extra: (),
        })
    }
}

impl Allocation {
    /// Adjusts an allocation from the one in `tcx` to a custom Machine instance
    /// with a different `Provenance` and `Extra` type.
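    ///
    /// A minimal sketch (illustrative; `tcx_alloc`, `cx`, and `machine_extra` are placeholder
    /// names, and the closure here keeps the provenance unchanged):
    /// ```ignore (illustrative)
    /// let machine_alloc = tcx_alloc.adjust_from_tcx(&cx, machine_extra, |ptr| Ok(ptr))?;
    /// ```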
    pub fn adjust_from_tcx<Prov: Provenance, Extra, Err>(
        self,
        cx: &impl HasDataLayout,
        extra: Extra,
        mut adjust_ptr: impl FnMut(Pointer<AllocId>) -> Result<Pointer<Prov>, Err>,
    ) -> Result<Allocation<Prov, Extra>, Err> {
        // Compute new pointer provenance, which also adjusts the bytes.
        let mut bytes = self.bytes;
        let mut new_provenance = Vec::with_capacity(self.provenance.ptrs().len());
        let ptr_size = cx.data_layout().pointer_size.bytes_usize();
        let endian = cx.data_layout().endian;
        for &(offset, alloc_id) in self.provenance.ptrs().iter() {
            let idx = offset.bytes_usize();
            let ptr_bytes = &mut bytes[idx..idx + ptr_size];
            let bits = read_target_uint(endian, ptr_bytes).unwrap();
            let (ptr_prov, ptr_offset) =
                adjust_ptr(Pointer::new(alloc_id, Size::from_bytes(bits)))?.into_parts();
            write_target_uint(endian, ptr_bytes, ptr_offset.bytes().into()).unwrap();
            new_provenance.push((offset, ptr_prov));
        }
        // Create allocation.
        Ok(Allocation {
            bytes,
            provenance: ProvenanceMap::from_presorted_ptrs(new_provenance),
            init_mask: self.init_mask,
            align: self.align,
            mutability: self.mutability,
            extra,
        })
    }
}

/// Raw accessors. Provide access to otherwise private bytes.
impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
    pub fn len(&self) -> usize {
        self.bytes.len()
    }

    pub fn size(&self) -> Size {
        Size::from_bytes(self.len())
    }

    /// Looks at a slice which may contain uninitialized bytes or provenance. This differs
    /// from `get_bytes_with_uninit_and_ptr` in that it does no provenance checks (even on the
    /// edges) at all.
    /// This must not be used for reads affecting the interpreter execution.
    pub fn inspect_with_uninit_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
        &self.bytes[range]
    }

    /// Returns the mask indicating which bytes are initialized.
    pub fn init_mask(&self) -> &InitMask {
        &self.init_mask
    }

    /// Returns the provenance map.
    pub fn provenance(&self) -> &ProvenanceMap<Prov> {
        &self.provenance
    }
}

/// Byte accessors.
impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
    /// This is the entirely abstraction-violating way to just grab the raw bytes without
    /// caring about provenance or initialization.
    ///
    /// This function also guarantees that the resulting pointer will remain stable
    /// even when new allocations are pushed to the `HashMap`. `mem_copy_repeatedly` relies
    /// on that.
    #[inline]
    pub fn get_bytes_unchecked(&self, range: AllocRange) -> &[u8] {
        &self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]
    }

    /// Checks that these bytes are initialized, and then strips provenance (if possible) and
    /// returns them.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
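    ///
    /// A minimal sketch (illustrative; `cx` is anything implementing `HasDataLayout`):
    /// ```ignore (illustrative)
    /// let bytes = alloc.get_bytes_strip_provenance(cx, alloc_range(Size::ZERO, alloc.size()))?;
    /// ```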
    #[inline]
    pub fn get_bytes_strip_provenance(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<&[u8]> {
        self.init_mask.is_range_initialized(range).map_err(|uninit_range| {
            AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
                access: range,
                uninit: uninit_range,
            }))
        })?;
        if !Prov::OFFSET_IS_ADDR {
            if !self.provenance.range_empty(range, cx) {
                return Err(AllocError::ReadPointerAsBytes);
            }
        }
        Ok(self.get_bytes_unchecked(range))
    }

    /// Just calling this already marks everything as defined and removes provenance,
    /// so be sure to actually put data there!
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    pub fn get_bytes_mut(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<&mut [u8]> {
        self.mark_init(range, true);
        self.provenance.clear(range, cx)?;

        Ok(&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
    }

    /// A raw pointer variant of `get_bytes_mut` that avoids invalidating existing aliases into this memory.
    pub fn get_bytes_mut_ptr(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<*mut [u8]> {
        self.mark_init(range, true);
        self.provenance.clear(range, cx)?;

        assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
        let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
        let len = range.end().bytes_usize() - range.start.bytes_usize();
        Ok(ptr::slice_from_raw_parts_mut(begin_ptr, len))
    }
}

/// Reading and writing.
impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
    /// Sets the init bit for the given range.
    fn mark_init(&mut self, range: AllocRange, is_init: bool) {
        if range.size.bytes() == 0 {
            return;
        }
        assert!(self.mutability == Mutability::Mut);
        self.init_mask.set_range(range, is_init);
    }

    /// Reads a *non-ZST* scalar.
    ///
    /// If `read_provenance` is `true`, this will also read provenance; otherwise (if the machine
    /// supports that) provenance is entirely ignored.
    ///
    /// ZSTs can't be read because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
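    ///
    /// A minimal sketch (illustrative; `cx` is anything implementing `HasDataLayout`):
    /// ```ignore (illustrative)
    /// // Read 4 bytes at offset 0 as a plain integer, ignoring provenance.
    /// let val = alloc.read_scalar(cx, alloc_range(Size::ZERO, Size::from_bytes(4)), false)?;
    /// ```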
    pub fn read_scalar(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        read_provenance: bool,
    ) -> AllocResult<Scalar<Prov>> {
        // First and foremost, if anything is uninit, bail.
        if self.init_mask.is_range_initialized(range).is_err() {
            return Err(AllocError::InvalidUninitBytes(None));
        }

        // Get the integer part of the result. We HAVE TO check provenance before returning this!
        let bytes = self.get_bytes_unchecked(range);
        let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();

        if read_provenance {
            assert_eq!(range.size, cx.data_layout().pointer_size);

            // When reading data with provenance, the easy case is finding provenance exactly where we
            // are reading, then we can put data and provenance back together and return that.
            if let Some(prov) = self.provenance.get_ptr(range.start) {
                // Now we can return the bits, with their appropriate provenance.
                let ptr = Pointer::new(prov, Size::from_bytes(bits));
                return Ok(Scalar::from_pointer(ptr, cx));
            }

            // If we can work on pointers byte-wise, join the byte-wise provenances.
            if Prov::OFFSET_IS_ADDR {
                let mut prov = self.provenance.get(range.start, cx);
                for offset in Size::from_bytes(1)..range.size {
                    let this_prov = self.provenance.get(range.start + offset, cx);
                    prov = Prov::join(prov, this_prov);
                }
                // Now use this provenance.
                let ptr = Pointer::new(prov, Size::from_bytes(bits));
                return Ok(Scalar::from_maybe_pointer(ptr, cx));
            }
        } else {
            // We are *not* reading a pointer.
            // If we can just ignore provenance, do exactly that.
            if Prov::OFFSET_IS_ADDR {
                // We just strip provenance.
                return Ok(Scalar::from_uint(bits, range.size));
            }
        }

        // Fallback path for when we cannot treat provenance bytewise or ignore it.
        assert!(!Prov::OFFSET_IS_ADDR);
        if !self.provenance.range_empty(range, cx) {
            return Err(AllocError::ReadPointerAsBytes);
        }
        // There is no provenance, we can just return the bits.
        Ok(Scalar::from_uint(bits, range.size))
    }

    /// Writes a *non-ZST* scalar.
    ///
    /// ZSTs can't be written because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
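    ///
    /// A minimal sketch (illustrative; `cx` is anything implementing `HasDataLayout`):
    /// ```ignore (illustrative)
    /// // Store the 32-bit value 42 at offset 8.
    /// let range = alloc_range(Size::from_bytes(8), Size::from_bytes(4));
    /// alloc.write_scalar(cx, range, Scalar::from_u32(42))?;
    /// ```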
    pub fn write_scalar(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        val: Scalar<Prov>,
    ) -> AllocResult {
        assert!(self.mutability == Mutability::Mut);

        // `to_bits_or_ptr_internal` is the right method because we just want to store this data
        // as-is into memory.
        let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size)? {
            Err(val) => {
                let (provenance, offset) = val.into_parts();
                (u128::from(offset.bytes()), Some(provenance))
            }
            Ok(data) => (data, None),
        };

        let endian = cx.data_layout().endian;
        let dst = self.get_bytes_mut(cx, range)?;
        write_target_uint(endian, dst, bytes).unwrap();

        // See if we have to also store some provenance.
        if let Some(provenance) = provenance {
            assert_eq!(range.size, cx.data_layout().pointer_size);
            self.provenance.insert_ptr(range.start, provenance, cx);
        }

        Ok(())
    }

    /// Write "uninit" to the given memory range.
    pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
        self.mark_init(range, false);
        self.provenance.clear(range, cx)?;
        return Ok(());
    }

    /// Applies a previously prepared provenance copy.
    /// The affected range, as defined in the parameters to `provenance().prepare_copy`, is
    /// expected to be clear of provenance.
    ///
    /// This is dangerous to use as it can violate internal `Allocation` invariants!
    /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
    pub fn provenance_apply_copy(&mut self, copy: ProvenanceCopy<Prov>) {
        self.provenance.apply_copy(copy)
    }

    /// Applies a previously prepared copy of the init mask.
    ///
    /// This is dangerous to use as it can violate internal `Allocation` invariants!
    /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
    pub fn init_mask_apply_copy(&mut self, copy: InitCopy, range: AllocRange, repeat: u64) {
        self.init_mask.apply_copy(copy, range, repeat)
    }
}