const-eval: full support for pointer fragments
This commit is contained in:
@@ -57,7 +57,7 @@ const_eval_const_context = {$kind ->
|
|||||||
}
|
}
|
||||||
|
|
||||||
const_eval_const_heap_ptr_in_final = encountered `const_allocate` pointer in final value that was not made global
|
const_eval_const_heap_ptr_in_final = encountered `const_allocate` pointer in final value that was not made global
|
||||||
.note = use `const_make_global` to make allocated pointers immutable before returning
|
.note = use `const_make_global` to turn allocated pointers into immutable globals before returning
|
||||||
|
|
||||||
const_eval_const_make_global_ptr_already_made_global = attempting to call `const_make_global` twice on the same allocation {$alloc}
|
const_eval_const_make_global_ptr_already_made_global = attempting to call `const_make_global` twice on the same allocation {$alloc}
|
||||||
|
|
||||||
@@ -231,6 +231,9 @@ const_eval_mutable_borrow_escaping =
|
|||||||
|
|
||||||
const_eval_mutable_ptr_in_final = encountered mutable pointer in final value of {const_eval_intern_kind}
|
const_eval_mutable_ptr_in_final = encountered mutable pointer in final value of {const_eval_intern_kind}
|
||||||
|
|
||||||
|
const_eval_partial_pointer_in_final = encountered partial pointer in final value of {const_eval_intern_kind}
|
||||||
|
.note = while pointers can be broken apart into individual bytes during const-evaluation, only complete pointers (with all their bytes in the right order) are supported in the final value
|
||||||
|
|
||||||
const_eval_nested_static_in_thread_local = #[thread_local] does not support implicit nested statics, please create explicit static items and refer to them instead
|
const_eval_nested_static_in_thread_local = #[thread_local] does not support implicit nested statics, please create explicit static items and refer to them instead
|
||||||
|
|
||||||
const_eval_non_const_await =
|
const_eval_non_const_await =
|
||||||
@@ -299,10 +302,8 @@ const_eval_panic = evaluation panicked: {$msg}
|
|||||||
|
|
||||||
const_eval_panic_non_str = argument to `panic!()` in a const context must have type `&str`
|
const_eval_panic_non_str = argument to `panic!()` in a const context must have type `&str`
|
||||||
|
|
||||||
const_eval_partial_pointer_copy =
|
const_eval_partial_pointer_read =
|
||||||
unable to copy parts of a pointer from memory at {$ptr}
|
unable to read parts of a pointer from memory at {$ptr}
|
||||||
const_eval_partial_pointer_overwrite =
|
|
||||||
unable to overwrite parts of a pointer in memory at {$ptr}
|
|
||||||
const_eval_pointer_arithmetic_overflow =
|
const_eval_pointer_arithmetic_overflow =
|
||||||
overflowing pointer arithmetic: the total offset in bytes does not fit in an `isize`
|
overflowing pointer arithmetic: the total offset in bytes does not fit in an `isize`
|
||||||
|
|
||||||
|
|||||||
@@ -117,6 +117,13 @@ fn eval_body_using_ecx<'tcx, R: InterpretationResult<'tcx>>(
|
|||||||
ecx.tcx.dcx().emit_err(errors::ConstHeapPtrInFinal { span: ecx.tcx.span }),
|
ecx.tcx.dcx().emit_err(errors::ConstHeapPtrInFinal { span: ecx.tcx.span }),
|
||||||
)));
|
)));
|
||||||
}
|
}
|
||||||
|
Err(InternError::PartialPointer) => {
|
||||||
|
throw_inval!(AlreadyReported(ReportedErrorInfo::non_const_eval_error(
|
||||||
|
ecx.tcx
|
||||||
|
.dcx()
|
||||||
|
.emit_err(errors::PartialPtrInFinal { span: ecx.tcx.span, kind: intern_kind }),
|
||||||
|
)));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
interp_ok(R::make_result(ret, ecx))
|
interp_ok(R::make_result(ret, ecx))
|
||||||
|
|||||||
@@ -51,6 +51,15 @@ pub(crate) struct ConstHeapPtrInFinal {
|
|||||||
pub span: Span,
|
pub span: Span,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Diagnostic)]
|
||||||
|
#[diag(const_eval_partial_pointer_in_final)]
|
||||||
|
#[note]
|
||||||
|
pub(crate) struct PartialPtrInFinal {
|
||||||
|
#[primary_span]
|
||||||
|
pub span: Span,
|
||||||
|
pub kind: InternKind,
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Diagnostic)]
|
#[derive(Diagnostic)]
|
||||||
#[diag(const_eval_unstable_in_stable_exposed)]
|
#[diag(const_eval_unstable_in_stable_exposed)]
|
||||||
pub(crate) struct UnstableInStableExposed {
|
pub(crate) struct UnstableInStableExposed {
|
||||||
@@ -832,8 +841,7 @@ impl ReportErrorExt for UnsupportedOpInfo {
|
|||||||
UnsupportedOpInfo::Unsupported(s) => s.clone().into(),
|
UnsupportedOpInfo::Unsupported(s) => s.clone().into(),
|
||||||
UnsupportedOpInfo::ExternTypeField => const_eval_extern_type_field,
|
UnsupportedOpInfo::ExternTypeField => const_eval_extern_type_field,
|
||||||
UnsupportedOpInfo::UnsizedLocal => const_eval_unsized_local,
|
UnsupportedOpInfo::UnsizedLocal => const_eval_unsized_local,
|
||||||
UnsupportedOpInfo::OverwritePartialPointer(_) => const_eval_partial_pointer_overwrite,
|
UnsupportedOpInfo::ReadPartialPointer(_) => const_eval_partial_pointer_read,
|
||||||
UnsupportedOpInfo::ReadPartialPointer(_) => const_eval_partial_pointer_copy,
|
|
||||||
UnsupportedOpInfo::ReadPointerAsInt(_) => const_eval_read_pointer_as_int,
|
UnsupportedOpInfo::ReadPointerAsInt(_) => const_eval_read_pointer_as_int,
|
||||||
UnsupportedOpInfo::ThreadLocalStatic(_) => const_eval_thread_local_static,
|
UnsupportedOpInfo::ThreadLocalStatic(_) => const_eval_thread_local_static,
|
||||||
UnsupportedOpInfo::ExternStatic(_) => const_eval_extern_static,
|
UnsupportedOpInfo::ExternStatic(_) => const_eval_extern_static,
|
||||||
@@ -844,7 +852,7 @@ impl ReportErrorExt for UnsupportedOpInfo {
|
|||||||
use UnsupportedOpInfo::*;
|
use UnsupportedOpInfo::*;
|
||||||
|
|
||||||
use crate::fluent_generated::*;
|
use crate::fluent_generated::*;
|
||||||
if let ReadPointerAsInt(_) | OverwritePartialPointer(_) | ReadPartialPointer(_) = self {
|
if let ReadPointerAsInt(_) | ReadPartialPointer(_) = self {
|
||||||
diag.help(const_eval_ptr_as_bytes_1);
|
diag.help(const_eval_ptr_as_bytes_1);
|
||||||
diag.help(const_eval_ptr_as_bytes_2);
|
diag.help(const_eval_ptr_as_bytes_2);
|
||||||
}
|
}
|
||||||
@@ -856,7 +864,7 @@ impl ReportErrorExt for UnsupportedOpInfo {
|
|||||||
| UnsupportedOpInfo::ExternTypeField
|
| UnsupportedOpInfo::ExternTypeField
|
||||||
| Unsupported(_)
|
| Unsupported(_)
|
||||||
| ReadPointerAsInt(_) => {}
|
| ReadPointerAsInt(_) => {}
|
||||||
OverwritePartialPointer(ptr) | ReadPartialPointer(ptr) => {
|
ReadPartialPointer(ptr) => {
|
||||||
diag.arg("ptr", ptr);
|
diag.arg("ptr", ptr);
|
||||||
}
|
}
|
||||||
ThreadLocalStatic(did) | ExternStatic(did) => rustc_middle::ty::tls::with(|tcx| {
|
ThreadLocalStatic(did) | ExternStatic(did) => rustc_middle::ty::tls::with(|tcx| {
|
||||||
|
|||||||
@@ -19,9 +19,12 @@ use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
|
|||||||
use rustc_hir as hir;
|
use rustc_hir as hir;
|
||||||
use rustc_hir::definitions::{DefPathData, DisambiguatorState};
|
use rustc_hir::definitions::{DefPathData, DisambiguatorState};
|
||||||
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
|
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
|
||||||
use rustc_middle::mir::interpret::{ConstAllocation, CtfeProvenance, InterpResult};
|
use rustc_middle::mir::interpret::{
|
||||||
|
AllocBytes, ConstAllocation, CtfeProvenance, InterpResult, Provenance,
|
||||||
|
};
|
||||||
use rustc_middle::query::TyCtxtAt;
|
use rustc_middle::query::TyCtxtAt;
|
||||||
use rustc_middle::span_bug;
|
use rustc_middle::span_bug;
|
||||||
|
use rustc_middle::ty::TyCtxt;
|
||||||
use rustc_middle::ty::layout::TyAndLayout;
|
use rustc_middle::ty::layout::TyAndLayout;
|
||||||
use rustc_span::def_id::LocalDefId;
|
use rustc_span::def_id::LocalDefId;
|
||||||
use tracing::{instrument, trace};
|
use tracing::{instrument, trace};
|
||||||
@@ -52,6 +55,45 @@ impl HasStaticRootDefId for const_eval::CompileTimeMachine<'_> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn prepare_alloc<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
|
||||||
|
tcx: TyCtxt<'tcx>,
|
||||||
|
kind: MemoryKind<const_eval::MemoryKind>,
|
||||||
|
alloc: &mut Allocation<Prov, Extra, Bytes>,
|
||||||
|
mutability: Mutability,
|
||||||
|
) -> Result<(), InternError> {
|
||||||
|
match kind {
|
||||||
|
MemoryKind::Machine(const_eval::MemoryKind::Heap { was_made_global }) => {
|
||||||
|
if !was_made_global {
|
||||||
|
// Attempting to intern a `const_allocate`d pointer that was not made global via
|
||||||
|
// `const_make_global`.
|
||||||
|
tcx.dcx().delayed_bug("non-global heap allocation in const value");
|
||||||
|
return Err(InternError::ConstAllocNotGlobal);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
MemoryKind::Stack | MemoryKind::CallerLocation => {}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !alloc.provenance_merge_bytes(&tcx) {
|
||||||
|
// Per-byte provenance is not supported by backends, so we cannot accept it here.
|
||||||
|
tcx.dcx().delayed_bug("partial pointer in const value");
|
||||||
|
return Err(InternError::PartialPointer);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set allocation mutability as appropriate. This is used by LLVM to put things into
|
||||||
|
// read-only memory, and also by Miri when evaluating other globals that
|
||||||
|
// access this one.
|
||||||
|
match mutability {
|
||||||
|
Mutability::Not => {
|
||||||
|
alloc.mutability = Mutability::Not;
|
||||||
|
}
|
||||||
|
Mutability::Mut => {
|
||||||
|
// This must be already mutable, we won't "un-freeze" allocations ever.
|
||||||
|
assert_eq!(alloc.mutability, Mutability::Mut);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
/// Intern an allocation. Returns `Err` if the allocation does not exist in the local memory.
|
/// Intern an allocation. Returns `Err` if the allocation does not exist in the local memory.
|
||||||
///
|
///
|
||||||
/// `mutability` can be used to force immutable interning: if it is `Mutability::Not`, the
|
/// `mutability` can be used to force immutable interning: if it is `Mutability::Not`, the
|
||||||
@@ -72,31 +114,13 @@ fn intern_shallow<'tcx, M: CompileTimeMachine<'tcx>>(
|
|||||||
return Err(InternError::DanglingPointer);
|
return Err(InternError::DanglingPointer);
|
||||||
};
|
};
|
||||||
|
|
||||||
match kind {
|
if let Err(err) = prepare_alloc(*ecx.tcx, kind, &mut alloc, mutability) {
|
||||||
MemoryKind::Machine(const_eval::MemoryKind::Heap { was_made_global }) => {
|
// We want to error here, but we have to first put the
|
||||||
if !was_made_global {
|
|
||||||
// Attempting to intern a `const_allocate`d pointer that was not made global via
|
|
||||||
// `const_make_global`. We want to error here, but we have to first put the
|
|
||||||
// allocation back into the `alloc_map` to keep things in a consistent state.
|
// allocation back into the `alloc_map` to keep things in a consistent state.
|
||||||
ecx.memory.alloc_map.insert(alloc_id, (kind, alloc));
|
ecx.memory.alloc_map.insert(alloc_id, (kind, alloc));
|
||||||
return Err(InternError::ConstAllocNotGlobal);
|
return Err(err);
|
||||||
}
|
|
||||||
}
|
|
||||||
MemoryKind::Stack | MemoryKind::CallerLocation => {}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set allocation mutability as appropriate. This is used by LLVM to put things into
|
|
||||||
// read-only memory, and also by Miri when evaluating other globals that
|
|
||||||
// access this one.
|
|
||||||
match mutability {
|
|
||||||
Mutability::Not => {
|
|
||||||
alloc.mutability = Mutability::Not;
|
|
||||||
}
|
|
||||||
Mutability::Mut => {
|
|
||||||
// This must be already mutable, we won't "un-freeze" allocations ever.
|
|
||||||
assert_eq!(alloc.mutability, Mutability::Mut);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// link the alloc id to the actual allocation
|
// link the alloc id to the actual allocation
|
||||||
let alloc = ecx.tcx.mk_const_alloc(alloc);
|
let alloc = ecx.tcx.mk_const_alloc(alloc);
|
||||||
if let Some(static_id) = ecx.machine.static_def_id() {
|
if let Some(static_id) = ecx.machine.static_def_id() {
|
||||||
@@ -166,6 +190,7 @@ pub enum InternError {
|
|||||||
BadMutablePointer,
|
BadMutablePointer,
|
||||||
DanglingPointer,
|
DanglingPointer,
|
||||||
ConstAllocNotGlobal,
|
ConstAllocNotGlobal,
|
||||||
|
PartialPointer,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Intern `ret` and everything it references.
|
/// Intern `ret` and everything it references.
|
||||||
@@ -221,13 +246,11 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx>>(
|
|||||||
let mut todo: Vec<_> = if is_static {
|
let mut todo: Vec<_> = if is_static {
|
||||||
// Do not steal the root allocation, we need it later to create the return value of `eval_static_initializer`.
|
// Do not steal the root allocation, we need it later to create the return value of `eval_static_initializer`.
|
||||||
// But still change its mutability to match the requested one.
|
// But still change its mutability to match the requested one.
|
||||||
let alloc = ecx.memory.alloc_map.get_mut(&base_alloc_id).unwrap();
|
let (kind, alloc) = ecx.memory.alloc_map.get_mut(&base_alloc_id).unwrap();
|
||||||
alloc.1.mutability = base_mutability;
|
prepare_alloc(*ecx.tcx, *kind, alloc, base_mutability)?;
|
||||||
alloc.1.provenance().ptrs().iter().map(|&(_, prov)| prov).collect()
|
alloc.provenance().ptrs().iter().map(|&(_, prov)| prov).collect()
|
||||||
} else {
|
} else {
|
||||||
intern_shallow(ecx, base_alloc_id, base_mutability, Some(&mut disambiguator))
|
intern_shallow(ecx, base_alloc_id, base_mutability, Some(&mut disambiguator))?.collect()
|
||||||
.unwrap()
|
|
||||||
.collect()
|
|
||||||
};
|
};
|
||||||
// We need to distinguish "has just been interned" from "was already in `tcx`",
|
// We need to distinguish "has just been interned" from "was already in `tcx`",
|
||||||
// so we track this in a separate set.
|
// so we track this in a separate set.
|
||||||
@@ -235,7 +258,6 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx>>(
|
|||||||
// Whether we encountered a bad mutable pointer.
|
// Whether we encountered a bad mutable pointer.
|
||||||
// We want to first report "dangling" and then "mutable", so we need to delay reporting these
|
// We want to first report "dangling" and then "mutable", so we need to delay reporting these
|
||||||
// errors.
|
// errors.
|
||||||
let mut result = Ok(());
|
|
||||||
let mut found_bad_mutable_ptr = false;
|
let mut found_bad_mutable_ptr = false;
|
||||||
|
|
||||||
// Keep interning as long as there are things to intern.
|
// Keep interning as long as there are things to intern.
|
||||||
@@ -310,20 +332,15 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx>>(
|
|||||||
// okay with losing some potential for immutability here. This can anyway only affect
|
// okay with losing some potential for immutability here. This can anyway only affect
|
||||||
// `static mut`.
|
// `static mut`.
|
||||||
just_interned.insert(alloc_id);
|
just_interned.insert(alloc_id);
|
||||||
match intern_shallow(ecx, alloc_id, inner_mutability, Some(&mut disambiguator)) {
|
let next = intern_shallow(ecx, alloc_id, inner_mutability, Some(&mut disambiguator))?;
|
||||||
Ok(nested) => todo.extend(nested),
|
todo.extend(next);
|
||||||
Err(err) => {
|
|
||||||
ecx.tcx.dcx().delayed_bug("error during const interning");
|
|
||||||
result = Err(err);
|
|
||||||
}
|
}
|
||||||
}
|
if found_bad_mutable_ptr {
|
||||||
}
|
|
||||||
if found_bad_mutable_ptr && result.is_ok() {
|
|
||||||
// We found a mutable pointer inside a const where inner allocations should be immutable,
|
// We found a mutable pointer inside a const where inner allocations should be immutable,
|
||||||
// and there was no other error. This should usually never happen! However, this can happen
|
// and there was no other error. This should usually never happen! However, this can happen
|
||||||
// in unleash-miri mode, so report it as a normal error then.
|
// in unleash-miri mode, so report it as a normal error then.
|
||||||
if ecx.tcx.sess.opts.unstable_opts.unleash_the_miri_inside_of_you {
|
if ecx.tcx.sess.opts.unstable_opts.unleash_the_miri_inside_of_you {
|
||||||
result = Err(InternError::BadMutablePointer);
|
return Err(InternError::BadMutablePointer);
|
||||||
} else {
|
} else {
|
||||||
span_bug!(
|
span_bug!(
|
||||||
ecx.tcx.span,
|
ecx.tcx.span,
|
||||||
@@ -331,7 +348,7 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx>>(
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
result
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Intern `ret`. This function assumes that `ret` references no other allocation.
|
/// Intern `ret`. This function assumes that `ret` references no other allocation.
|
||||||
|
|||||||
@@ -1310,29 +1310,20 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Mark the given sub-range (relative to this allocation reference) as uninitialized.
|
/// Mark the given sub-range (relative to this allocation reference) as uninitialized.
|
||||||
pub fn write_uninit(&mut self, range: AllocRange) -> InterpResult<'tcx> {
|
pub fn write_uninit(&mut self, range: AllocRange) {
|
||||||
let range = self.range.subrange(range);
|
let range = self.range.subrange(range);
|
||||||
|
|
||||||
self.alloc
|
self.alloc.write_uninit(&self.tcx, range);
|
||||||
.write_uninit(&self.tcx, range)
|
|
||||||
.map_err(|e| e.to_interp_error(self.alloc_id))
|
|
||||||
.into()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Mark the entire referenced range as uninitialized
|
/// Mark the entire referenced range as uninitialized
|
||||||
pub fn write_uninit_full(&mut self) -> InterpResult<'tcx> {
|
pub fn write_uninit_full(&mut self) {
|
||||||
self.alloc
|
self.alloc.write_uninit(&self.tcx, self.range);
|
||||||
.write_uninit(&self.tcx, self.range)
|
|
||||||
.map_err(|e| e.to_interp_error(self.alloc_id))
|
|
||||||
.into()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Remove all provenance in the reference range.
|
/// Remove all provenance in the reference range.
|
||||||
pub fn clear_provenance(&mut self) -> InterpResult<'tcx> {
|
pub fn clear_provenance(&mut self) {
|
||||||
self.alloc
|
self.alloc.clear_provenance(&self.tcx, self.range);
|
||||||
.clear_provenance(&self.tcx, self.range)
|
|
||||||
.map_err(|e| e.to_interp_error(self.alloc_id))
|
|
||||||
.into()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1423,11 +1414,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||||||
|
|
||||||
// Side-step AllocRef and directly access the underlying bytes more efficiently.
|
// Side-step AllocRef and directly access the underlying bytes more efficiently.
|
||||||
// (We are staying inside the bounds here and all bytes do get overwritten so all is good.)
|
// (We are staying inside the bounds here and all bytes do get overwritten so all is good.)
|
||||||
let alloc_id = alloc_ref.alloc_id;
|
let bytes =
|
||||||
let bytes = alloc_ref
|
alloc_ref.alloc.get_bytes_unchecked_for_overwrite(&alloc_ref.tcx, alloc_ref.range);
|
||||||
.alloc
|
|
||||||
.get_bytes_unchecked_for_overwrite(&alloc_ref.tcx, alloc_ref.range)
|
|
||||||
.map_err(move |e| e.to_interp_error(alloc_id))?;
|
|
||||||
// `zip` would stop when the first iterator ends; we want to definitely
|
// `zip` would stop when the first iterator ends; we want to definitely
|
||||||
// cover all of `bytes`.
|
// cover all of `bytes`.
|
||||||
for dest in bytes {
|
for dest in bytes {
|
||||||
@@ -1509,10 +1497,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||||||
// `get_bytes_mut` will clear the provenance, which is correct,
|
// `get_bytes_mut` will clear the provenance, which is correct,
|
||||||
// since we don't want to keep any provenance at the target.
|
// since we don't want to keep any provenance at the target.
|
||||||
// This will also error if copying partial provenance is not supported.
|
// This will also error if copying partial provenance is not supported.
|
||||||
let provenance = src_alloc
|
let provenance =
|
||||||
.provenance()
|
src_alloc.provenance().prepare_copy(src_range, dest_offset, num_copies, self);
|
||||||
.prepare_copy(src_range, dest_offset, num_copies, self)
|
|
||||||
.map_err(|e| e.to_interp_error(src_alloc_id))?;
|
|
||||||
// Prepare a copy of the initialization mask.
|
// Prepare a copy of the initialization mask.
|
||||||
let init = src_alloc.init_mask().prepare_copy(src_range);
|
let init = src_alloc.init_mask().prepare_copy(src_range);
|
||||||
|
|
||||||
@@ -1530,10 +1516,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||||||
dest_range,
|
dest_range,
|
||||||
)?;
|
)?;
|
||||||
// Yes we do overwrite all bytes in `dest_bytes`.
|
// Yes we do overwrite all bytes in `dest_bytes`.
|
||||||
let dest_bytes = dest_alloc
|
let dest_bytes =
|
||||||
.get_bytes_unchecked_for_overwrite_ptr(&tcx, dest_range)
|
dest_alloc.get_bytes_unchecked_for_overwrite_ptr(&tcx, dest_range).as_mut_ptr();
|
||||||
.map_err(|e| e.to_interp_error(dest_alloc_id))?
|
|
||||||
.as_mut_ptr();
|
|
||||||
|
|
||||||
if init.no_bytes_init() {
|
if init.no_bytes_init() {
|
||||||
// Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
|
// Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
|
||||||
@@ -1542,9 +1526,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||||||
// This also avoids writing to the target bytes so that the backing allocation is never
|
// This also avoids writing to the target bytes so that the backing allocation is never
|
||||||
// touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
|
// touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
|
||||||
// operating system this can avoid physically allocating the page.
|
// operating system this can avoid physically allocating the page.
|
||||||
dest_alloc
|
dest_alloc.write_uninit(&tcx, dest_range);
|
||||||
.write_uninit(&tcx, dest_range)
|
|
||||||
.map_err(|e| e.to_interp_error(dest_alloc_id))?;
|
|
||||||
// `write_uninit` also resets the provenance, so we are done.
|
// `write_uninit` also resets the provenance, so we are done.
|
||||||
return interp_ok(());
|
return interp_ok(());
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -700,7 +700,7 @@ where
|
|||||||
|
|
||||||
match value {
|
match value {
|
||||||
Immediate::Scalar(scalar) => {
|
Immediate::Scalar(scalar) => {
|
||||||
alloc.write_scalar(alloc_range(Size::ZERO, scalar.size()), scalar)
|
alloc.write_scalar(alloc_range(Size::ZERO, scalar.size()), scalar)?;
|
||||||
}
|
}
|
||||||
Immediate::ScalarPair(a_val, b_val) => {
|
Immediate::ScalarPair(a_val, b_val) => {
|
||||||
let BackendRepr::ScalarPair(a, b) = layout.backend_repr else {
|
let BackendRepr::ScalarPair(a, b) = layout.backend_repr else {
|
||||||
@@ -720,10 +720,10 @@ where
|
|||||||
alloc.write_scalar(alloc_range(Size::ZERO, a_val.size()), a_val)?;
|
alloc.write_scalar(alloc_range(Size::ZERO, a_val.size()), a_val)?;
|
||||||
alloc.write_scalar(alloc_range(b_offset, b_val.size()), b_val)?;
|
alloc.write_scalar(alloc_range(b_offset, b_val.size()), b_val)?;
|
||||||
// We don't have to reset padding here, `write_immediate` will anyway do a validation run.
|
// We don't have to reset padding here, `write_immediate` will anyway do a validation run.
|
||||||
interp_ok(())
|
|
||||||
}
|
}
|
||||||
Immediate::Uninit => alloc.write_uninit_full(),
|
Immediate::Uninit => alloc.write_uninit_full(),
|
||||||
}
|
}
|
||||||
|
interp_ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn write_uninit(
|
pub fn write_uninit(
|
||||||
@@ -743,7 +743,7 @@ where
|
|||||||
// Zero-sized access
|
// Zero-sized access
|
||||||
return interp_ok(());
|
return interp_ok(());
|
||||||
};
|
};
|
||||||
alloc.write_uninit_full()?;
|
alloc.write_uninit_full();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
interp_ok(())
|
interp_ok(())
|
||||||
@@ -767,7 +767,7 @@ where
|
|||||||
// Zero-sized access
|
// Zero-sized access
|
||||||
return interp_ok(());
|
return interp_ok(());
|
||||||
};
|
};
|
||||||
alloc.clear_provenance()?;
|
alloc.clear_provenance();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
interp_ok(())
|
interp_ok(())
|
||||||
|
|||||||
@@ -949,7 +949,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
|
|||||||
let padding_size = offset - padding_cleared_until;
|
let padding_size = offset - padding_cleared_until;
|
||||||
let range = alloc_range(padding_start, padding_size);
|
let range = alloc_range(padding_start, padding_size);
|
||||||
trace!("reset_padding on {}: resetting padding range {range:?}", mplace.layout.ty);
|
trace!("reset_padding on {}: resetting padding range {range:?}", mplace.layout.ty);
|
||||||
alloc.write_uninit(range)?;
|
alloc.write_uninit(range);
|
||||||
}
|
}
|
||||||
padding_cleared_until = offset + size;
|
padding_cleared_until = offset + size;
|
||||||
}
|
}
|
||||||
@@ -1239,7 +1239,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
|
|||||||
if self.reset_provenance_and_padding {
|
if self.reset_provenance_and_padding {
|
||||||
// We can't share this with above as above, we might be looking at read-only memory.
|
// We can't share this with above as above, we might be looking at read-only memory.
|
||||||
let mut alloc = self.ecx.get_ptr_alloc_mut(mplace.ptr(), size)?.expect("we already excluded size 0");
|
let mut alloc = self.ecx.get_ptr_alloc_mut(mplace.ptr(), size)?.expect("we already excluded size 0");
|
||||||
alloc.clear_provenance()?;
|
alloc.clear_provenance();
|
||||||
// Also, mark this as containing data, not padding.
|
// Also, mark this as containing data, not padding.
|
||||||
self.add_data_range(mplace.ptr(), size);
|
self.add_data_range(mplace.ptr(), size);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -306,8 +306,6 @@ pub enum AllocError {
|
|||||||
ScalarSizeMismatch(ScalarSizeMismatch),
|
ScalarSizeMismatch(ScalarSizeMismatch),
|
||||||
/// Encountered a pointer where we needed raw bytes.
|
/// Encountered a pointer where we needed raw bytes.
|
||||||
ReadPointerAsInt(Option<BadBytesAccess>),
|
ReadPointerAsInt(Option<BadBytesAccess>),
|
||||||
/// Partially overwriting a pointer.
|
|
||||||
OverwritePartialPointer(Size),
|
|
||||||
/// Partially copying a pointer.
|
/// Partially copying a pointer.
|
||||||
ReadPartialPointer(Size),
|
ReadPartialPointer(Size),
|
||||||
/// Using uninitialized data where it is not allowed.
|
/// Using uninitialized data where it is not allowed.
|
||||||
@@ -331,9 +329,6 @@ impl AllocError {
|
|||||||
ReadPointerAsInt(info) => InterpErrorKind::Unsupported(
|
ReadPointerAsInt(info) => InterpErrorKind::Unsupported(
|
||||||
UnsupportedOpInfo::ReadPointerAsInt(info.map(|b| (alloc_id, b))),
|
UnsupportedOpInfo::ReadPointerAsInt(info.map(|b| (alloc_id, b))),
|
||||||
),
|
),
|
||||||
OverwritePartialPointer(offset) => InterpErrorKind::Unsupported(
|
|
||||||
UnsupportedOpInfo::OverwritePartialPointer(Pointer::new(alloc_id, offset)),
|
|
||||||
),
|
|
||||||
ReadPartialPointer(offset) => InterpErrorKind::Unsupported(
|
ReadPartialPointer(offset) => InterpErrorKind::Unsupported(
|
||||||
UnsupportedOpInfo::ReadPartialPointer(Pointer::new(alloc_id, offset)),
|
UnsupportedOpInfo::ReadPartialPointer(Pointer::new(alloc_id, offset)),
|
||||||
),
|
),
|
||||||
@@ -633,11 +628,11 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes>
|
|||||||
&mut self,
|
&mut self,
|
||||||
cx: &impl HasDataLayout,
|
cx: &impl HasDataLayout,
|
||||||
range: AllocRange,
|
range: AllocRange,
|
||||||
) -> AllocResult<&mut [u8]> {
|
) -> &mut [u8] {
|
||||||
self.mark_init(range, true);
|
self.mark_init(range, true);
|
||||||
self.provenance.clear(range, cx)?;
|
self.provenance.clear(range, cx);
|
||||||
|
|
||||||
Ok(&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
|
&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A raw pointer variant of `get_bytes_unchecked_for_overwrite` that avoids invalidating existing immutable aliases
|
/// A raw pointer variant of `get_bytes_unchecked_for_overwrite` that avoids invalidating existing immutable aliases
|
||||||
@@ -646,15 +641,15 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes>
|
|||||||
&mut self,
|
&mut self,
|
||||||
cx: &impl HasDataLayout,
|
cx: &impl HasDataLayout,
|
||||||
range: AllocRange,
|
range: AllocRange,
|
||||||
) -> AllocResult<*mut [u8]> {
|
) -> *mut [u8] {
|
||||||
self.mark_init(range, true);
|
self.mark_init(range, true);
|
||||||
self.provenance.clear(range, cx)?;
|
self.provenance.clear(range, cx);
|
||||||
|
|
||||||
assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
|
assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
|
||||||
// Crucially, we go via `AllocBytes::as_mut_ptr`, not `AllocBytes::deref_mut`.
|
// Crucially, we go via `AllocBytes::as_mut_ptr`, not `AllocBytes::deref_mut`.
|
||||||
let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
|
let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
|
||||||
let len = range.end().bytes_usize() - range.start.bytes_usize();
|
let len = range.end().bytes_usize() - range.start.bytes_usize();
|
||||||
Ok(ptr::slice_from_raw_parts_mut(begin_ptr, len))
|
ptr::slice_from_raw_parts_mut(begin_ptr, len)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// This gives direct mutable access to the entire buffer, just exposing their internal state
|
/// This gives direct mutable access to the entire buffer, just exposing their internal state
|
||||||
@@ -723,26 +718,45 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes>
|
|||||||
let ptr = Pointer::new(prov, Size::from_bytes(bits));
|
let ptr = Pointer::new(prov, Size::from_bytes(bits));
|
||||||
return Ok(Scalar::from_pointer(ptr, cx));
|
return Ok(Scalar::from_pointer(ptr, cx));
|
||||||
}
|
}
|
||||||
|
// The other easy case is total absence of provenance.
|
||||||
// If we can work on pointers byte-wise, join the byte-wise provenances.
|
|
||||||
if Prov::OFFSET_IS_ADDR {
|
|
||||||
let mut prov = self.provenance.get(range.start, cx);
|
|
||||||
for offset in Size::from_bytes(1)..range.size {
|
|
||||||
let this_prov = self.provenance.get(range.start + offset, cx);
|
|
||||||
prov = Prov::join(prov, this_prov);
|
|
||||||
}
|
|
||||||
// Now use this provenance.
|
|
||||||
let ptr = Pointer::new(prov, Size::from_bytes(bits));
|
|
||||||
return Ok(Scalar::from_maybe_pointer(ptr, cx));
|
|
||||||
} else {
|
|
||||||
// Without OFFSET_IS_ADDR, the only remaining case we can handle is total absence of
|
|
||||||
// provenance.
|
|
||||||
if self.provenance.range_empty(range, cx) {
|
if self.provenance.range_empty(range, cx) {
|
||||||
return Ok(Scalar::from_uint(bits, range.size));
|
return Ok(Scalar::from_uint(bits, range.size));
|
||||||
}
|
}
|
||||||
// Else we have mixed provenance, that doesn't work.
|
// If we get here, we have to check per-byte provenance, and join them together.
|
||||||
|
let prov = 'prov: {
|
||||||
|
// Initialize with first fragment. Must have index 0.
|
||||||
|
let Some((mut joint_prov, 0)) = self.provenance.get_byte(range.start, cx) else {
|
||||||
|
break 'prov None;
|
||||||
|
};
|
||||||
|
// Update with the remaining fragments.
|
||||||
|
for offset in Size::from_bytes(1)..range.size {
|
||||||
|
// Ensure there is provenance here and it has the right index.
|
||||||
|
let Some((frag_prov, frag_idx)) =
|
||||||
|
self.provenance.get_byte(range.start + offset, cx)
|
||||||
|
else {
|
||||||
|
break 'prov None;
|
||||||
|
};
|
||||||
|
// Wildcard provenance is allowed to come with any index (this is needed
|
||||||
|
// for Miri's native-lib mode to work).
|
||||||
|
if u64::from(frag_idx) != offset.bytes() && Some(frag_prov) != Prov::WILDCARD {
|
||||||
|
break 'prov None;
|
||||||
|
}
|
||||||
|
// Merge this byte's provenance with the previous ones.
|
||||||
|
joint_prov = match Prov::join(joint_prov, frag_prov) {
|
||||||
|
Some(prov) => prov,
|
||||||
|
None => break 'prov None,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
break 'prov Some(joint_prov);
|
||||||
|
};
|
||||||
|
if prov.is_none() && !Prov::OFFSET_IS_ADDR {
|
||||||
|
// There are some bytes with provenance here but overall the provenance does not add up.
|
||||||
|
// We need `OFFSET_IS_ADDR` to fall back to no-provenance here; without that option, we must error.
|
||||||
return Err(AllocError::ReadPartialPointer(range.start));
|
return Err(AllocError::ReadPartialPointer(range.start));
|
||||||
}
|
}
|
||||||
|
// We can use this provenance.
|
||||||
|
let ptr = Pointer::new(prov, Size::from_bytes(bits));
|
||||||
|
return Ok(Scalar::from_maybe_pointer(ptr, cx));
|
||||||
} else {
|
} else {
|
||||||
// We are *not* reading a pointer.
|
// We are *not* reading a pointer.
|
||||||
// If we can just ignore provenance or there is none, that's easy.
|
// If we can just ignore provenance or there is none, that's easy.
|
||||||
@@ -782,7 +796,7 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes>
|
|||||||
|
|
||||||
let endian = cx.data_layout().endian;
|
let endian = cx.data_layout().endian;
|
||||||
// Yes we do overwrite all the bytes in `dst`.
|
// Yes we do overwrite all the bytes in `dst`.
|
||||||
let dst = self.get_bytes_unchecked_for_overwrite(cx, range)?;
|
let dst = self.get_bytes_unchecked_for_overwrite(cx, range);
|
||||||
write_target_uint(endian, dst, bytes).unwrap();
|
write_target_uint(endian, dst, bytes).unwrap();
|
||||||
|
|
||||||
// See if we have to also store some provenance.
|
// See if we have to also store some provenance.
|
||||||
@@ -795,10 +809,9 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes>
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Write "uninit" to the given memory range.
|
/// Write "uninit" to the given memory range.
|
||||||
pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
|
pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) {
|
||||||
self.mark_init(range, false);
|
self.mark_init(range, false);
|
||||||
self.provenance.clear(range, cx)?;
|
self.provenance.clear(range, cx);
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Mark all bytes in the given range as initialised and reset the provenance
|
/// Mark all bytes in the given range as initialised and reset the provenance
|
||||||
@@ -817,9 +830,12 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes>
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Remove all provenance in the given memory range.
|
/// Remove all provenance in the given memory range.
|
||||||
pub fn clear_provenance(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
|
pub fn clear_provenance(&mut self, cx: &impl HasDataLayout, range: AllocRange) {
|
||||||
self.provenance.clear(range, cx)?;
|
self.provenance.clear(range, cx);
|
||||||
return Ok(());
|
}
|
||||||
|
|
||||||
|
pub fn provenance_merge_bytes(&mut self, cx: &impl HasDataLayout) -> bool {
|
||||||
|
self.provenance.merge_bytes(cx)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Applies a previously prepared provenance copy.
|
/// Applies a previously prepared provenance copy.
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ use rustc_macros::HashStable;
|
|||||||
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
|
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
|
||||||
use tracing::trace;
|
use tracing::trace;
|
||||||
|
|
||||||
use super::{AllocError, AllocRange, AllocResult, CtfeProvenance, Provenance, alloc_range};
|
use super::{AllocRange, CtfeProvenance, Provenance, alloc_range};
|
||||||
|
|
||||||
/// Stores the provenance information of pointers stored in memory.
|
/// Stores the provenance information of pointers stored in memory.
|
||||||
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
|
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
|
||||||
@@ -19,25 +19,25 @@ pub struct ProvenanceMap<Prov = CtfeProvenance> {
|
|||||||
/// `Provenance` in this map applies from the given offset for an entire pointer-size worth of
|
/// `Provenance` in this map applies from the given offset for an entire pointer-size worth of
|
||||||
/// bytes. Two entries in this map are always at least a pointer size apart.
|
/// bytes. Two entries in this map are always at least a pointer size apart.
|
||||||
ptrs: SortedMap<Size, Prov>,
|
ptrs: SortedMap<Size, Prov>,
|
||||||
/// Provenance in this map only applies to the given single byte.
|
/// This stores byte-sized provenance fragments.
|
||||||
/// This map is disjoint from the previous. It will always be empty when
|
/// The `u8` indicates the position of this byte inside its original pointer.
|
||||||
/// `Prov::OFFSET_IS_ADDR` is false.
|
/// If the bytes are re-assembled in their original order, the pointer can be used again.
|
||||||
bytes: Option<Box<SortedMap<Size, Prov>>>,
|
/// Wildcard provenance is allowed to have index 0 everywhere.
|
||||||
|
bytes: Option<Box<SortedMap<Size, (Prov, u8)>>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
// These impls are generic over `Prov` since `CtfeProvenance` is only decodable/encodable
|
// These impls are generic over `Prov` since `CtfeProvenance` is only decodable/encodable
|
||||||
// for some particular `D`/`S`.
|
// for some particular `D`/`S`.
|
||||||
impl<D: Decoder, Prov: Provenance + Decodable<D>> Decodable<D> for ProvenanceMap<Prov> {
|
impl<D: Decoder, Prov: Provenance + Decodable<D>> Decodable<D> for ProvenanceMap<Prov> {
|
||||||
fn decode(d: &mut D) -> Self {
|
fn decode(d: &mut D) -> Self {
|
||||||
assert!(!Prov::OFFSET_IS_ADDR); // only `CtfeProvenance` is ever serialized
|
// `bytes` is not in the serialized format
|
||||||
Self { ptrs: Decodable::decode(d), bytes: None }
|
Self { ptrs: Decodable::decode(d), bytes: None }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
impl<S: Encoder, Prov: Provenance + Encodable<S>> Encodable<S> for ProvenanceMap<Prov> {
|
impl<S: Encoder, Prov: Provenance + Encodable<S>> Encodable<S> for ProvenanceMap<Prov> {
|
||||||
fn encode(&self, s: &mut S) {
|
fn encode(&self, s: &mut S) {
|
||||||
let Self { ptrs, bytes } = self;
|
let Self { ptrs, bytes } = self;
|
||||||
assert!(!Prov::OFFSET_IS_ADDR); // only `CtfeProvenance` is ever serialized
|
assert!(bytes.is_none()); // interning refuses allocations with pointer fragments
|
||||||
debug_assert!(bytes.is_none()); // without `OFFSET_IS_ADDR`, this is always empty
|
|
||||||
ptrs.encode(s)
|
ptrs.encode(s)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -58,10 +58,10 @@ impl ProvenanceMap {
|
|||||||
/// Give access to the ptr-sized provenances (which can also be thought of as relocations, and
|
/// Give access to the ptr-sized provenances (which can also be thought of as relocations, and
|
||||||
/// indeed that is how codegen treats them).
|
/// indeed that is how codegen treats them).
|
||||||
///
|
///
|
||||||
/// Only exposed with `CtfeProvenance` provenance, since it panics if there is bytewise provenance.
|
/// Only use on interned allocations, as other allocations may have per-byte provenance!
|
||||||
#[inline]
|
#[inline]
|
||||||
pub fn ptrs(&self) -> &SortedMap<Size, CtfeProvenance> {
|
pub fn ptrs(&self) -> &SortedMap<Size, CtfeProvenance> {
|
||||||
debug_assert!(self.bytes.is_none()); // `CtfeProvenance::OFFSET_IS_ADDR` is false so this cannot fail
|
assert!(self.bytes.is_none(), "`ptrs()` called on non-interned allocation");
|
||||||
&self.ptrs
|
&self.ptrs
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -88,12 +88,12 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// `pm.range_ptrs_is_empty(r, cx)` == `pm.range_ptrs_get(r, cx).is_empty()`, but is faster.
|
/// `pm.range_ptrs_is_empty(r, cx)` == `pm.range_ptrs_get(r, cx).is_empty()`, but is faster.
|
||||||
pub(super) fn range_ptrs_is_empty(&self, range: AllocRange, cx: &impl HasDataLayout) -> bool {
|
fn range_ptrs_is_empty(&self, range: AllocRange, cx: &impl HasDataLayout) -> bool {
|
||||||
self.ptrs.range_is_empty(Self::adjusted_range_ptrs(range, cx))
|
self.ptrs.range_is_empty(Self::adjusted_range_ptrs(range, cx))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns all byte-wise provenance in the given range.
|
/// Returns all byte-wise provenance in the given range.
|
||||||
fn range_bytes_get(&self, range: AllocRange) -> &[(Size, Prov)] {
|
fn range_bytes_get(&self, range: AllocRange) -> &[(Size, (Prov, u8))] {
|
||||||
if let Some(bytes) = self.bytes.as_ref() {
|
if let Some(bytes) = self.bytes.as_ref() {
|
||||||
bytes.range(range.start..range.end())
|
bytes.range(range.start..range.end())
|
||||||
} else {
|
} else {
|
||||||
@@ -107,19 +107,47 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Get the provenance of a single byte.
|
/// Get the provenance of a single byte.
|
||||||
pub fn get(&self, offset: Size, cx: &impl HasDataLayout) -> Option<Prov> {
|
pub fn get_byte(&self, offset: Size, cx: &impl HasDataLayout) -> Option<(Prov, u8)> {
|
||||||
let prov = self.range_ptrs_get(alloc_range(offset, Size::from_bytes(1)), cx);
|
let prov = self.range_ptrs_get(alloc_range(offset, Size::from_bytes(1)), cx);
|
||||||
debug_assert!(prov.len() <= 1);
|
debug_assert!(prov.len() <= 1);
|
||||||
if let Some(entry) = prov.first() {
|
if let Some(entry) = prov.first() {
|
||||||
// If it overlaps with this byte, it is on this byte.
|
// If it overlaps with this byte, it is on this byte.
|
||||||
debug_assert!(self.bytes.as_ref().is_none_or(|b| !b.contains_key(&offset)));
|
debug_assert!(self.bytes.as_ref().is_none_or(|b| !b.contains_key(&offset)));
|
||||||
Some(entry.1)
|
Some((entry.1, (offset - entry.0).bytes() as u8))
|
||||||
} else {
|
} else {
|
||||||
// Look up per-byte provenance.
|
// Look up per-byte provenance.
|
||||||
self.bytes.as_ref().and_then(|b| b.get(&offset).copied())
|
self.bytes.as_ref().and_then(|b| b.get(&offset).copied())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Attempt to merge per-byte provenance back into ptr chunks, if the right fragments
|
||||||
|
/// sit next to each other. Return `false` is that is not possible due to partial pointers.
|
||||||
|
pub fn merge_bytes(&mut self, cx: &impl HasDataLayout) -> bool {
|
||||||
|
let Some(bytes) = self.bytes.as_deref_mut() else {
|
||||||
|
return true;
|
||||||
|
};
|
||||||
|
let ptr_size = cx.data_layout().pointer_size();
|
||||||
|
while let Some((offset, (prov, _))) = bytes.iter().next().copied() {
|
||||||
|
// Check if this fragment starts a pointer.
|
||||||
|
let range = offset..offset + ptr_size;
|
||||||
|
let frags = bytes.range(range.clone());
|
||||||
|
if frags.len() != ptr_size.bytes_usize() {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
for (idx, (_offset, (frag_prov, frag_idx))) in frags.iter().copied().enumerate() {
|
||||||
|
if frag_prov != prov || frag_idx != idx as u8 {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Looks like a pointer! Move it over to the ptr provenance map.
|
||||||
|
bytes.remove_range(range);
|
||||||
|
self.ptrs.insert(offset, prov);
|
||||||
|
}
|
||||||
|
// We managed to convert everything into whole pointers.
|
||||||
|
self.bytes = None;
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
/// Check if there is ptr-sized provenance at the given index.
|
/// Check if there is ptr-sized provenance at the given index.
|
||||||
/// Does not mean anything for bytewise provenance! But can be useful as an optimization.
|
/// Does not mean anything for bytewise provenance! But can be useful as an optimization.
|
||||||
pub fn get_ptr(&self, offset: Size) -> Option<Prov> {
|
pub fn get_ptr(&self, offset: Size) -> Option<Prov> {
|
||||||
@@ -137,7 +165,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
|
|||||||
|
|
||||||
/// Yields all the provenances stored in this map.
|
/// Yields all the provenances stored in this map.
|
||||||
pub fn provenances(&self) -> impl Iterator<Item = Prov> {
|
pub fn provenances(&self) -> impl Iterator<Item = Prov> {
|
||||||
let bytes = self.bytes.iter().flat_map(|b| b.values());
|
let bytes = self.bytes.iter().flat_map(|b| b.values().map(|(p, _i)| p));
|
||||||
self.ptrs.values().chain(bytes).copied()
|
self.ptrs.values().chain(bytes).copied()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -148,17 +176,13 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
|
|||||||
|
|
||||||
/// Removes all provenance inside the given range.
|
/// Removes all provenance inside the given range.
|
||||||
/// If there is provenance overlapping with the edges, might result in an error.
|
/// If there is provenance overlapping with the edges, might result in an error.
|
||||||
pub fn clear(&mut self, range: AllocRange, cx: &impl HasDataLayout) -> AllocResult {
|
pub fn clear(&mut self, range: AllocRange, cx: &impl HasDataLayout) {
|
||||||
let start = range.start;
|
let start = range.start;
|
||||||
let end = range.end();
|
let end = range.end();
|
||||||
// Clear the bytewise part -- this is easy.
|
// Clear the bytewise part -- this is easy.
|
||||||
if Prov::OFFSET_IS_ADDR {
|
|
||||||
if let Some(bytes) = self.bytes.as_mut() {
|
if let Some(bytes) = self.bytes.as_mut() {
|
||||||
bytes.remove_range(start..end);
|
bytes.remove_range(start..end);
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
debug_assert!(self.bytes.is_none());
|
|
||||||
}
|
|
||||||
|
|
||||||
let pointer_size = cx.data_layout().pointer_size();
|
let pointer_size = cx.data_layout().pointer_size();
|
||||||
|
|
||||||
@@ -168,7 +192,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
|
|||||||
// Find all provenance overlapping the given range.
|
// Find all provenance overlapping the given range.
|
||||||
if self.range_ptrs_is_empty(range, cx) {
|
if self.range_ptrs_is_empty(range, cx) {
|
||||||
// No provenance in this range, we are done. This is the common case.
|
// No provenance in this range, we are done. This is the common case.
|
||||||
return Ok(());
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// This redoes some of the work of `range_get_ptrs_is_empty`, but this path is much
|
// This redoes some of the work of `range_get_ptrs_is_empty`, but this path is much
|
||||||
@@ -179,28 +203,20 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
|
|||||||
|
|
||||||
// We need to handle clearing the provenance from parts of a pointer.
|
// We need to handle clearing the provenance from parts of a pointer.
|
||||||
if first < start {
|
if first < start {
|
||||||
if !Prov::OFFSET_IS_ADDR {
|
|
||||||
// We can't split up the provenance into less than a pointer.
|
|
||||||
return Err(AllocError::OverwritePartialPointer(first));
|
|
||||||
}
|
|
||||||
// Insert the remaining part in the bytewise provenance.
|
// Insert the remaining part in the bytewise provenance.
|
||||||
let prov = self.ptrs[&first];
|
let prov = self.ptrs[&first];
|
||||||
let bytes = self.bytes.get_or_insert_with(Box::default);
|
let bytes = self.bytes.get_or_insert_with(Box::default);
|
||||||
for offset in first..start {
|
for offset in first..start {
|
||||||
bytes.insert(offset, prov);
|
bytes.insert(offset, (prov, (offset - first).bytes() as u8));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if last > end {
|
if last > end {
|
||||||
let begin_of_last = last - pointer_size;
|
let begin_of_last = last - pointer_size;
|
||||||
if !Prov::OFFSET_IS_ADDR {
|
|
||||||
// We can't split up the provenance into less than a pointer.
|
|
||||||
return Err(AllocError::OverwritePartialPointer(begin_of_last));
|
|
||||||
}
|
|
||||||
// Insert the remaining part in the bytewise provenance.
|
// Insert the remaining part in the bytewise provenance.
|
||||||
let prov = self.ptrs[&begin_of_last];
|
let prov = self.ptrs[&begin_of_last];
|
||||||
let bytes = self.bytes.get_or_insert_with(Box::default);
|
let bytes = self.bytes.get_or_insert_with(Box::default);
|
||||||
for offset in end..last {
|
for offset in end..last {
|
||||||
bytes.insert(offset, prov);
|
bytes.insert(offset, (prov, (offset - begin_of_last).bytes() as u8));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -208,8 +224,6 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
|
|||||||
// Since provenance do not overlap, we know that removing until `last` (exclusive) is fine,
|
// Since provenance do not overlap, we know that removing until `last` (exclusive) is fine,
|
||||||
// i.e., this will not remove any other provenance just after the ones we care about.
|
// i.e., this will not remove any other provenance just after the ones we care about.
|
||||||
self.ptrs.remove_range(first..last);
|
self.ptrs.remove_range(first..last);
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Overwrites all provenance in the given range with wildcard provenance.
|
/// Overwrites all provenance in the given range with wildcard provenance.
|
||||||
@@ -218,10 +232,6 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
|
|||||||
///
|
///
|
||||||
/// Provided for usage in Miri and panics otherwise.
|
/// Provided for usage in Miri and panics otherwise.
|
||||||
pub fn write_wildcards(&mut self, cx: &impl HasDataLayout, range: AllocRange) {
|
pub fn write_wildcards(&mut self, cx: &impl HasDataLayout, range: AllocRange) {
|
||||||
assert!(
|
|
||||||
Prov::OFFSET_IS_ADDR,
|
|
||||||
"writing wildcard provenance is not supported when `OFFSET_IS_ADDR` is false"
|
|
||||||
);
|
|
||||||
let wildcard = Prov::WILDCARD.unwrap();
|
let wildcard = Prov::WILDCARD.unwrap();
|
||||||
|
|
||||||
let bytes = self.bytes.get_or_insert_with(Box::default);
|
let bytes = self.bytes.get_or_insert_with(Box::default);
|
||||||
@@ -229,21 +239,22 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
|
|||||||
// Remove pointer provenances that overlap with the range, then readd the edge ones bytewise.
|
// Remove pointer provenances that overlap with the range, then readd the edge ones bytewise.
|
||||||
let ptr_range = Self::adjusted_range_ptrs(range, cx);
|
let ptr_range = Self::adjusted_range_ptrs(range, cx);
|
||||||
let ptrs = self.ptrs.range(ptr_range.clone());
|
let ptrs = self.ptrs.range(ptr_range.clone());
|
||||||
if let Some((offset, prov)) = ptrs.first() {
|
if let Some((offset, prov)) = ptrs.first().copied() {
|
||||||
for byte_ofs in *offset..range.start {
|
for byte_ofs in offset..range.start {
|
||||||
bytes.insert(byte_ofs, *prov);
|
bytes.insert(byte_ofs, (prov, (byte_ofs - offset).bytes() as u8));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if let Some((offset, prov)) = ptrs.last() {
|
if let Some((offset, prov)) = ptrs.last().copied() {
|
||||||
for byte_ofs in range.end()..*offset + cx.data_layout().pointer_size() {
|
for byte_ofs in range.end()..offset + cx.data_layout().pointer_size() {
|
||||||
bytes.insert(byte_ofs, *prov);
|
bytes.insert(byte_ofs, (prov, (byte_ofs - offset).bytes() as u8));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
self.ptrs.remove_range(ptr_range);
|
self.ptrs.remove_range(ptr_range);
|
||||||
|
|
||||||
// Overwrite bytewise provenance.
|
// Overwrite bytewise provenance.
|
||||||
for offset in range.start..range.end() {
|
for offset in range.start..range.end() {
|
||||||
bytes.insert(offset, wildcard);
|
// The fragment index does not matter for wildcard provenance.
|
||||||
|
bytes.insert(offset, (wildcard, 0));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -253,7 +264,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
|
|||||||
/// Offsets are already adjusted to the destination allocation.
|
/// Offsets are already adjusted to the destination allocation.
|
||||||
pub struct ProvenanceCopy<Prov> {
|
pub struct ProvenanceCopy<Prov> {
|
||||||
dest_ptrs: Option<Box<[(Size, Prov)]>>,
|
dest_ptrs: Option<Box<[(Size, Prov)]>>,
|
||||||
dest_bytes: Option<Box<[(Size, Prov)]>>,
|
dest_bytes: Option<Box<[(Size, (Prov, u8))]>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<Prov: Provenance> ProvenanceMap<Prov> {
|
impl<Prov: Provenance> ProvenanceMap<Prov> {
|
||||||
@@ -263,7 +274,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
|
|||||||
dest: Size,
|
dest: Size,
|
||||||
count: u64,
|
count: u64,
|
||||||
cx: &impl HasDataLayout,
|
cx: &impl HasDataLayout,
|
||||||
) -> AllocResult<ProvenanceCopy<Prov>> {
|
) -> ProvenanceCopy<Prov> {
|
||||||
let shift_offset = move |idx, offset| {
|
let shift_offset = move |idx, offset| {
|
||||||
// compute offset for current repetition
|
// compute offset for current repetition
|
||||||
let dest_offset = dest + src.size * idx; // `Size` operations
|
let dest_offset = dest + src.size * idx; // `Size` operations
|
||||||
@@ -301,24 +312,16 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
|
|||||||
let mut dest_bytes_box = None;
|
let mut dest_bytes_box = None;
|
||||||
let begin_overlap = self.range_ptrs_get(alloc_range(src.start, Size::ZERO), cx).first();
|
let begin_overlap = self.range_ptrs_get(alloc_range(src.start, Size::ZERO), cx).first();
|
||||||
let end_overlap = self.range_ptrs_get(alloc_range(src.end(), Size::ZERO), cx).first();
|
let end_overlap = self.range_ptrs_get(alloc_range(src.end(), Size::ZERO), cx).first();
|
||||||
if !Prov::OFFSET_IS_ADDR {
|
// We only need to go here if there is some overlap or some bytewise provenance.
|
||||||
// There can't be any bytewise provenance, and we cannot split up the begin/end overlap.
|
if begin_overlap.is_some() || end_overlap.is_some() || self.bytes.is_some() {
|
||||||
if let Some(entry) = begin_overlap {
|
+            let mut bytes: Vec<(Size, (Prov, u8))> = Vec::new();
-                return Err(AllocError::ReadPartialPointer(entry.0));
-            }
-            if let Some(entry) = end_overlap {
-                return Err(AllocError::ReadPartialPointer(entry.0));
-            }
-            debug_assert!(self.bytes.is_none());
-        } else {
-            let mut bytes = Vec::new();
            // First, if there is a part of a pointer at the start, add that.
            if let Some(entry) = begin_overlap {
                trace!("start overlapping entry: {entry:?}");
                // For really small copies, make sure we don't run off the end of the `src` range.
                let entry_end = cmp::min(entry.0 + ptr_size, src.end());
                for offset in src.start..entry_end {
-                    bytes.push((offset, entry.1));
+                    bytes.push((offset, (entry.1, (offset - entry.0).bytes() as u8)));
                }
            } else {
                trace!("no start overlapping entry");
@@ -334,8 +337,9 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
                let entry_start = cmp::max(entry.0, src.start);
                for offset in entry_start..src.end() {
                    if bytes.last().is_none_or(|bytes_entry| bytes_entry.0 < offset) {
-                        // The last entry, if it exists, has a lower offset than us.
-                        bytes.push((offset, entry.1));
+                        // The last entry, if it exists, has a lower offset than us, so we
+                        // can add it at the end and remain sorted.
+                        bytes.push((offset, (entry.1, (offset - entry.0).bytes() as u8)));
                    } else {
                        // There already is an entry for this offset in there! This can happen when the
                        // start and end range checks actually end up hitting the same pointer, so we
@@ -358,7 +362,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
            dest_bytes_box = Some(dest_bytes.into_boxed_slice());
        }

-        Ok(ProvenanceCopy { dest_ptrs: dest_ptrs_box, dest_bytes: dest_bytes_box })
+        ProvenanceCopy { dest_ptrs: dest_ptrs_box, dest_bytes: dest_bytes_box }
    }

    /// Applies a provenance copy.
@@ -368,14 +372,10 @@ impl<Prov: Provenance> ProvenanceMap<Prov> {
        if let Some(dest_ptrs) = copy.dest_ptrs {
            self.ptrs.insert_presorted(dest_ptrs.into());
        }
-        if Prov::OFFSET_IS_ADDR {
        if let Some(dest_bytes) = copy.dest_bytes
            && !dest_bytes.is_empty()
        {
            self.bytes.get_or_insert_with(Box::default).insert_presorted(dest_bytes.into());
        }
-        } else {
-            debug_assert!(copy.dest_bytes.is_none());
-        }
    }
}
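The `(Prov, u8)` entries above pair each stored byte with the index of the byte within its pointer. Below is a minimal standalone sketch of the invariant this encodes (illustrative types and names, not the interpreter's): a pointer can only be read back when every byte carries the same provenance and the fragment indices line up in order, which is what the mix-ptrs tests further down exercise.

// Illustrative model only; `Prov` stands in for the interpreter's provenance type.
#[derive(Copy, Clone, PartialEq, Debug)]
struct Prov(u64);

/// One stored byte of a pointer: where it came from and which byte of that pointer it is.
type Fragment = (Prov, u8);

/// A pointer can be reassembled only from a complete, in-order run of fragments
/// that all share the same provenance.
fn reassemble(frags: &[Option<Fragment>]) -> Option<Prov> {
    let (first_prov, _) = frags.first().copied().flatten()?;
    for (i, frag) in frags.iter().enumerate() {
        match frag {
            Some((prov, idx)) if *prov == first_prov && *idx as usize == i => {}
            _ => return None,
        }
    }
    Some(first_prov)
}

fn main() {
    let p = Prov(7);
    let ok: Vec<_> = (0..8u8).map(|i| Some((p, i))).collect();
    assert_eq!(reassemble(&ok), Some(p));
    // Same provenance but out of order: rejected, like the `mix-ptrs` tests below.
    let mut swapped = ok.clone();
    swapped.swap(0, 1);
    assert_eq!(reassemble(&swapped), None);
}

A fragment run that stops halfway never reassembles into a pointer, which is also why the `ptr_fragments_in_final.rs` test below is an error.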
@@ -577,9 +577,6 @@ pub enum UnsupportedOpInfo {
    //
    // The variants below are only reachable from CTFE/const prop, miri will never emit them.
    //
-    /// Overwriting parts of a pointer; without knowing absolute addresses, the resulting state
-    /// cannot be represented by the CTFE interpreter.
-    OverwritePartialPointer(Pointer<AllocId>),
    /// Attempting to read or copy parts of a pointer to somewhere else; without knowing absolute
    /// addresses, the resulting state cannot be represented by the CTFE interpreter.
    ReadPartialPointer(Pointer<AllocId>),
@@ -56,7 +56,7 @@ impl<T: HasDataLayout> PointerArithmetic for T {}
/// mostly opaque; the `Machine` trait extends it with some more operations that also have access to
/// some global state.
/// The `Debug` rendering is used to display bare provenance, and for the default impl of `fmt`.
-pub trait Provenance: Copy + fmt::Debug + 'static {
+pub trait Provenance: Copy + PartialEq + fmt::Debug + 'static {
    /// Says whether the `offset` field of `Pointer`s with this provenance is the actual physical address.
    /// - If `false`, the offset *must* be relative. This means the bytes representing a pointer are
    ///   different from what the Abstract Machine prescribes, so the interpreter must prevent any
@@ -79,7 +79,7 @@ pub trait Provenance: Copy + fmt::Debug + 'static {
    fn get_alloc_id(self) -> Option<AllocId>;

    /// Defines the 'join' of provenance: what happens when doing a pointer load and different bytes have different provenance.
-    fn join(left: Option<Self>, right: Option<Self>) -> Option<Self>;
+    fn join(left: Self, right: Self) -> Option<Self>;
}

/// The type of provenance in the compile-time interpreter.
@@ -192,8 +192,8 @@ impl Provenance for CtfeProvenance {
        Some(self.alloc_id())
    }

-    fn join(_left: Option<Self>, _right: Option<Self>) -> Option<Self> {
-        panic!("merging provenance is not supported when `OFFSET_IS_ADDR` is false")
+    fn join(left: Self, right: Self) -> Option<Self> {
+        if left == right { Some(left) } else { None }
    }
}

@@ -224,8 +224,8 @@ impl Provenance for AllocId {
        Some(self)
    }

-    fn join(_left: Option<Self>, _right: Option<Self>) -> Option<Self> {
-        panic!("merging provenance is not supported when `OFFSET_IS_ADDR` is false")
+    fn join(_left: Self, _right: Self) -> Option<Self> {
+        unreachable!()
    }
}

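A standalone illustration of how the reshaped `join` composes across the bytes of a pointer-sized load: provenance survives only if every byte has some provenance and all of them agree. This is a simplified model that ignores fragment order, written with made-up stand-in types rather than the interpreter's own.

#[derive(Copy, Clone, PartialEq, Debug)]
struct Prov(u64); // stand-in for `CtfeProvenance` / Miri's `Provenance`

// Mirrors the new `fn join(left: Self, right: Self) -> Option<Self>` shape.
fn join(left: Prov, right: Prov) -> Option<Prov> {
    if left == right { Some(left) } else { None }
}

/// Fold `join` over the provenance of each byte of a load; any byte without
/// provenance, or any disagreement, means the loaded pointer has none.
fn join_all(bytes: &[Option<Prov>]) -> Option<Prov> {
    let mut iter = bytes.iter().copied();
    let first = iter.next()??;
    iter.try_fold(first, |acc, b| join(acc, b?))
}

fn main() {
    let p = Prov(1);
    assert_eq!(join_all(&[Some(p); 8]), Some(p));
    assert_eq!(join_all(&[Some(p), Some(Prov(2))]), None);
    assert_eq!(join_all(&[Some(p), None]), None);
}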
@@ -1826,7 +1826,7 @@ pub fn write_allocation_bytes<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
            ascii.push('╼');
            i += ptr_size;
        }
-    } else if let Some(prov) = alloc.provenance().get(i, &tcx) {
+    } else if let Some((prov, idx)) = alloc.provenance().get_byte(i, &tcx) {
        // Memory with provenance must be defined
        assert!(
            alloc.init_mask().is_range_initialized(alloc_range(i, Size::from_bytes(1))).is_ok()
@@ -1836,7 +1836,7 @@ pub fn write_allocation_bytes<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
        // Format is similar to "oversized" above.
        let j = i.bytes_usize();
        let c = alloc.inspect_with_uninit_and_ptr_outside_interpreter(j..j + 1)[0];
-        write!(w, "╾{c:02x}{prov:#?} (1 ptr byte)╼")?;
+        write!(w, "╾{c:02x}{prov:#?} (ptr fragment {idx})╼")?;
        i += Size::from_bytes(1);
    } else if alloc
        .init_mask()
@@ -1345,40 +1345,6 @@ pub const unsafe fn swap<T>(x: *mut T, y: *mut T) {
/// assert_eq!(x, [7, 8, 3, 4]);
/// assert_eq!(y, [1, 2, 9]);
/// ```
-///
-/// # Const evaluation limitations
-///
-/// If this function is invoked during const-evaluation, the current implementation has a small (and
-/// rarely relevant) limitation: if `count` is at least 2 and the data pointed to by `x` or `y`
-/// contains a pointer that crosses the boundary of two `T`-sized chunks of memory, the function may
-/// fail to evaluate (similar to a panic during const-evaluation). This behavior may change in the
-/// future.
-///
-/// The limitation is illustrated by the following example:
-///
-/// ```
-/// use std::mem::size_of;
-/// use std::ptr;
-///
-/// const { unsafe {
-///     const PTR_SIZE: usize = size_of::<*const i32>();
-///     let mut data1 = [0u8; PTR_SIZE];
-///     let mut data2 = [0u8; PTR_SIZE];
-///     // Store a pointer in `data1`.
-///     data1.as_mut_ptr().cast::<*const i32>().write_unaligned(&42);
-///     // Swap the contents of `data1` and `data2` by swapping `PTR_SIZE` many `u8`-sized chunks.
-///     // This call will fail, because the pointer in `data1` crosses the boundary
-///     // between several of the 1-byte chunks that are being swapped here.
-///     //ptr::swap_nonoverlapping(data1.as_mut_ptr(), data2.as_mut_ptr(), PTR_SIZE);
-///     // Swap the contents of `data1` and `data2` by swapping a single chunk of size
-///     // `[u8; PTR_SIZE]`. That works, as there is no pointer crossing the boundary between
-///     // two chunks.
-///     ptr::swap_nonoverlapping(&mut data1, &mut data2, 1);
-///     // Read the pointer from `data2` and dereference it.
-///     let ptr = data2.as_ptr().cast::<*const i32>().read_unaligned();
-///     assert!(*ptr == 42);
-/// } }
-/// ```
#[inline]
#[stable(feature = "swap_nonoverlapping", since = "1.27.0")]
#[rustc_const_stable(feature = "const_swap_nonoverlapping", since = "1.88.0")]
@@ -1407,9 +1373,7 @@ pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
    const_eval_select!(
        @capture[T] { x: *mut T, y: *mut T, count: usize }:
        if const {
-            // At compile-time we want to always copy this in chunks of `T`, to ensure that if there
-            // are pointers inside `T` we will copy them in one go rather than trying to copy a part
-            // of a pointer (which would not work).
+            // At compile-time we don't need all the special code below.
            // SAFETY: Same preconditions as this function
            unsafe { swap_nonoverlapping_const(x, y, count) }
        } else {
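With the limitation section deleted, the bytewise swap it warned about is expected to evaluate. The following is a hedged sketch adapted from the removed doc example, not part of the diff; it assumes this commit's per-byte fragment support and the const-stability of the pointer methods it uses.

// Sketch only (assumes this commit's const-eval fragment support).
use std::mem::size_of;
use std::ptr;

const PTR_SIZE: usize = size_of::<*const i32>();

const _SWAP_BYTEWISE: () = unsafe {
    let mut data1 = [0u8; PTR_SIZE];
    let mut data2 = [0u8; PTR_SIZE];
    // Store a pointer in `data1`.
    (&raw mut data1).cast::<*const i32>().write_unaligned(&42);
    // Swap the buffers as `PTR_SIZE` many `u8`-sized chunks; the pointer is copied
    // as per-byte fragments and reassembled on the other side.
    ptr::swap_nonoverlapping(
        (&raw mut data1).cast::<u8>(),
        (&raw mut data2).cast::<u8>(),
        PTR_SIZE,
    );
    // The reassembled pointer in `data2` is usable again.
    let p = (&raw const data2).cast::<*const i32>().read_unaligned();
    assert!(*p == 42);
};

The `test_const_swap_ptr` change directly below exercises the same behaviour through the public API.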
@@ -936,22 +936,18 @@ fn test_const_swap_ptr() {
        assert!(*s1.0.ptr == 666);
        assert!(*s2.0.ptr == 1);

-        // Swap them back, again as an array.
+        // Swap them back, byte-for-byte
        unsafe {
            ptr::swap_nonoverlapping(
-                ptr::from_mut(&mut s1).cast::<T>(),
-                ptr::from_mut(&mut s2).cast::<T>(),
-                1,
+                ptr::from_mut(&mut s1).cast::<u8>(),
+                ptr::from_mut(&mut s2).cast::<u8>(),
+                size_of::<A>(),
            );
        }

        // Make sure they still work.
        assert!(*s1.0.ptr == 1);
        assert!(*s2.0.ptr == 666);
-
-        // This is where we'd swap again using a `u8` type and a `count` of `size_of::<T>()` if it
-        // were not for the limitation of `swap_nonoverlapping` around pointers crossing multiple
-        // elements.
    };
}

@@ -285,16 +285,16 @@ impl interpret::Provenance for Provenance {
        Ok(())
    }

-    fn join(left: Option<Self>, right: Option<Self>) -> Option<Self> {
+    fn join(left: Self, right: Self) -> Option<Self> {
        match (left, right) {
            // If both are the *same* concrete tag, that is the result.
            (
-                Some(Provenance::Concrete { alloc_id: left_alloc, tag: left_tag }),
-                Some(Provenance::Concrete { alloc_id: right_alloc, tag: right_tag }),
-            ) if left_alloc == right_alloc && left_tag == right_tag => left,
+                Provenance::Concrete { alloc_id: left_alloc, tag: left_tag },
+                Provenance::Concrete { alloc_id: right_alloc, tag: right_tag },
+            ) if left_alloc == right_alloc && left_tag == right_tag => Some(left),
            // If one side is a wildcard, the best possible outcome is that it is equal to the other
            // one, and we use that.
-            (Some(Provenance::Wildcard), o) | (o, Some(Provenance::Wildcard)) => o,
+            (Provenance::Wildcard, o) | (o, Provenance::Wildcard) => Some(o),
            // Otherwise, fall back to `None`.
            _ => None,
        }
@@ -246,7 +246,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
        let p_map = alloc.provenance();
        for idx in overlap {
            // If a provenance was read by the foreign code, expose it.
-            if let Some(prov) = p_map.get(Size::from_bytes(idx), this) {
+            if let Some((prov, _idx)) = p_map.get_byte(Size::from_bytes(idx), this) {
                this.expose_provenance(prov)?;
            }
        }
src/tools/miri/tests/fail/provenance/mix-ptrs1.rs (new file, 21 lines)
@@ -0,0 +1,21 @@
+use std::{mem, ptr};
+
+const PTR_SIZE: usize = mem::size_of::<&i32>();
+
+fn main() {
+    unsafe {
+        let ptr = &0 as *const i32;
+        let arr = [ptr; 2];
+        // We want to do a scalar read of a pointer at offset PTR_SIZE/2 into this array. But we
+        // cannot use a packed struct or `read_unaligned`, as those use the memcpy code path in
+        // Miri. So instead we shift the entire array by a bit and then the actual read we want to
+        // do is perfectly aligned.
+        let mut target_arr = [ptr::null::<i32>(); 3];
+        let target = target_arr.as_mut_ptr().cast::<u8>();
+        target.add(PTR_SIZE / 2).cast::<[*const i32; 2]>().write_unaligned(arr);
+        // Now target_arr[1] is a mix of the two `ptr` we had stored in `arr`.
+        // They all have the same provenance, but not in the right order, so we reject this.
+        let strange_ptr = target_arr[1];
+        assert_eq!(*strange_ptr.with_addr(ptr.addr()), 0); //~ERROR: no provenance
+    }
+}
src/tools/miri/tests/fail/provenance/mix-ptrs1.stderr (new file, 16 lines)
@@ -0,0 +1,16 @@
+error: Undefined Behavior: pointer not dereferenceable: pointer must be dereferenceable for 4 bytes, but got $HEX[noalloc] which is a dangling pointer (it has no provenance)
+  --> tests/fail/provenance/mix-ptrs1.rs:LL:CC
+   |
+LL |         assert_eq!(*strange_ptr.with_addr(ptr.addr()), 0);
+   |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Undefined Behavior occurred here
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE:
+   = note: inside `main` at RUSTLIB/core/src/macros/mod.rs:LL:CC
+   = note: this error originates in the macro `assert_eq` (in Nightly builds, run with -Z macro-backtrace for more info)
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+
src/tools/miri/tests/fail/provenance/mix-ptrs2.rs (new file, 37 lines)
@@ -0,0 +1,37 @@
+use std::mem;
+
+const PTR_SIZE: usize = mem::size_of::<&i32>();
+
+/// Overwrite one byte of a pointer, then restore it.
+fn main() {
+    unsafe fn ptr_bytes<'x>(ptr: &'x mut *const i32) -> &'x mut [mem::MaybeUninit<u8>; PTR_SIZE] {
+        mem::transmute(ptr)
+    }
+
+    // Returns a value with the same provenance as `x` but 0 for the integer value.
+    // `x` must be initialized.
+    unsafe fn zero_with_provenance(x: mem::MaybeUninit<u8>) -> mem::MaybeUninit<u8> {
+        let ptr = [x; PTR_SIZE];
+        let ptr: *const i32 = mem::transmute(ptr);
+        let mut ptr = ptr.with_addr(0);
+        ptr_bytes(&mut ptr)[0]
+    }
+
+    unsafe {
+        let ptr = &42;
+        let mut ptr = ptr as *const i32;
+        // Get a bytewise view of the pointer.
+        let ptr_bytes = ptr_bytes(&mut ptr);
+
+        // The highest bytes must be 0 for this to work.
+        let hi = if cfg!(target_endian = "little") { ptr_bytes.len() - 1 } else { 0 };
+        assert_eq!(*ptr_bytes[hi].as_ptr().cast::<u8>(), 0);
+        // Overwrite provenance on the last byte.
+        ptr_bytes[hi] = mem::MaybeUninit::new(0);
+        // Restore it from another byte.
+        ptr_bytes[hi] = zero_with_provenance(ptr_bytes[1]);
+
+        // Now ptr is almost good, except the provenance fragment indices do not work out...
+        assert_eq!(*ptr, 42); //~ERROR: no provenance
+    }
+}
src/tools/miri/tests/fail/provenance/mix-ptrs2.stderr (new file, 16 lines)
@@ -0,0 +1,16 @@
+error: Undefined Behavior: pointer not dereferenceable: pointer must be dereferenceable for 4 bytes, but got $HEX[noalloc] which is a dangling pointer (it has no provenance)
+  --> tests/fail/provenance/mix-ptrs2.rs:LL:CC
+   |
+LL |         assert_eq!(*ptr, 42);
+   |         ^^^^^^^^^^^^^^^^^^^^ Undefined Behavior occurred here
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE:
+   = note: inside `main` at RUSTLIB/core/src/macros/mod.rs:LL:CC
+   = note: this error originates in the macro `assert_eq` (in Nightly builds, run with -Z macro-backtrace for more info)
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+
@@ -9,11 +9,10 @@ use std::alloc::{Layout, alloc, dealloc};
use std::mem::{self, MaybeUninit};
use std::slice::from_raw_parts;

-fn byte_with_provenance<T>(val: u8, prov: *const T) -> MaybeUninit<u8> {
-    let ptr = prov.with_addr(val as usize);
+fn byte_with_provenance<T>(val: u8, prov: *const T, frag_idx: usize) -> MaybeUninit<u8> {
+    let ptr = prov.with_addr(usize::from_ne_bytes([val; _]));
    let bytes: [MaybeUninit<u8>; mem::size_of::<*const ()>()] = unsafe { mem::transmute(ptr) };
-    let lsb = if cfg!(target_endian = "little") { 0 } else { bytes.len() - 1 };
-    bytes[lsb]
+    bytes[frag_idx]
}

fn main() {
@@ -21,10 +20,10 @@ fn main() {
    unsafe {
        let ptr = alloc(layout);
        let ptr_raw = ptr.cast::<MaybeUninit<u8>>();
-        *ptr_raw.add(0) = byte_with_provenance(0x42, &42u8);
+        *ptr_raw.add(0) = byte_with_provenance(0x42, &42u8, 0);
        *ptr.add(1) = 0x12;
        *ptr.add(2) = 0x13;
-        *ptr_raw.add(3) = byte_with_provenance(0x43, &0u8);
+        *ptr_raw.add(3) = byte_with_provenance(0x43, &0u8, 1);
        let slice1 = from_raw_parts(ptr, 8);
        let slice2 = from_raw_parts(ptr.add(8), 8);
        drop(slice1.cmp(slice2));
@@ -17,7 +17,7 @@ LL | drop(slice1.cmp(slice2));
   |
Uninitialized memory occurred at ALLOC[0x4..0x8], in this allocation:
ALLOC (Rust heap, size: 16, align: 8) {
-    ╾42[ALLOC]<TAG> (1 ptr byte)╼ 12 13 ╾43[ALLOC]<TAG> (1 ptr byte)╼ __ __ __ __ __ __ __ __ __ __ __ __ │ ━..━░░░░░░░░░░░░
+    ╾42[ALLOC]<TAG> (ptr fragment 0)╼ 12 13 ╾43[ALLOC]<TAG> (ptr fragment 1)╼ __ __ __ __ __ __ __ __ __ __ __ __ │ ━..━░░░░░░░░░░░░
}
ALLOC (global (static or const), size: 1, align: 1) {
    2a │ *
@@ -35,7 +35,7 @@ LL | partial_init();
   |
Uninitialized memory occurred at ALLOC[0x2..0x3], in this allocation:
ALLOC (stack variable, size: 3, align: 1) {
-    ╾00[wildcard] (1 ptr byte)╼ ╾00[wildcard] (1 ptr byte)╼ __ │ ━━░
+    ╾00[wildcard] (ptr fragment 0)╼ ╾00[wildcard] (ptr fragment 0)╼ __ │ ━━░
}

note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
@@ -6,7 +6,6 @@ const PTR_SIZE: usize = mem::size_of::<&i32>();

fn main() {
    basic();
-    partial_overwrite_then_restore();
    bytewise_ptr_methods();
    bytewise_custom_memcpy();
    bytewise_custom_memcpy_chunked();
@@ -29,40 +28,6 @@ fn basic() {
    assert_eq!(unsafe { *ptr_back }, 42);
}

-/// Overwrite one byte of a pointer, then restore it.
-fn partial_overwrite_then_restore() {
-    unsafe fn ptr_bytes<'x>(ptr: &'x mut *const i32) -> &'x mut [mem::MaybeUninit<u8>; PTR_SIZE] {
-        mem::transmute(ptr)
-    }
-
-    // Returns a value with the same provenance as `x` but 0 for the integer value.
-    // `x` must be initialized.
-    unsafe fn zero_with_provenance(x: mem::MaybeUninit<u8>) -> mem::MaybeUninit<u8> {
-        let ptr = [x; PTR_SIZE];
-        let ptr: *const i32 = mem::transmute(ptr);
-        let mut ptr = ptr.with_addr(0);
-        ptr_bytes(&mut ptr)[0]
-    }
-
-    unsafe {
-        let ptr = &42;
-        let mut ptr = ptr as *const i32;
-        // Get a bytewise view of the pointer.
-        let ptr_bytes = ptr_bytes(&mut ptr);
-
-        // The highest bytes must be 0 for this to work.
-        let hi = if cfg!(target_endian = "little") { ptr_bytes.len() - 1 } else { 0 };
-        assert_eq!(*ptr_bytes[hi].as_ptr().cast::<u8>(), 0);
-        // Overwrite provenance on the last byte.
-        ptr_bytes[hi] = mem::MaybeUninit::new(0);
-        // Restore it from the another byte.
-        ptr_bytes[hi] = zero_with_provenance(ptr_bytes[1]);
-
-        // Now ptr should be good again.
-        assert_eq!(*ptr, 42);
-    }
-}
-
fn bytewise_ptr_methods() {
    let mut ptr1 = &1;
    let mut ptr2 = &2;
@@ -1,6 +1,6 @@
//@revisions: stack tree
//@[tree]compile-flags: -Zmiri-tree-borrows
-use std::{mem, ptr};
+use std::mem;

fn t1() {
    // If we are careful, we can exploit data layout...
@@ -27,27 +27,8 @@ fn ptr_integer_array() {
    let _x: [u8; PTR_SIZE] = unsafe { mem::transmute(&0) };
}

-fn ptr_in_two_halves() {
-    unsafe {
-        let ptr = &0 as *const i32;
-        let arr = [ptr; 2];
-        // We want to do a scalar read of a pointer at offset PTR_SIZE/2 into this array. But we
-        // cannot use a packed struct or `read_unaligned`, as those use the memcpy code path in
-        // Miri. So instead we shift the entire array by a bit and then the actual read we want to
-        // do is perfectly aligned.
-        let mut target_arr = [ptr::null::<i32>(); 3];
-        let target = target_arr.as_mut_ptr().cast::<u8>();
-        target.add(PTR_SIZE / 2).cast::<[*const i32; 2]>().write_unaligned(arr);
-        // Now target_arr[1] is a mix of the two `ptr` we had stored in `arr`.
-        let strange_ptr = target_arr[1];
-        // Check that the provenance works out.
-        assert_eq!(*strange_ptr.with_addr(ptr.addr()), 0);
-    }
-}
-
fn main() {
    t1();
    t2();
    ptr_integer_array();
-    ptr_in_two_halves();
}
@@ -4,7 +4,7 @@ error: encountered `const_allocate` pointer in final value that was not made glo
LL | const FOO: &i32 = foo();
   | ^^^^^^^^^^^^^^^
   |
-   = note: use `const_make_global` to make allocated pointers immutable before returning
+   = note: use `const_make_global` to turn allocated pointers into immutable globals before returning

error: encountered `const_allocate` pointer in final value that was not made global
  --> $DIR/ptr_not_made_global.rs:12:1
@@ -12,7 +12,7 @@ error: encountered `const_allocate` pointer in final value that was not made glo
LL | const FOO_RAW: *const i32 = foo();
   | ^^^^^^^^^^^^^^^^^^^^^^^^^
   |
-   = note: use `const_make_global` to make allocated pointers immutable before returning
+   = note: use `const_make_global` to turn allocated pointers into immutable globals before returning

error: aborting due to 2 previous errors

@@ -4,7 +4,7 @@ error: encountered `const_allocate` pointer in final value that was not made glo
LL | const BAR: *mut i32 = unsafe { intrinsics::const_allocate(4, 4) as *mut i32 };
   | ^^^^^^^^^^^^^^^^^^^
   |
-   = note: use `const_make_global` to make allocated pointers immutable before returning
+   = note: use `const_make_global` to turn allocated pointers into immutable globals before returning

error: aborting due to 1 previous error

@@ -1,12 +0,0 @@
-// Test for the behavior described in <https://github.com/rust-lang/rust/issues/87184>.
-
-const PARTIAL_OVERWRITE: () = {
-    let mut p = &42;
-    unsafe {
-        let ptr: *mut _ = &mut p;
-        *(ptr as *mut u8) = 123; //~ ERROR unable to overwrite parts of a pointer
-    }
-    let x = *p;
-};
-
-fn main() {}
@@ -1,12 +0,0 @@
-error[E0080]: unable to overwrite parts of a pointer in memory at ALLOC0
-  --> $DIR/partial_ptr_overwrite.rs:7:9
-   |
-LL |         *(ptr as *mut u8) = 123;
-   |         ^^^^^^^^^^^^^^^^^^^^^^^ evaluation of `PARTIAL_OVERWRITE` failed here
-   |
-   = help: this code performed an operation that depends on the underlying bytes representing a pointer
-   = help: the absolute address of a pointer is not known at compile-time, so such operations are not supported
-
-error: aborting due to 1 previous error
-
-For more information about this error, try `rustc --explain E0080`.
tests/ui/consts/const-eval/ptr_fragments.rs (new file, 63 lines)
@@ -0,0 +1,63 @@
+//! Test that various operations involving pointer fragments work as expected.
+//@ run-pass
+
+use std::mem::{self, MaybeUninit, transmute};
+use std::ptr;
+
+type Byte = MaybeUninit<u8>;
+
+const unsafe fn memcpy(dst: *mut Byte, src: *const Byte, n: usize) {
+    let mut i = 0;
+    while i < n {
+        *dst.add(i) = *src.add(i);
+        i += 1;
+    }
+}
+
+const _MEMCPY: () = unsafe {
+    let ptr = &42;
+    let mut ptr2 = ptr::null::<i32>();
+    memcpy(&mut ptr2 as *mut _ as *mut _, &ptr as *const _ as *const _, mem::size_of::<&i32>());
+    assert!(*ptr2 == 42);
+};
+const _MEMCPY_OFFSET: () = unsafe {
+    // Same as above, but the pointer has a non-zero offset so not all the data bytes are the same.
+    let ptr = &(42, 18);
+    let ptr = &ptr.1;
+    let mut ptr2 = ptr::null::<i32>();
+    memcpy(&mut ptr2 as *mut _ as *mut _, &ptr as *const _ as *const _, mem::size_of::<&i32>());
+    assert!(*ptr2 == 18);
+};
+
+const MEMCPY_RET: MaybeUninit<*const i32> = unsafe {
+    let ptr = &42;
+    let mut ptr2 = MaybeUninit::new(ptr::null::<i32>());
+    memcpy(&mut ptr2 as *mut _ as *mut _, &ptr as *const _ as *const _, mem::size_of::<&i32>());
+    // Return in a MaybeUninit so it does not get treated as a scalar.
+    ptr2
+};
+
+#[allow(dead_code)]
+fn reassemble_ptr_fragments_in_static() {
+    static DATA: i32 = 1i32;
+
+    #[cfg(target_pointer_width = "64")]
+    struct Thing {
+        x: MaybeUninit<u32>,
+        y: MaybeUninit<u32>,
+    }
+    #[cfg(target_pointer_width = "32")]
+    struct Thing {
+        x: MaybeUninit<u16>,
+        y: MaybeUninit<u16>,
+    }
+
+    static X: Thing = unsafe {
+        let Thing { x, y } = transmute(&raw const DATA);
+        Thing { x, y }
+    };
+}
+
+fn main() {
+    assert_eq!(unsafe { MEMCPY_RET.assume_init().read() }, 42);
+}
tests/ui/consts/const-eval/ptr_fragments_in_final.rs (new file, 25 lines)
@@ -0,0 +1,25 @@
+//! Test that we properly error when there is a pointer fragment in the final value.
+
+use std::{mem::{self, MaybeUninit}, ptr};
+
+type Byte = MaybeUninit<u8>;
+
+const unsafe fn memcpy(dst: *mut Byte, src: *const Byte, n: usize) {
+    let mut i = 0;
+    while i < n {
+        dst.add(i).write(src.add(i).read());
+        i += 1;
+    }
+}
+
+const MEMCPY_RET: MaybeUninit<*const i32> = unsafe { //~ERROR: partial pointer in final value
+    let ptr = &42;
+    let mut ptr2 = MaybeUninit::new(ptr::null::<i32>());
+    memcpy(&mut ptr2 as *mut _ as *mut _, &ptr as *const _ as *const _, mem::size_of::<&i32>() / 2);
+    // Return in a MaybeUninit so it does not get treated as a scalar.
+    ptr2
+};
+
+fn main() {
+    assert_eq!(unsafe { MEMCPY_RET.assume_init().read() }, 42);
+}
tests/ui/consts/const-eval/ptr_fragments_in_final.stderr (new file, 10 lines)
@@ -0,0 +1,10 @@
+error: encountered partial pointer in final value of constant
+  --> $DIR/ptr_fragments_in_final.rs:15:1
+   |
+LL | const MEMCPY_RET: MaybeUninit<*const i32> = unsafe {
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   |
+   = note: while pointers can be broken apart into individual bytes during const-evaluation, only complete pointers (with all their bytes in the right order) are supported in the final value
+
+error: aborting due to 1 previous error
+
tests/ui/consts/const-eval/read_partial_ptr.rs (new file, 49 lines)
@@ -0,0 +1,49 @@
+//! Ensure we error when trying to load from a pointer whose provenance has been messed with.
+
+const PARTIAL_OVERWRITE: () = {
+    let mut p = &42;
+    // Overwrite one byte with a no-provenance value.
+    unsafe {
+        let ptr: *mut _ = &mut p;
+        *(ptr as *mut u8) = 123;
+    }
+    let x = *p; //~ ERROR: unable to read parts of a pointer
+};
+
+const PTR_BYTES_SWAP: () = {
+    let mut p = &42;
+    // Swap the first two bytes.
+    unsafe {
+        let ptr = &mut p as *mut _ as *mut std::mem::MaybeUninit<u8>;
+        let byte0 = ptr.read();
+        let byte1 = ptr.add(1).read();
+        ptr.write(byte1);
+        ptr.add(1).write(byte0);
+    }
+    let x = *p; //~ ERROR: unable to read parts of a pointer
+};
+
+const PTR_BYTES_REPEAT: () = {
+    let mut p = &42;
+    // Duplicate the first byte over the second.
+    unsafe {
+        let ptr = &mut p as *mut _ as *mut std::mem::MaybeUninit<u8>;
+        let byte0 = ptr.read();
+        ptr.add(1).write(byte0);
+    }
+    let x = *p; //~ ERROR: unable to read parts of a pointer
+};
+
+const PTR_BYTES_MIX: () = {
+    let mut p = &42;
+    let q = &43;
+    // Overwrite the first byte of p with the first byte of q.
+    unsafe {
+        let ptr = &mut p as *mut _ as *mut std::mem::MaybeUninit<u8>;
+        let qtr = &q as *const _ as *const std::mem::MaybeUninit<u8>;
+        ptr.write(qtr.read());
+    }
+    let x = *p; //~ ERROR: unable to read parts of a pointer
+};
+
+fn main() {}
tests/ui/consts/const-eval/read_partial_ptr.stderr (new file, 39 lines)
@@ -0,0 +1,39 @@
+error[E0080]: unable to read parts of a pointer from memory at ALLOC0
+  --> $DIR/read_partial_ptr.rs:10:13
+   |
+LL |     let x = *p;
+   |             ^^ evaluation of `PARTIAL_OVERWRITE` failed here
+   |
+   = help: this code performed an operation that depends on the underlying bytes representing a pointer
+   = help: the absolute address of a pointer is not known at compile-time, so such operations are not supported
+
+error[E0080]: unable to read parts of a pointer from memory at ALLOC1
+  --> $DIR/read_partial_ptr.rs:23:13
+   |
+LL |     let x = *p;
+   |             ^^ evaluation of `PTR_BYTES_SWAP` failed here
+   |
+   = help: this code performed an operation that depends on the underlying bytes representing a pointer
+   = help: the absolute address of a pointer is not known at compile-time, so such operations are not supported
+
+error[E0080]: unable to read parts of a pointer from memory at ALLOC2
+  --> $DIR/read_partial_ptr.rs:34:13
+   |
+LL |     let x = *p;
+   |             ^^ evaluation of `PTR_BYTES_REPEAT` failed here
+   |
+   = help: this code performed an operation that depends on the underlying bytes representing a pointer
+   = help: the absolute address of a pointer is not known at compile-time, so such operations are not supported
+
+error[E0080]: unable to read parts of a pointer from memory at ALLOC3
+  --> $DIR/read_partial_ptr.rs:46:13
+   |
+LL |     let x = *p;
+   |             ^^ evaluation of `PTR_BYTES_MIX` failed here
+   |
+   = help: this code performed an operation that depends on the underlying bytes representing a pointer
+   = help: the absolute address of a pointer is not known at compile-time, so such operations are not supported
+
+error: aborting due to 4 previous errors
+
+For more information about this error, try `rustc --explain E0080`.
@@ -1,3 +1,4 @@
+//! Check what happens when the error occurs inside a std function that we can't print the span of.
//@ ignore-backends: gcc
//@ compile-flags: -Z ui-testing=no

@@ -7,15 +8,15 @@ use std::{
};

const X: () = {
-    let mut ptr1 = &1;
-    let mut ptr2 = &2;
+    let mut x1 = 1;
+    let mut x2 = 2;

    // Swap them, bytewise.
    unsafe {
-        ptr::swap_nonoverlapping( //~ ERROR unable to copy parts of a pointer
-            &mut ptr1 as *mut _ as *mut MaybeUninit<u8>,
-            &mut ptr2 as *mut _ as *mut MaybeUninit<u8>,
-            mem::size_of::<&i32>(),
+        ptr::swap_nonoverlapping( //~ ERROR beyond the end of the allocation
+            &mut x1 as *mut _ as *mut MaybeUninit<u8>,
+            &mut x2 as *mut _ as *mut MaybeUninit<u8>,
+            10,
        );
    }
};

@@ -1,15 +1,13 @@
-error[E0080]: unable to copy parts of a pointer from memory at ALLOC0
-  --> $DIR/missing_span_in_backtrace.rs:15:9
+error[E0080]: memory access failed: attempting to access 1 byte, but got ALLOC0+0x4 which is at or beyond the end of the allocation of size 4 bytes
+  --> $DIR/missing_span_in_backtrace.rs:16:9
   |
-15 | /         ptr::swap_nonoverlapping(
-16 | |             &mut ptr1 as *mut _ as *mut MaybeUninit<u8>,
-17 | |             &mut ptr2 as *mut _ as *mut MaybeUninit<u8>,
-18 | |             mem::size_of::<&i32>(),
-19 | |         );
+16 | /         ptr::swap_nonoverlapping(
+17 | |             &mut x1 as *mut _ as *mut MaybeUninit<u8>,
+18 | |             &mut x2 as *mut _ as *mut MaybeUninit<u8>,
+19 | |             10,
+20 | |         );
   | |_________^ evaluation of `X` failed inside this call
   |
-   = help: this code performed an operation that depends on the underlying bytes representing a pointer
-   = help: the absolute address of a pointer is not known at compile-time, so such operations are not supported
note: inside `swap_nonoverlapping::compiletime::<MaybeUninit<u8>>`
  --> $SRC_DIR/core/src/ptr/mod.rs:LL:COL
note: inside `std::ptr::swap_nonoverlapping_const::<MaybeUninit<u8>>`