const-eval: full support for pointer fragments

Ralf Jung
2025-07-17 20:00:19 +02:00
parent 051d0e8a95
commit ba5b6b9ec4
36 changed files with 543 additions and 359 deletions


@@ -117,6 +117,13 @@ fn eval_body_using_ecx<'tcx, R: InterpretationResult<'tcx>>(
ecx.tcx.dcx().emit_err(errors::ConstHeapPtrInFinal { span: ecx.tcx.span }),
)));
}
+ Err(InternError::PartialPointer) => {
+ throw_inval!(AlreadyReported(ReportedErrorInfo::non_const_eval_error(
+ ecx.tcx
+ .dcx()
+ .emit_err(errors::PartialPtrInFinal { span: ecx.tcx.span, kind: intern_kind }),
+ )));
+ }
}
interp_ok(R::make_result(ret, ecx))
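As an illustration of the new error path, here is a hypothetical sketch (not taken from this commit's tests; `HALF_A_POINTER` is an invented name, and it assumes a toolchain where `copy_nonoverlapping` and `&raw mut` are usable in const): a constant whose final value keeps only half of a pointer's bytes can now be evaluated, but interning cannot merge the per-byte provenance fragments back into a whole pointer, so the value would be rejected through `PartialPtrInFinal` rather than failing during the copy itself. The `MaybeUninit` wrapper only keeps validation from reading the bytes as integers.

use core::mem::{size_of, MaybeUninit};
use core::ptr::copy_nonoverlapping;

// Copies the first half of a reference's bytes into the final value; the
// resulting provenance fragment cannot be represented by codegen backends.
const HALF_A_POINTER: MaybeUninit<[u8; size_of::<&i32>() / 2]> = unsafe {
    let r: &i32 = &0;
    let mut out: MaybeUninit<[u8; size_of::<&i32>() / 2]> = MaybeUninit::uninit();
    let dst: *mut u8 = (&raw mut out).cast();
    copy_nonoverlapping((&raw const r).cast::<u8>(), dst, size_of::<&i32>() / 2);
    out
};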


@@ -51,6 +51,15 @@ pub(crate) struct ConstHeapPtrInFinal {
pub span: Span,
}
+ #[derive(Diagnostic)]
+ #[diag(const_eval_partial_pointer_in_final)]
+ #[note]
+ pub(crate) struct PartialPtrInFinal {
+ #[primary_span]
+ pub span: Span,
+ pub kind: InternKind,
+ }
#[derive(Diagnostic)]
#[diag(const_eval_unstable_in_stable_exposed)]
pub(crate) struct UnstableInStableExposed {
@@ -832,8 +841,7 @@ impl ReportErrorExt for UnsupportedOpInfo {
UnsupportedOpInfo::Unsupported(s) => s.clone().into(),
UnsupportedOpInfo::ExternTypeField => const_eval_extern_type_field,
UnsupportedOpInfo::UnsizedLocal => const_eval_unsized_local,
- UnsupportedOpInfo::OverwritePartialPointer(_) => const_eval_partial_pointer_overwrite,
- UnsupportedOpInfo::ReadPartialPointer(_) => const_eval_partial_pointer_copy,
+ UnsupportedOpInfo::ReadPartialPointer(_) => const_eval_partial_pointer_read,
UnsupportedOpInfo::ReadPointerAsInt(_) => const_eval_read_pointer_as_int,
UnsupportedOpInfo::ThreadLocalStatic(_) => const_eval_thread_local_static,
UnsupportedOpInfo::ExternStatic(_) => const_eval_extern_static,
@@ -844,7 +852,7 @@ impl ReportErrorExt for UnsupportedOpInfo {
use UnsupportedOpInfo::*;
use crate::fluent_generated::*;
- if let ReadPointerAsInt(_) | OverwritePartialPointer(_) | ReadPartialPointer(_) = self {
+ if let ReadPointerAsInt(_) | ReadPartialPointer(_) = self {
diag.help(const_eval_ptr_as_bytes_1);
diag.help(const_eval_ptr_as_bytes_2);
}
@@ -856,7 +864,7 @@ impl ReportErrorExt for UnsupportedOpInfo {
| UnsupportedOpInfo::ExternTypeField
| Unsupported(_)
| ReadPointerAsInt(_) => {}
- OverwritePartialPointer(ptr) | ReadPartialPointer(ptr) => {
+ ReadPartialPointer(ptr) => {
diag.arg("ptr", ptr);
}
ThreadLocalStatic(did) | ExternStatic(did) => rustc_middle::ty::tls::with(|tcx| {
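A hypothetical sketch (invented name `FIRST_BYTE`) of the case the renamed message still covers: a typed read of bytes that hold only a fragment of a pointer. Fragments can now be stored and copied, but they cannot be materialized as an integer, which is presumably why the fluent id changed from `const_eval_partial_pointer_copy` to `const_eval_partial_pointer_read`.

const FIRST_BYTE: u8 = unsafe {
    let r: &i32 = &0;
    // Reads a single byte out of the pointer's in-memory representation,
    // i.e. tries to turn a provenance fragment into an integer value.
    *(&raw const r).cast::<u8>()
};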


@@ -19,9 +19,12 @@ use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
use rustc_hir as hir;
use rustc_hir::definitions::{DefPathData, DisambiguatorState};
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
- use rustc_middle::mir::interpret::{ConstAllocation, CtfeProvenance, InterpResult};
+ use rustc_middle::mir::interpret::{
+ AllocBytes, ConstAllocation, CtfeProvenance, InterpResult, Provenance,
+ };
use rustc_middle::query::TyCtxtAt;
use rustc_middle::span_bug;
use rustc_middle::ty::TyCtxt;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_span::def_id::LocalDefId;
use tracing::{instrument, trace};
@@ -52,6 +55,45 @@ impl HasStaticRootDefId for const_eval::CompileTimeMachine<'_> {
}
}
+ fn prepare_alloc<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
+ tcx: TyCtxt<'tcx>,
+ kind: MemoryKind<const_eval::MemoryKind>,
+ alloc: &mut Allocation<Prov, Extra, Bytes>,
+ mutability: Mutability,
+ ) -> Result<(), InternError> {
+ match kind {
+ MemoryKind::Machine(const_eval::MemoryKind::Heap { was_made_global }) => {
+ if !was_made_global {
+ // Attempting to intern a `const_allocate`d pointer that was not made global via
+ // `const_make_global`.
+ tcx.dcx().delayed_bug("non-global heap allocation in const value");
+ return Err(InternError::ConstAllocNotGlobal);
+ }
+ }
+ MemoryKind::Stack | MemoryKind::CallerLocation => {}
+ }
+ if !alloc.provenance_merge_bytes(&tcx) {
+ // Per-byte provenance is not supported by backends, so we cannot accept it here.
+ tcx.dcx().delayed_bug("partial pointer in const value");
+ return Err(InternError::PartialPointer);
+ }
+ // Set allocation mutability as appropriate. This is used by LLVM to put things into
+ // read-only memory, and also by Miri when evaluating other globals that
+ // access this one.
+ match mutability {
+ Mutability::Not => {
+ alloc.mutability = Mutability::Not;
+ }
+ Mutability::Mut => {
+ // This must be already mutable, we won't "un-freeze" allocations ever.
+ assert_eq!(alloc.mutability, Mutability::Mut);
+ }
+ }
+ Ok(())
+ }
/// Intern an allocation. Returns `Err` if the allocation does not exist in the local memory.
///
/// `mutability` can be used to force immutable interning: if it is `Mutability::Not`, the
@@ -72,31 +114,13 @@ fn intern_shallow<'tcx, M: CompileTimeMachine<'tcx>>(
return Err(InternError::DanglingPointer);
};
- match kind {
- MemoryKind::Machine(const_eval::MemoryKind::Heap { was_made_global }) => {
- if !was_made_global {
- // Attempting to intern a `const_allocate`d pointer that was not made global via
- // `const_make_global`. We want to error here, but we have to first put the
- // allocation back into the `alloc_map` to keep things in a consistent state.
- ecx.memory.alloc_map.insert(alloc_id, (kind, alloc));
- return Err(InternError::ConstAllocNotGlobal);
- }
- }
- MemoryKind::Stack | MemoryKind::CallerLocation => {}
+ if let Err(err) = prepare_alloc(*ecx.tcx, kind, &mut alloc, mutability) {
+ // We want to error here, but we have to first put the
+ // allocation back into the `alloc_map` to keep things in a consistent state.
+ ecx.memory.alloc_map.insert(alloc_id, (kind, alloc));
+ return Err(err);
}
- // Set allocation mutability as appropriate. This is used by LLVM to put things into
- // read-only memory, and also by Miri when evaluating other globals that
- // access this one.
- match mutability {
- Mutability::Not => {
- alloc.mutability = Mutability::Not;
- }
- Mutability::Mut => {
- // This must be already mutable, we won't "un-freeze" allocations ever.
- assert_eq!(alloc.mutability, Mutability::Mut);
- }
- }
// link the alloc id to the actual allocation
let alloc = ecx.tcx.mk_const_alloc(alloc);
if let Some(static_id) = ecx.machine.static_def_id() {
@@ -166,6 +190,7 @@ pub enum InternError {
BadMutablePointer,
DanglingPointer,
ConstAllocNotGlobal,
+ PartialPointer,
}
/// Intern `ret` and everything it references.
@@ -221,13 +246,11 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx>>(
let mut todo: Vec<_> = if is_static {
// Do not steal the root allocation, we need it later to create the return value of `eval_static_initializer`.
// But still change its mutability to match the requested one.
- let alloc = ecx.memory.alloc_map.get_mut(&base_alloc_id).unwrap();
- alloc.1.mutability = base_mutability;
- alloc.1.provenance().ptrs().iter().map(|&(_, prov)| prov).collect()
+ let (kind, alloc) = ecx.memory.alloc_map.get_mut(&base_alloc_id).unwrap();
+ prepare_alloc(*ecx.tcx, *kind, alloc, base_mutability)?;
+ alloc.provenance().ptrs().iter().map(|&(_, prov)| prov).collect()
} else {
- intern_shallow(ecx, base_alloc_id, base_mutability, Some(&mut disambiguator))
- .unwrap()
- .collect()
+ intern_shallow(ecx, base_alloc_id, base_mutability, Some(&mut disambiguator))?.collect()
};
// We need to distinguish "has just been interned" from "was already in `tcx`",
// so we track this in a separate set.
@@ -235,7 +258,6 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx>>(
// Whether we encountered a bad mutable pointer.
// We want to first report "dangling" and then "mutable", so we need to delay reporting these
// errors.
- let mut result = Ok(());
let mut found_bad_mutable_ptr = false;
// Keep interning as long as there are things to intern.
@@ -310,20 +332,15 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx>>(
// okay with losing some potential for immutability here. This can anyway only affect
// `static mut`.
just_interned.insert(alloc_id);
- match intern_shallow(ecx, alloc_id, inner_mutability, Some(&mut disambiguator)) {
- Ok(nested) => todo.extend(nested),
- Err(err) => {
- ecx.tcx.dcx().delayed_bug("error during const interning");
- result = Err(err);
- }
- }
+ let next = intern_shallow(ecx, alloc_id, inner_mutability, Some(&mut disambiguator))?;
+ todo.extend(next);
}
- if found_bad_mutable_ptr && result.is_ok() {
+ if found_bad_mutable_ptr {
// We found a mutable pointer inside a const where inner allocations should be immutable,
// and there was no other error. This should usually never happen! However, this can happen
// in unleash-miri mode, so report it as a normal error then.
if ecx.tcx.sess.opts.unstable_opts.unleash_the_miri_inside_of_you {
- result = Err(InternError::BadMutablePointer);
+ return Err(InternError::BadMutablePointer);
} else {
span_bug!(
ecx.tcx.span,
@@ -331,7 +348,7 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx>>(
);
}
}
- result
+ Ok(())
}
/// Intern `ret`. This function assumes that `ret` references no other allocation.
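As a side note on the `was_made_global` check that `prepare_alloc` carries over from the old `intern_shallow` code, here is a hypothetical sketch of the `ConstAllocNotGlobal` case (invented name `LEAKED`; the unstable feature gates are assumptions): memory obtained from `const_allocate` that never went through `const_make_global` must not reach a final value, and keeps being reported via `ConstHeapPtrInFinal`.

#![feature(core_intrinsics, const_heap)]
use core::intrinsics;

// The returned pointer still refers to a const-heap allocation that was never
// made global, so interning the final value of `LEAKED` is rejected.
const LEAKED: *mut u8 = unsafe { intrinsics::const_allocate(4, 4) };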


@@ -1310,29 +1310,20 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>
}
/// Mark the given sub-range (relative to this allocation reference) as uninitialized.
- pub fn write_uninit(&mut self, range: AllocRange) -> InterpResult<'tcx> {
+ pub fn write_uninit(&mut self, range: AllocRange) {
let range = self.range.subrange(range);
- self.alloc
- .write_uninit(&self.tcx, range)
- .map_err(|e| e.to_interp_error(self.alloc_id))
- .into()
+ self.alloc.write_uninit(&self.tcx, range);
}
/// Mark the entire referenced range as uninitialized
- pub fn write_uninit_full(&mut self) -> InterpResult<'tcx> {
- self.alloc
- .write_uninit(&self.tcx, self.range)
- .map_err(|e| e.to_interp_error(self.alloc_id))
- .into()
+ pub fn write_uninit_full(&mut self) {
+ self.alloc.write_uninit(&self.tcx, self.range);
}
/// Remove all provenance in the reference range.
- pub fn clear_provenance(&mut self) -> InterpResult<'tcx> {
- self.alloc
- .clear_provenance(&self.tcx, self.range)
- .map_err(|e| e.to_interp_error(self.alloc_id))
- .into()
+ pub fn clear_provenance(&mut self) {
+ self.alloc.clear_provenance(&self.tcx, self.range);
}
}
@@ -1423,11 +1414,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// Side-step AllocRef and directly access the underlying bytes more efficiently.
// (We are staying inside the bounds here and all bytes do get overwritten so all is good.)
- let alloc_id = alloc_ref.alloc_id;
- let bytes = alloc_ref
- .alloc
- .get_bytes_unchecked_for_overwrite(&alloc_ref.tcx, alloc_ref.range)
- .map_err(move |e| e.to_interp_error(alloc_id))?;
+ let bytes =
+ alloc_ref.alloc.get_bytes_unchecked_for_overwrite(&alloc_ref.tcx, alloc_ref.range);
// `zip` would stop when the first iterator ends; we want to definitely
// cover all of `bytes`.
for dest in bytes {
@@ -1509,10 +1497,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// `get_bytes_mut` will clear the provenance, which is correct,
// since we don't want to keep any provenance at the target.
// This will also error if copying partial provenance is not supported.
- let provenance = src_alloc
- .provenance()
- .prepare_copy(src_range, dest_offset, num_copies, self)
- .map_err(|e| e.to_interp_error(src_alloc_id))?;
+ let provenance =
+ src_alloc.provenance().prepare_copy(src_range, dest_offset, num_copies, self);
// Prepare a copy of the initialization mask.
let init = src_alloc.init_mask().prepare_copy(src_range);
@@ -1530,10 +1516,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
dest_range,
)?;
// Yes we do overwrite all bytes in `dest_bytes`.
- let dest_bytes = dest_alloc
- .get_bytes_unchecked_for_overwrite_ptr(&tcx, dest_range)
- .map_err(|e| e.to_interp_error(dest_alloc_id))?
- .as_mut_ptr();
+ let dest_bytes =
+ dest_alloc.get_bytes_unchecked_for_overwrite_ptr(&tcx, dest_range).as_mut_ptr();
if init.no_bytes_init() {
// Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
@@ -1542,9 +1526,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// This also avoids writing to the target bytes so that the backing allocation is never
// touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
// operating system this can avoid physically allocating the page.
- dest_alloc
- .write_uninit(&tcx, dest_range)
- .map_err(|e| e.to_interp_error(dest_alloc_id))?;
+ dest_alloc.write_uninit(&tcx, dest_range);
// `write_uninit` also resets the provenance, so we are done.
return interp_ok(());
}
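For contrast, a hypothetical sketch (invented name `REASSEMBLED`, assuming a toolchain where `copy_nonoverlapping` and `&raw` are usable in const) of what the now-infallible provenance copy is meant to support: an untyped copy that moves a pointer in two halves. Each half-copy stores provenance fragments, and once both halves are in place the fragments merge back into a whole pointer, so evaluation and interning should both succeed; previously the first half-copy was already rejected as "unable to copy parts of a pointer".

use core::mem::{size_of, MaybeUninit};
use core::ptr::copy_nonoverlapping;

const REASSEMBLED: &i32 = unsafe {
    let r: &i32 = &7;
    let half = size_of::<&i32>() / 2;
    let mut dst: MaybeUninit<&i32> = MaybeUninit::uninit();
    let src: *const u8 = (&raw const r).cast();
    let out: *mut u8 = (&raw mut dst).cast();
    // Two separate copies: each one transfers only a pointer fragment.
    copy_nonoverlapping(src, out, half);
    copy_nonoverlapping(src.add(half), out.add(half), half);
    // Reading the fully reassembled pointer back merges the fragments again.
    dst.assume_init()
};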


@@ -700,7 +700,7 @@ where
match value {
Immediate::Scalar(scalar) => {
- alloc.write_scalar(alloc_range(Size::ZERO, scalar.size()), scalar)
+ alloc.write_scalar(alloc_range(Size::ZERO, scalar.size()), scalar)?;
}
Immediate::ScalarPair(a_val, b_val) => {
let BackendRepr::ScalarPair(a, b) = layout.backend_repr else {
@@ -720,10 +720,10 @@ where
alloc.write_scalar(alloc_range(Size::ZERO, a_val.size()), a_val)?;
alloc.write_scalar(alloc_range(b_offset, b_val.size()), b_val)?;
// We don't have to reset padding here, `write_immediate` will anyway do a validation run.
- interp_ok(())
}
Immediate::Uninit => alloc.write_uninit_full(),
}
+ interp_ok(())
}
pub fn write_uninit(
@@ -743,7 +743,7 @@ where
// Zero-sized access
return interp_ok(());
};
- alloc.write_uninit_full()?;
+ alloc.write_uninit_full();
}
}
interp_ok(())
@@ -767,7 +767,7 @@ where
// Zero-sized access
return interp_ok(());
};
- alloc.clear_provenance()?;
+ alloc.clear_provenance();
}
}
interp_ok(())


@@ -949,7 +949,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
let padding_size = offset - padding_cleared_until;
let range = alloc_range(padding_start, padding_size);
trace!("reset_padding on {}: resetting padding range {range:?}", mplace.layout.ty);
- alloc.write_uninit(range)?;
+ alloc.write_uninit(range);
}
padding_cleared_until = offset + size;
}
@@ -1239,7 +1239,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
if self.reset_provenance_and_padding {
// We can't share this with above as above, we might be looking at read-only memory.
let mut alloc = self.ecx.get_ptr_alloc_mut(mplace.ptr(), size)?.expect("we already excluded size 0");
- alloc.clear_provenance()?;
+ alloc.clear_provenance();
// Also, mark this as containing data, not padding.
self.add_data_range(mplace.ptr(), size);
}