mod cache;

use self::cache::ProvisionalEntry;
use super::inspect::ProofTreeBuilder;
use super::SolverMode;
use cache::ProvisionalCache;
use rustc_data_structures::fx::FxHashSet;
use rustc_index::Idx;
use rustc_index::IndexVec;
use rustc_middle::dep_graph::DepKind;
use rustc_middle::traits::solve::inspect::CacheHit;
use rustc_middle::traits::solve::CacheData;
use rustc_middle::traits::solve::{CanonicalInput, Certainty, EvaluationCache, QueryResult};
use rustc_middle::ty::TyCtxt;
use rustc_session::Limit;
use std::collections::hash_map::Entry;

rustc_index::newtype_index! {
    pub struct StackDepth {}
}

#[derive(Debug)]
struct StackEntry<'tcx> {
    input: CanonicalInput<'tcx>,
    available_depth: Limit,
    // The maximum depth reached by this stack entry, only up to date
    // for the top of the stack and lazily updated for the rest.
    reached_depth: StackDepth,
    encountered_overflow: bool,
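    // Whether a nested goal has fetched the provisional result of this goal,
    // i.e. whether this goal is the root of a cycle. If so, we have to keep
    // reproving it until we reach a fixpoint.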
    has_been_used: bool,

    /// We put only the root goal of a coinductive cycle into the global cache.
    ///
    /// If we were to use that result when later trying to prove another cycle
    /// participant, we could end up with unstable query results.
    ///
    /// See tests/ui/new-solver/coinduction/incompleteness-unstable-result.rs for
    /// an example of where this is needed.
    cycle_participants: FxHashSet<CanonicalInput<'tcx>>,
}

pub(super) struct SearchGraph<'tcx> {
    mode: SolverMode,
    local_overflow_limit: usize,
    /// The stack of goals currently being computed.
    ///
    /// An element is *deeper* in the stack if its index is *lower*.
    stack: IndexVec<StackDepth, StackEntry<'tcx>>,
    provisional_cache: ProvisionalCache<'tcx>,
}

impl<'tcx> SearchGraph<'tcx> {
    pub(super) fn new(tcx: TyCtxt<'tcx>, mode: SolverMode) -> SearchGraph<'tcx> {
        Self {
            mode,
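            // Assuming the default recursion limit of 128, `checked_ilog2`
            // yields a local overflow limit of 7 fixpoint iterations.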
            local_overflow_limit: tcx.recursion_limit().0.checked_ilog2().unwrap_or(0) as usize,
            stack: Default::default(),
            provisional_cache: ProvisionalCache::empty(),
        }
    }

    pub(super) fn solver_mode(&self) -> SolverMode {
        self.mode
    }

    pub(super) fn local_overflow_limit(&self) -> usize {
        self.local_overflow_limit
    }

    /// Update the stack and reached depths on cache hits.
    #[instrument(level = "debug", skip(self))]
    fn on_cache_hit(&mut self, additional_depth: usize, encountered_overflow: bool) {
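        // The cached `additional_depth` is stored relative to the position the
        // goal would have on the stack, so convert it back into an absolute
        // `StackDepth` here.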
        let reached_depth = self.stack.next_index().plus(additional_depth);
        if let Some(last) = self.stack.raw.last_mut() {
            last.reached_depth = last.reached_depth.max(reached_depth);
            last.encountered_overflow |= encountered_overflow;
        }
    }

    /// Pops the highest goal from the stack, lazily updating the
    /// next goal in the stack.
    ///
    /// Directly popping from the stack instead of using this method
    /// would cause us to not track overflow and recursion depth correctly.
    fn pop_stack(&mut self) -> StackEntry<'tcx> {
        let elem = self.stack.pop().unwrap();
        if let Some(last) = self.stack.raw.last_mut() {
            last.reached_depth = last.reached_depth.max(elem.reached_depth);
            last.encountered_overflow |= elem.encountered_overflow;
        }
        elem
    }

    /// The trait solver behavior is different for coherence,
    /// so we use a separate cache. Alternatively we could use
    /// a single cache and share it between coherence and ordinary
    /// trait solving.
    pub(super) fn global_cache(&self, tcx: TyCtxt<'tcx>) -> &'tcx EvaluationCache<'tcx> {
        match self.mode {
            SolverMode::Normal => &tcx.new_solver_evaluation_cache,
            SolverMode::Coherence => &tcx.new_solver_coherence_evaluation_cache,
        }
    }

    pub(super) fn is_empty(&self) -> bool {
        self.stack.is_empty() && self.provisional_cache.is_empty()
    }

    /// Whether we're currently in a cycle. This should only be used
    /// for debug assertions.
    pub(super) fn in_cycle(&self) -> bool {
        if let Some(stack_depth) = self.stack.last_index() {
            // Either the current goal on the stack is the root of a cycle...
            if self.stack[stack_depth].has_been_used {
                return true;
            }

            // ...or it depends on a goal with a lower depth.
            let current_goal = self.stack[stack_depth].input;
            let entry_index = self.provisional_cache.lookup_table[&current_goal];
            self.provisional_cache.entries[entry_index].depth != stack_depth
        } else {
            false
        }
    }

    /// Fetches whether the current goal encountered overflow.
    ///
    /// This should only be used for the check in `evaluate_goal`.
    pub(super) fn encountered_overflow(&self) -> bool {
        if let Some(last) = self.stack.raw.last() { last.encountered_overflow } else { false }
    }

    /// Resets `encountered_overflow` of the current goal.
    ///
    /// This should only be used for the check in `evaluate_goal`.
    pub(super) fn reset_encountered_overflow(&mut self, encountered_overflow: bool) -> bool {
        if let Some(last) = self.stack.raw.last_mut() {
            let prev = last.encountered_overflow;
            last.encountered_overflow = encountered_overflow;
            prev
        } else {
            false
        }
    }

    /// Returns the remaining depth allowed for nested goals.
    ///
    /// This is generally simply one less than the current depth.
    /// However, if we encountered overflow, we significantly reduce
    /// the remaining depth of all nested goals to prevent hangs
    /// in case there is exponential blowup.
    fn allowed_depth_for_nested(
        tcx: TyCtxt<'tcx>,
        stack: &IndexVec<StackDepth, StackEntry<'tcx>>,
    ) -> Option<Limit> {
        if let Some(last) = stack.raw.last() {
            if last.available_depth.0 == 0 {
                return None;
            }
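
            // For example, assuming the default recursion limit of 128: once
            // overflow has been encountered, the available depth of nested
            // goals shrinks as 128 -> 32 -> 8 -> 2 -> 0, so deeply nested
            // goals run out of budget after a few levels instead of 128.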
            Some(if last.encountered_overflow {
                Limit(last.available_depth.0 / 4)
            } else {
                Limit(last.available_depth.0 - 1)
            })
        } else {
            Some(tcx.recursion_limit())
        }
    }

    /// Probably the most involved method of the whole solver.
    ///
    /// Given some goal which is proven via the `prove_goal` closure, this
    /// handles caching, overflow, and coinductive cycles.
    pub(super) fn with_new_goal(
        &mut self,
        tcx: TyCtxt<'tcx>,
        input: CanonicalInput<'tcx>,
        inspect: &mut ProofTreeBuilder<'tcx>,
        mut prove_goal: impl FnMut(&mut Self, &mut ProofTreeBuilder<'tcx>) -> QueryResult<'tcx>,
    ) -> QueryResult<'tcx> {
        // Check for overflow.
        let Some(available_depth) = Self::allowed_depth_for_nested(tcx, &self.stack) else {
            if let Some(last) = self.stack.raw.last_mut() {
                last.encountered_overflow = true;
            }
            return Self::response_no_constraints(tcx, input, Certainty::OVERFLOW);
        };

        // Try to fetch the goal from the global cache.
        if inspect.use_global_cache() {
            if let Some(CacheData { result, reached_depth, encountered_overflow }) =
                self.global_cache(tcx).get(
                    tcx,
                    input,
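                    // A cached result may only be reused if no cycle
                    // participant of the cached goal is currently on the
                    // stack, as reusing it could otherwise lead to unstable
                    // results; see `StackEntry::cycle_participants`.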
                    |cycle_participants| {
                        self.stack.iter().any(|entry| cycle_participants.contains(&entry.input))
                    },
                    available_depth,
                )
            {
                self.on_cache_hit(reached_depth, encountered_overflow);
                return result;
            }
        }

        // Look at the provisional cache to detect cycles.
        let cache = &mut self.provisional_cache;
        match cache.lookup_table.entry(input) {
            // No entry, we push this goal on the stack and try to prove it.
            Entry::Vacant(v) => {
                let depth = self.stack.next_index();
                let entry = StackEntry {
                    input,
                    available_depth,
                    reached_depth: depth,
                    encountered_overflow: false,
                    has_been_used: false,
                    cycle_participants: Default::default(),
                };
                assert_eq!(self.stack.push(entry), depth);
                let entry_index =
                    cache.entries.push(ProvisionalEntry { response: None, depth, input });
                v.insert(entry_index);
            }
            // We have a nested goal which relies on a goal `root` deeper in the stack.
            //
            // We first store that we may have to reprove `root` in case the provisional
            // response is not equal to the final response. We also update the depth of all
            // goals which recursively depend on our current goal to depend on `root`
            // instead.
            //
            // Finally we can return either the provisional response for that goal if we have a
            // coinductive cycle or an overflow response if the cycle is inductive.
            Entry::Occupied(entry_index) => {
                inspect.cache_hit(CacheHit::Provisional);

                let entry_index = *entry_index.get();
                let stack_depth = cache.depth(entry_index);
                debug!("encountered cycle with depth {stack_depth:?}");

                cache.add_dependency_of_leaf_on(entry_index);
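
                // Every goal on the stack above `root` is a participant of
                // this cycle; record them so that the root's global cache
                // entry is not used while any of them is on the stack.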
                let mut iter = self.stack.iter_mut();
                let root = iter.nth(stack_depth.as_usize()).unwrap();
                for e in iter {
                    root.cycle_participants.insert(e.input);
                }

                // If we're in a cycle, we have to retry proving the current goal
                // until we reach a fixpoint.
                self.stack[stack_depth].has_been_used = true;
                return if let Some(result) = cache.provisional_result(entry_index) {
                    result
                } else {
                    // If we don't have a provisional result yet, the goal has to
                    // still be on the stack.
                    let mut goal_on_stack = false;
                    let mut is_coinductive = true;
                    for entry in self.stack.raw[stack_depth.index()..]
                        .iter()
                        .skip_while(|entry| entry.input != input)
                    {
                        goal_on_stack = true;
                        is_coinductive &= entry.input.value.goal.predicate.is_coinductive(tcx);
                    }
                    debug_assert!(goal_on_stack);

                    if is_coinductive {
                        Self::response_no_constraints(tcx, input, Certainty::Yes)
                    } else {
                        Self::response_no_constraints(tcx, input, Certainty::OVERFLOW)
                    }
                };
            }
        }

        // This is for global caching, so we properly track query dependencies.
        // Everything that affects the `result` should be performed within this
        // `with_anon_task` closure.
        let ((final_entry, result), dep_node) =
            tcx.dep_graph.with_anon_task(tcx, DepKind::TraitSelect, || {
                // When we encounter a coinductive cycle, we have to fetch the
                // result of that cycle while we are still computing it. Because
                // of this we continuously recompute the cycle until the result
                // of the previous iteration is equal to the final result, at which
                // point we are done.
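                //
                // If the cycle does not converge within `local_overflow_limit`
                // iterations, we give up and return overflow below.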
                for _ in 0..self.local_overflow_limit() {
                    let response = prove_goal(self, inspect);

                    // Check whether the current goal is the root of a cycle and whether
                    // we have to rerun because its provisional result differed from the
                    // final result.
                    //
                    // Also update the response for this goal stored in the provisional
                    // cache.
                    let stack_entry = self.pop_stack();
                    debug_assert_eq!(stack_entry.input, input);
                    let cache = &mut self.provisional_cache;
                    let provisional_entry_index =
                        *cache.lookup_table.get(&stack_entry.input).unwrap();
                    let provisional_entry = &mut cache.entries[provisional_entry_index];
                    if stack_entry.has_been_used
                        && provisional_entry.response.map_or(true, |r| r != response)
                    {
                        // If so, update the provisional result for this goal and remove
                        // all entries whose result depends on this goal from the provisional
                        // cache...
                        //
                        // That's not completely correct, as a nested goal can also
                        // depend on a goal which is lower in the stack, so it doesn't
                        // actually depend on the current goal. This should be fairly
                        // rare and is hopefully not relevant for performance.
                        provisional_entry.response = Some(response);
                        #[allow(rustc::potential_query_instability)]
                        cache.lookup_table.retain(|_key, index| *index <= provisional_entry_index);
                        cache.entries.truncate(provisional_entry_index.index() + 1);

                        // ...and finally push our goal back on the stack and reevaluate it.
                        self.stack.push(StackEntry { has_been_used: false, ..stack_entry });
                    } else {
                        return (stack_entry, response);
                    }
                }

                debug!("canonical cycle overflow");
                let current_entry = self.pop_stack();
                let result = Self::response_no_constraints(tcx, input, Certainty::OVERFLOW);
                (current_entry, result)
            });

        // We're now done with this goal. In case this goal is involved in a larger cycle,
        // do not remove it from the provisional cache, but update its provisional result.
        // We only add the root of cycles to the global cache.
        //
        // It is not possible for any nested goal to depend on something deeper on the
        // stack, as this would have also updated the depth of the current goal.
        let cache = &mut self.provisional_cache;
        let provisional_entry_index = *cache.lookup_table.get(&input).unwrap();
        let provisional_entry = &mut cache.entries[provisional_entry_index];
        let depth = provisional_entry.depth;
        if depth == self.stack.next_index() {
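            // The current goal does not depend on anything still on the stack,
            // so it is the root of any cycle it was involved in. Drain it and
            // all provisional entries added while proving it.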
            for (i, entry) in cache.entries.drain_enumerated(provisional_entry_index.index()..) {
                let actual_index = cache.lookup_table.remove(&entry.input);
                debug_assert_eq!(Some(i), actual_index);
                debug_assert!(entry.depth == depth);
            }

            // When encountering a cycle, both inductive and coinductive, we only
            // move the root into the global cache. We also store all other cycle
            // participants involved.
            //
            // We disable the global cache entry of the root goal if a cycle
            // participant is on the stack. This is necessary to prevent unstable
            // results. See the comment of `StackEntry::cycle_participants` for
            // more details.
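            //
            // The depth is stored relative to the rest of the stack, matching
            // `on_cache_hit`, which adds it back onto `stack.next_index()`.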
            let reached_depth = final_entry.reached_depth.as_usize() - self.stack.len();
            self.global_cache(tcx).insert(
                input,
                reached_depth,
                final_entry.encountered_overflow,
                final_entry.cycle_participants,
                dep_node,
                result,
            )
        } else {
            provisional_entry.response = Some(result);
        }

        result
    }

    fn response_no_constraints(
        tcx: TyCtxt<'tcx>,
        goal: CanonicalInput<'tcx>,
        certainty: Certainty,
    ) -> QueryResult<'tcx> {
        Ok(super::response_no_constraints_raw(tcx, goal.max_universe, goal.variables, certainty))
    }
}