Move DepKind to rustc_query_system and define it as u16
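Summary of the change as reflected in the diff below: DepKind now lives in rustc_query_system::dep_graph as a concrete type backed by a u16, so the query-system types (QueryInfo, QueryMap, QueryJob, QueryLatch, QueryState, CycleError, ...) no longer need a `D: DepKind` type parameter, and QueryConfig::dep_kind returns DepKind instead of Qcx::DepKind. A minimal sketch of the shape of such a type follows; the names and derives here are illustrative assumptions, not the commit's exact definition.

// Illustrative sketch only (assumed names, not the commit's exact code):
// a dep kind represented as a plain u16 index instead of a per-crate enum
// threaded through the query system as a generic parameter.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct DepKind {
    variant: u16,
}

impl DepKind {
    #[inline]
    pub const fn new(variant: u16) -> Self {
        DepKind { variant }
    }

    #[inline]
    pub const fn as_usize(self) -> usize {
        // Used to index per-kind metadata tables kept by the compiler.
        self.variant as usize
    }
}

With a concrete, Copy, two-byte kind, signatures such as `fn dep_kind(self) -> DepKind` and types such as `QueryInfo` need no `D: DepKind` parameter, which is the simplification the hunks below apply.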
@@ -1,6 +1,6 @@
 //! Query configuration and description traits.

-use crate::dep_graph::{DepNode, DepNodeParams, SerializedDepNodeIndex};
+use crate::dep_graph::{DepKind, DepNode, DepNodeParams, SerializedDepNodeIndex};
 use crate::error::HandleCycleError;
 use crate::ich::StableHashingContext;
 use crate::query::caches::QueryCache;
@@ -27,7 +27,7 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
     fn format_value(self) -> fn(&Self::Value) -> String;

     // Don't use this method to access query results, instead use the methods on TyCtxt
-    fn query_state<'a>(self, tcx: Qcx) -> &'a QueryState<Self::Key, Qcx::DepKind>
+    fn query_state<'a>(self, tcx: Qcx) -> &'a QueryState<Self::Key>
     where
         Qcx: 'a;

@@ -57,7 +57,7 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
     fn value_from_cycle_error(
         self,
         tcx: Qcx::DepContext,
-        cycle: &[QueryInfo<Qcx::DepKind>],
+        cycle: &[QueryInfo],
         guar: ErrorGuaranteed,
     ) -> Self::Value;

@@ -66,12 +66,12 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
     fn depth_limit(self) -> bool;
     fn feedable(self) -> bool;

-    fn dep_kind(self) -> Qcx::DepKind;
+    fn dep_kind(self) -> DepKind;
     fn handle_cycle_error(self) -> HandleCycleError;
     fn hash_result(self) -> HashResult<Self::Value>;

     // Just here for convenience and checking that the key matches the kind, don't override this.
-    fn construct_dep_node(self, tcx: Qcx::DepContext, key: &Self::Key) -> DepNode<Qcx::DepKind> {
+    fn construct_dep_node(self, tcx: Qcx::DepContext, key: &Self::Key) -> DepNode {
         DepNode::construct(tcx, self.dep_kind(), key)
     }
 }
@@ -1,9 +1,8 @@
-use crate::dep_graph::DepKind;
+use crate::dep_graph::DepContext;
 use crate::error::CycleStack;
 use crate::query::plumbing::CycleError;
+use crate::query::DepKind;
 use crate::query::{QueryContext, QueryStackFrame};
-use core::marker::PhantomData;

 use rustc_data_structures::fx::FxHashMap;
 use rustc_errors::{
     Diagnostic, DiagnosticBuilder, ErrorGuaranteed, Handler, IntoDiagnostic, Level,
@@ -30,48 +29,48 @@ use {

 /// Represents a span and a query key.
 #[derive(Clone, Debug)]
-pub struct QueryInfo<D: DepKind> {
+pub struct QueryInfo {
     /// The span corresponding to the reason for which this query was required.
     pub span: Span,
-    pub query: QueryStackFrame<D>,
+    pub query: QueryStackFrame,
 }

-pub type QueryMap<D> = FxHashMap<QueryJobId, QueryJobInfo<D>>;
+pub type QueryMap = FxHashMap<QueryJobId, QueryJobInfo>;

 /// A value uniquely identifying an active query job.
 #[derive(Copy, Clone, Eq, PartialEq, Hash)]
 pub struct QueryJobId(pub NonZeroU64);

 impl QueryJobId {
-    fn query<D: DepKind>(self, map: &QueryMap<D>) -> QueryStackFrame<D> {
+    fn query(self, map: &QueryMap) -> QueryStackFrame {
         map.get(&self).unwrap().query.clone()
     }

     #[cfg(parallel_compiler)]
-    fn span<D: DepKind>(self, map: &QueryMap<D>) -> Span {
+    fn span(self, map: &QueryMap) -> Span {
         map.get(&self).unwrap().job.span
     }

     #[cfg(parallel_compiler)]
-    fn parent<D: DepKind>(self, map: &QueryMap<D>) -> Option<QueryJobId> {
+    fn parent(self, map: &QueryMap) -> Option<QueryJobId> {
         map.get(&self).unwrap().job.parent
     }

     #[cfg(parallel_compiler)]
-    fn latch<D: DepKind>(self, map: &QueryMap<D>) -> Option<&QueryLatch<D>> {
+    fn latch(self, map: &QueryMap) -> Option<&QueryLatch> {
         map.get(&self).unwrap().job.latch.as_ref()
     }
 }

 #[derive(Clone)]
-pub struct QueryJobInfo<D: DepKind> {
-    pub query: QueryStackFrame<D>,
-    pub job: QueryJob<D>,
+pub struct QueryJobInfo {
+    pub query: QueryStackFrame,
+    pub job: QueryJob,
 }

 /// Represents an active query job.
 #[derive(Clone)]
-pub struct QueryJob<D: DepKind> {
+pub struct QueryJob {
     pub id: QueryJobId,

     /// The span corresponding to the reason for which this query was required.
@@ -82,11 +81,10 @@ pub struct QueryJob<D: DepKind> {

     /// The latch that is used to wait on this job.
     #[cfg(parallel_compiler)]
-    latch: Option<QueryLatch<D>>,
-    spooky: core::marker::PhantomData<D>,
+    latch: Option<QueryLatch>,
 }

-impl<D: DepKind> QueryJob<D> {
+impl QueryJob {
     /// Creates a new query job.
     #[inline]
     pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
@@ -96,12 +94,11 @@ impl<D: DepKind> QueryJob<D> {
             parent,
             #[cfg(parallel_compiler)]
             latch: None,
-            spooky: PhantomData,
         }
     }

     #[cfg(parallel_compiler)]
-    pub(super) fn latch(&mut self) -> QueryLatch<D> {
+    pub(super) fn latch(&mut self) -> QueryLatch {
         if self.latch.is_none() {
             self.latch = Some(QueryLatch::new());
         }
@@ -124,12 +121,12 @@ impl<D: DepKind> QueryJob<D> {
 }

 impl QueryJobId {
-    pub(super) fn find_cycle_in_stack<D: DepKind>(
+    pub(super) fn find_cycle_in_stack(
         &self,
-        query_map: QueryMap<D>,
+        query_map: QueryMap,
         current_job: &Option<QueryJobId>,
         span: Span,
-    ) -> CycleError<D> {
+    ) -> CycleError {
         // Find the waitee amongst `current_job` parents
         let mut cycle = Vec::new();
         let mut current_job = Option::clone(current_job);
@@ -163,18 +160,18 @@ impl QueryJobId {

     #[cold]
     #[inline(never)]
-    pub fn try_find_layout_root<D: DepKind>(
+    pub fn try_find_layout_root(
         &self,
-        query_map: QueryMap<D>,
-    ) -> Option<(QueryJobInfo<D>, usize)> {
+        query_map: QueryMap,
+        layout_of_kind: DepKind,
+    ) -> Option<(QueryJobInfo, usize)> {
         let mut last_layout = None;
         let mut current_id = Some(*self);
         let mut depth = 0;

         while let Some(id) = current_id {
             let info = query_map.get(&id).unwrap();
-            // FIXME: This string comparison should probably not be done.
-            if format!("{:?}", info.query.dep_kind) == "layout_of" {
+            if info.query.dep_kind == layout_of_kind {
                 depth += 1;
                 last_layout = Some((info.clone(), depth));
             }
@@ -185,15 +182,15 @@ impl QueryJobId {
 }

 #[cfg(parallel_compiler)]
-struct QueryWaiter<D: DepKind> {
+struct QueryWaiter {
     query: Option<QueryJobId>,
     condvar: Condvar,
     span: Span,
-    cycle: Mutex<Option<CycleError<D>>>,
+    cycle: Mutex<Option<CycleError>>,
 }

 #[cfg(parallel_compiler)]
-impl<D: DepKind> QueryWaiter<D> {
+impl QueryWaiter {
     fn notify(&self, registry: &rayon_core::Registry) {
         rayon_core::mark_unblocked(registry);
         self.condvar.notify_one();
@@ -201,19 +198,19 @@ impl<D: DepKind> QueryWaiter<D> {
 }

 #[cfg(parallel_compiler)]
-struct QueryLatchInfo<D: DepKind> {
+struct QueryLatchInfo {
     complete: bool,
-    waiters: Vec<Arc<QueryWaiter<D>>>,
+    waiters: Vec<Arc<QueryWaiter>>,
 }

 #[cfg(parallel_compiler)]
 #[derive(Clone)]
-pub(super) struct QueryLatch<D: DepKind> {
-    info: Arc<Mutex<QueryLatchInfo<D>>>,
+pub(super) struct QueryLatch {
+    info: Arc<Mutex<QueryLatchInfo>>,
 }

 #[cfg(parallel_compiler)]
-impl<D: DepKind> QueryLatch<D> {
+impl QueryLatch {
     fn new() -> Self {
         QueryLatch {
             info: Arc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
@@ -221,11 +218,7 @@ impl<D: DepKind> QueryLatch<D> {
     }

     /// Awaits for the query job to complete.
-    pub(super) fn wait_on(
-        &self,
-        query: Option<QueryJobId>,
-        span: Span,
-    ) -> Result<(), CycleError<D>> {
+    pub(super) fn wait_on(&self, query: Option<QueryJobId>, span: Span) -> Result<(), CycleError> {
         let waiter =
             Arc::new(QueryWaiter { query, span, cycle: Mutex::new(None), condvar: Condvar::new() });
         self.wait_on_inner(&waiter);
@@ -240,7 +233,7 @@ impl<D: DepKind> QueryLatch<D> {
     }

     /// Awaits the caller on this latch by blocking the current thread.
-    fn wait_on_inner(&self, waiter: &Arc<QueryWaiter<D>>) {
+    fn wait_on_inner(&self, waiter: &Arc<QueryWaiter>) {
         let mut info = self.info.lock();
         if !info.complete {
             // We push the waiter on to the `waiters` list. It can be accessed inside
@@ -274,7 +267,7 @@ impl<D: DepKind> QueryLatch<D> {

     /// Removes a single waiter from the list of waiters.
     /// This is used to break query cycles.
-    fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter<D>> {
+    fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter> {
         let mut info = self.info.lock();
         debug_assert!(!info.complete);
         // Remove the waiter from the list of waiters
@@ -296,14 +289,9 @@ type Waiter = (QueryJobId, usize);
 /// required information to resume the waiter.
 /// If all `visit` calls returns None, this function also returns None.
 #[cfg(parallel_compiler)]
-fn visit_waiters<F, D>(
-    query_map: &QueryMap<D>,
-    query: QueryJobId,
-    mut visit: F,
-) -> Option<Option<Waiter>>
+fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>>
 where
     F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
-    D: DepKind,
 {
     // Visit the parent query which is a non-resumable waiter since it's on the same stack
     if let Some(parent) = query.parent(query_map) {
@@ -332,8 +320,8 @@ where
 /// If a cycle is detected, this initial value is replaced with the span causing
 /// the cycle.
 #[cfg(parallel_compiler)]
-fn cycle_check<D: DepKind>(
-    query_map: &QueryMap<D>,
+fn cycle_check(
+    query_map: &QueryMap,
     query: QueryJobId,
     span: Span,
     stack: &mut Vec<(Span, QueryJobId)>,
@@ -373,8 +361,8 @@ fn cycle_check<D: DepKind>(
 /// from `query` without going through any of the queries in `visited`.
 /// This is achieved with a depth first search.
 #[cfg(parallel_compiler)]
-fn connected_to_root<D: DepKind>(
-    query_map: &QueryMap<D>,
+fn connected_to_root(
+    query_map: &QueryMap,
     query: QueryJobId,
     visited: &mut FxHashSet<QueryJobId>,
 ) -> bool {
@@ -396,10 +384,9 @@ fn connected_to_root<D: DepKind>(

 // Deterministically pick an query from a list
 #[cfg(parallel_compiler)]
-fn pick_query<'a, T, F, D>(query_map: &QueryMap<D>, queries: &'a [T], f: F) -> &'a T
+fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T
 where
     F: Fn(&T) -> (Span, QueryJobId),
-    D: DepKind,
 {
     // Deterministically pick an entry point
     // FIXME: Sort this instead
@@ -423,10 +410,10 @@ where
 /// If a cycle was not found, the starting query is removed from `jobs` and
 /// the function returns false.
 #[cfg(parallel_compiler)]
-fn remove_cycle<D: DepKind>(
-    query_map: &QueryMap<D>,
+fn remove_cycle(
+    query_map: &QueryMap,
     jobs: &mut Vec<QueryJobId>,
-    wakelist: &mut Vec<Arc<QueryWaiter<D>>>,
+    wakelist: &mut Vec<Arc<QueryWaiter>>,
 ) -> bool {
     let mut visited = FxHashSet::default();
     let mut stack = Vec::new();
@@ -528,7 +515,7 @@ fn remove_cycle<D: DepKind>(
 /// There may be multiple cycles involved in a deadlock, so this searches
 /// all active queries for cycles before finally resuming all the waiters at once.
 #[cfg(parallel_compiler)]
-pub fn deadlock<D: DepKind>(query_map: QueryMap<D>, registry: &rayon_core::Registry) {
+pub fn deadlock(query_map: QueryMap, registry: &rayon_core::Registry) {
     let on_panic = defer(|| {
         eprintln!("deadlock handler panicked, aborting process");
         process::abort();
@@ -566,9 +553,9 @@ pub fn deadlock<D: DepKind>(query_map: QueryMap<D>, registry: &rayon_core::Registry) {

 #[inline(never)]
 #[cold]
-pub(crate) fn report_cycle<'a, D: DepKind>(
+pub(crate) fn report_cycle<'a>(
     sess: &'a Session,
-    CycleError { usage, cycle: stack }: &CycleError<D>,
+    CycleError { usage, cycle: stack }: &CycleError,
 ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
     assert!(!stack.is_empty());

@@ -655,8 +642,10 @@ pub fn print_query_stack<Qcx: QueryContext>(
         if let Some(ref mut file) = file {
             let _ = writeln!(
                 file,
-                "#{} [{:?}] {}",
-                count_total, query_info.query.dep_kind, query_info.query.description
+                "#{} [{}] {}",
+                count_total,
+                qcx.dep_context().dep_kind_info(query_info.query.dep_kind).name,
+                query_info.query.description
             );
         }

@@ -28,27 +28,27 @@ use thin_vec::ThinVec;
 ///
 /// This is mostly used in case of cycles for error reporting.
 #[derive(Clone, Debug)]
-pub struct QueryStackFrame<D: DepKind> {
+pub struct QueryStackFrame {
     pub description: String,
     span: Option<Span>,
     pub def_id: Option<DefId>,
     pub def_kind: Option<DefKind>,
     pub ty_adt_id: Option<DefId>,
-    pub dep_kind: D,
+    pub dep_kind: DepKind,
     /// This hash is used to deterministically pick
     /// a query to remove cycles in the parallel compiler.
     #[cfg(parallel_compiler)]
     hash: Hash64,
 }

-impl<D: DepKind> QueryStackFrame<D> {
+impl QueryStackFrame {
     #[inline]
     pub fn new(
         description: String,
         span: Option<Span>,
         def_id: Option<DefId>,
         def_kind: Option<DefKind>,
-        dep_kind: D,
+        dep_kind: DepKind,
         ty_adt_id: Option<DefId>,
         _hash: impl FnOnce() -> Hash64,
     ) -> Self {
@@ -106,7 +106,7 @@ pub trait QueryContext: HasDepContext {
     /// Get the query information from the TLS context.
     fn current_query_job(self) -> Option<QueryJobId>;

-    fn try_collect_active_jobs(self) -> Option<QueryMap<Self::DepKind>>;
+    fn try_collect_active_jobs(self) -> Option<QueryMap>;

     /// Load side effects associated to the node in the previous session.
     fn load_side_effects(self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects;
@@ -2,8 +2,8 @@
 //! generate the actual methods on tcx which find and execute the provider,
 //! manage the caches, and so forth.

-use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex, DepNodeParams};
-use crate::dep_graph::{DepGraphData, HasDepContext};
+use crate::dep_graph::DepGraphData;
+use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams};
 use crate::ich::StableHashingContext;
 use crate::query::caches::QueryCache;
 #[cfg(parallel_compiler)]
@@ -30,24 +30,23 @@ use thin_vec::ThinVec;

 use super::QueryConfig;

-pub struct QueryState<K, D: DepKind> {
-    active: Sharded<FxHashMap<K, QueryResult<D>>>,
+pub struct QueryState<K> {
+    active: Sharded<FxHashMap<K, QueryResult>>,
 }

 /// Indicates the state of a query for a given key in a query map.
-enum QueryResult<D: DepKind> {
+enum QueryResult {
     /// An already executing query. The query job can be used to await for its completion.
-    Started(QueryJob<D>),
+    Started(QueryJob),

     /// The query panicked. Queries trying to wait on this will raise a fatal error which will
     /// silently panic.
     Poisoned,
 }

-impl<K, D> QueryState<K, D>
+impl<K> QueryState<K>
 where
     K: Eq + Hash + Copy + Debug,
-    D: DepKind,
 {
     pub fn all_inactive(&self) -> bool {
         self.active.lock_shards().all(|shard| shard.is_empty())
@@ -56,8 +55,8 @@ where
     pub fn try_collect_active_jobs<Qcx: Copy>(
         &self,
         qcx: Qcx,
-        make_query: fn(Qcx, K) -> QueryStackFrame<D>,
-        jobs: &mut QueryMap<D>,
+        make_query: fn(Qcx, K) -> QueryStackFrame,
+        jobs: &mut QueryMap,
     ) -> Option<()> {
         let mut active = Vec::new();

@@ -82,25 +81,25 @@ where
     }
 }

-impl<K, D: DepKind> Default for QueryState<K, D> {
-    fn default() -> QueryState<K, D> {
+impl<K> Default for QueryState<K> {
+    fn default() -> QueryState<K> {
         QueryState { active: Default::default() }
     }
 }

 /// A type representing the responsibility to execute the job in the `job` field.
 /// This will poison the relevant query if dropped.
-struct JobOwner<'tcx, K, D: DepKind>
+struct JobOwner<'tcx, K>
 where
     K: Eq + Hash + Copy,
 {
-    state: &'tcx QueryState<K, D>,
+    state: &'tcx QueryState<K>,
     key: K,
 }

 #[cold]
 #[inline(never)]
-fn mk_cycle<Q, Qcx>(query: Q, qcx: Qcx, cycle_error: CycleError<Qcx::DepKind>) -> Q::Value
+fn mk_cycle<Q, Qcx>(query: Q, qcx: Qcx, cycle_error: CycleError) -> Q::Value
 where
     Q: QueryConfig<Qcx>,
     Qcx: QueryContext,
@@ -112,7 +111,7 @@ where
 fn handle_cycle_error<Q, Qcx>(
     query: Q,
     qcx: Qcx,
-    cycle_error: &CycleError<Qcx::DepKind>,
+    cycle_error: &CycleError,
     mut error: DiagnosticBuilder<'_, ErrorGuaranteed>,
 ) -> Q::Value
 where
@@ -137,7 +136,7 @@ where
     }
 }

-impl<'tcx, K, D: DepKind> JobOwner<'tcx, K, D>
+impl<'tcx, K> JobOwner<'tcx, K>
 where
     K: Eq + Hash + Copy,
 {
@@ -169,10 +168,9 @@ where
     }
 }

-impl<'tcx, K, D> Drop for JobOwner<'tcx, K, D>
+impl<'tcx, K> Drop for JobOwner<'tcx, K>
 where
     K: Eq + Hash + Copy,
-    D: DepKind,
 {
     #[inline(never)]
     #[cold]
@@ -195,10 +193,10 @@ where
 }

 #[derive(Clone)]
-pub(crate) struct CycleError<D: DepKind> {
+pub(crate) struct CycleError {
     /// The query and related span that uses the cycle.
-    pub usage: Option<(Span, QueryStackFrame<D>)>,
-    pub cycle: Vec<QueryInfo<D>>,
+    pub usage: Option<(Span, QueryStackFrame)>,
+    pub cycle: Vec<QueryInfo>,
 }

 /// Checks if the query is already computed and in the cache.
@@ -248,7 +246,7 @@ fn wait_for_query<Q, Qcx>(
     qcx: Qcx,
     span: Span,
     key: Q::Key,
-    latch: QueryLatch<Qcx::DepKind>,
+    latch: QueryLatch,
     current: Option<QueryJobId>,
 ) -> (Q::Value, Option<DepNodeIndex>)
 where
@@ -296,7 +294,7 @@ fn try_execute_query<Q, Qcx, const INCR: bool>(
     qcx: Qcx,
     span: Span,
     key: Q::Key,
-    dep_node: Option<DepNode<Qcx::DepKind>>,
+    dep_node: Option<DepNode>,
 ) -> (Q::Value, Option<DepNodeIndex>)
 where
     Q: QueryConfig<Qcx>,
@@ -364,10 +362,10 @@ where
 fn execute_job<Q, Qcx, const INCR: bool>(
     query: Q,
     qcx: Qcx,
-    state: &QueryState<Q::Key, Qcx::DepKind>,
+    state: &QueryState<Q::Key>,
     key: Q::Key,
     id: QueryJobId,
-    dep_node: Option<DepNode<Qcx::DepKind>>,
+    dep_node: Option<DepNode>,
 ) -> (Q::Value, Option<DepNodeIndex>)
 where
     Q: QueryConfig<Qcx>,
@@ -474,9 +472,9 @@ where
 fn execute_job_incr<Q, Qcx>(
     query: Q,
     qcx: Qcx,
-    dep_graph_data: &DepGraphData<Qcx::DepKind>,
+    dep_graph_data: &DepGraphData<Qcx::Deps>,
     key: Q::Key,
-    mut dep_node_opt: Option<DepNode<Qcx::DepKind>>,
+    mut dep_node_opt: Option<DepNode>,
     job_id: QueryJobId,
 ) -> (Q::Value, DepNodeIndex)
 where
@@ -540,10 +538,10 @@ where
 #[inline(always)]
 fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
     query: Q,
-    dep_graph_data: &DepGraphData<Qcx::DepKind>,
+    dep_graph_data: &DepGraphData<Qcx::Deps>,
     qcx: Qcx,
     key: &Q::Key,
-    dep_node: &DepNode<Qcx::DepKind>,
+    dep_node: &DepNode,
 ) -> Option<(Q::Value, DepNodeIndex)>
 where
     Q: QueryConfig<Qcx>,
@@ -637,7 +635,7 @@ where
 #[instrument(skip(tcx, dep_graph_data, result, hash_result, format_value), level = "debug")]
 pub(crate) fn incremental_verify_ich<Tcx, V>(
     tcx: Tcx,
-    dep_graph_data: &DepGraphData<Tcx::DepKind>,
+    dep_graph_data: &DepGraphData<Tcx::Deps>,
     result: &V,
     prev_index: SerializedDepNodeIndex,
     hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
@@ -730,7 +728,7 @@ fn ensure_must_run<Q, Qcx>(
     qcx: Qcx,
     key: &Q::Key,
     check_cache: bool,
-) -> (bool, Option<DepNode<Qcx::DepKind>>)
+) -> (bool, Option<DepNode>)
 where
     Q: QueryConfig<Qcx>,
     Qcx: QueryContext,
@@ -821,12 +819,8 @@ where
     Some(result)
 }

-pub fn force_query<Q, Qcx>(
-    query: Q,
-    qcx: Qcx,
-    key: Q::Key,
-    dep_node: DepNode<<Qcx as HasDepContext>::DepKind>,
-) where
+pub fn force_query<Q, Qcx>(query: Q, qcx: Qcx, key: Q::Key, dep_node: DepNode)
+where
     Q: QueryConfig<Qcx>,
     Qcx: QueryContext,
 {