rustc_query_system/query/plumbing.rs

//! The implementation of the query system itself. This defines the macros that
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.
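//!
//! As a rough sketch, each generated query method boils down to a
//! cache-or-execute pattern along these lines (hypothetical names, not the
//! actual generated code):
//!
//! ```ignore (illustrative sketch)
//! fn query_method(tcx: TyCtxt<'_>, key: Key) -> Value {
//!     // Fast path: the result is already in the in-memory query cache.
//!     if let Some(value) = try_get_cached(tcx, &cache, &key) {
//!         return value;
//!     }
//!     // Slow path: register a job, run the provider (or load the result from
//!     // the incremental on-disk cache), then fill the in-memory cache.
//!     try_execute_query(query, tcx, span, key, dep_node).0
//! }
//! ```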

use std::cell::Cell;
use std::fmt::Debug;
use std::hash::Hash;
use std::mem;

use hashbrown::HashTable;
use hashbrown::hash_table::Entry;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_data_structures::sync::LockGuard;
use rustc_data_structures::{outline, sync};
use rustc_errors::{Diag, FatalError, StashKey};
use rustc_span::{DUMMY_SP, Span};
use tracing::instrument;

use super::{QueryConfig, QueryStackFrameExtra};
use crate::HandleCycleError;
use crate::dep_graph::{DepContext, DepGraphData, DepNode, DepNodeIndex, DepNodeParams};
use crate::ich::StableHashingContext;
use crate::query::caches::QueryCache;
use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryLatch, report_cycle};
use crate::query::{QueryContext, QueryMap, QueryStackFrame, SerializedDepNodeIndex};

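/// Builds the equality predicate used with the raw `hashbrown::HashTable`s
/// below, which store `(key, value)` pairs and are probed with a precomputed
/// hash plus an equality closure. A minimal sketch of that raw-table pattern
/// (illustrative only, not rustc code):
///
/// ```ignore (illustrative sketch)
/// use hashbrown::HashTable;
///
/// let hash = |k: &u32| u64::from(*k); // stand-in for `sharded::make_hash`
/// let mut table: HashTable<(u32, &str)> = HashTable::new();
/// table.insert_unique(hash(&1), (1, "one"), |(k, _)| hash(k));
/// assert!(table.find(hash(&1), equivalent_key(&1)).is_some());
/// ```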
#[inline]
fn equivalent_key<K: Eq, V>(k: &K) -> impl Fn(&(K, V)) -> bool + '_ {
    move |x| x.0 == *k
}

pub struct QueryState<K, I> {
    active: Sharded<hashbrown::HashTable<(K, QueryResult<I>)>>,
}

/// Indicates the state of a query for a given key in a query map.
enum QueryResult<I> {
    /// An already executing query. The query job can be used to wait for its completion.
    Started(QueryJob<I>),

    /// The query panicked. Queries trying to wait on this will raise a fatal error, which
    /// will silently panic.
    Poisoned,
}

impl<I> QueryResult<I> {
    /// Unwraps the query job expecting that it has started.
    fn expect_job(self) -> QueryJob<I> {
        match self {
            Self::Started(job) => job,
            Self::Poisoned => {
                panic!("job for query failed to start and was poisoned")
            }
        }
    }
}

impl<K, I> QueryState<K, I>
where
    K: Eq + Hash + Copy + Debug,
{
    pub fn all_inactive(&self) -> bool {
        self.active.lock_shards().all(|shard| shard.is_empty())
    }

    pub fn collect_active_jobs<Qcx: Copy>(
        &self,
        qcx: Qcx,
        make_query: fn(Qcx, K) -> QueryStackFrame<I>,
        jobs: &mut QueryMap<I>,
        require_complete: bool,
    ) -> Option<()> {
        let mut active = Vec::new();

        let mut collect = |iter: LockGuard<'_, HashTable<(K, QueryResult<I>)>>| {
            for (k, v) in iter.iter() {
                if let QueryResult::Started(ref job) = *v {
                    active.push((*k, job.clone()));
                }
            }
        };

        if require_complete {
            for shard in self.active.lock_shards() {
                collect(shard);
            }
        } else {
            // We use `try_lock_shards` here since we may be called from the
            // deadlock handler, where the shards could already be locked.
            for shard in self.active.try_lock_shards() {
                collect(shard?);
            }
        }

        // Call `make_query` while we're not holding a `self.active` lock, as `make_query` may
        // itself call queries, leading to a deadlock.
        for (key, job) in active {
            let query = make_query(qcx, key);
            jobs.insert(job.id, QueryJobInfo { query, job });
        }

        Some(())
    }
}

impl<K, I> Default for QueryState<K, I> {
    fn default() -> QueryState<K, I> {
        QueryState { active: Default::default() }
    }
}

/// A type representing the responsibility to execute the query job for `key`.
/// Dropping this without first calling `complete` will poison the relevant query.
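///
/// A minimal sketch of the intended RAII flow (hypothetical surrounding code;
/// the real logic lives in `complete` and the `Drop` impl below):
///
/// ```ignore (illustrative sketch)
/// let job_owner = JobOwner { state, key };
/// let result = run_provider(); // a panic here drops `job_owner` and poisons `key`
/// job_owner.complete(cache, key_hash, result, dep_node_index); // forgets `job_owner`
/// ```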
struct JobOwner<'tcx, K, I>
where
    K: Eq + Hash + Copy,
{
    state: &'tcx QueryState<K, I>,
    key: K,
}

#[cold]
#[inline(never)]
fn mk_cycle<Q, Qcx>(query: Q, qcx: Qcx, cycle_error: CycleError) -> Q::Value
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    let error = report_cycle(qcx.dep_context().sess(), &cycle_error);
    handle_cycle_error(query, qcx, &cycle_error, error)
}

fn handle_cycle_error<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    cycle_error: &CycleError,
    error: Diag<'_>,
) -> Q::Value
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    use HandleCycleError::*;
    match query.handle_cycle_error() {
        Error => {
            let guar = error.emit();
            query.value_from_cycle_error(*qcx.dep_context(), cycle_error, guar)
        }
        Fatal => {
            error.emit();
            qcx.dep_context().sess().dcx().abort_if_errors();
            unreachable!()
        }
        DelayBug => {
            let guar = error.delay_as_bug();
            query.value_from_cycle_error(*qcx.dep_context(), cycle_error, guar)
        }
        Stash => {
            let guar = if let Some(root) = cycle_error.cycle.first()
                && let Some(span) = root.query.info.span
            {
                error.stash(span, StashKey::Cycle).unwrap()
            } else {
                error.emit()
            };
            query.value_from_cycle_error(*qcx.dep_context(), cycle_error, guar)
        }
    }
}

impl<'tcx, K, I> JobOwner<'tcx, K, I>
where
    K: Eq + Hash + Copy,
{
    /// Completes the query by updating the query cache with the `result`,
    /// signals the waiters, and forgets the `JobOwner` so it won't poison the query.
    fn complete<C>(self, cache: &C, key_hash: u64, result: C::Value, dep_node_index: DepNodeIndex)
    where
        C: QueryCache<Key = K>,
    {
        let key = self.key;
        let state = self.state;

        // Forget ourself so our destructor won't poison the query
        mem::forget(self);

        // Mark as complete before we remove the job from the active state
        // so no other thread can re-execute this query.
        cache.complete(key, result, dep_node_index);

        let job = {
            // Don't keep the lock during the `unwrap()` of the retrieved value, or we'll taint
            // the underlying shard.
            // Since unwinding also wants to look at this map, this can also prevent a double
            // panic.
            let mut shard = state.active.lock_shard_by_hash(key_hash);
            match shard.find_entry(key_hash, equivalent_key(&key)) {
                Err(_) => None,
                Ok(occupied) => Some(occupied.remove().0.1),
            }
        };
        let job = job.expect("active query job entry").expect_job();

        job.signal_complete();
    }
}

impl<'tcx, K, I> Drop for JobOwner<'tcx, K, I>
where
    K: Eq + Hash + Copy,
{
    #[inline(never)]
    #[cold]
    fn drop(&mut self) {
        // Poison the query so jobs waiting on it panic.
        let state = self.state;
        let job = {
            let key_hash = sharded::make_hash(&self.key);
            let mut shard = state.active.lock_shard_by_hash(key_hash);
            match shard.find_entry(key_hash, equivalent_key(&self.key)) {
                Err(_) => panic!(),
                Ok(occupied) => {
                    let ((key, value), vacant) = occupied.remove();
                    vacant.insert((key, QueryResult::Poisoned));
                    value.expect_job()
                }
            }
        };
        // Also signal the completion of the job, so waiters
        // will continue execution.
        job.signal_complete();
    }
}

#[derive(Clone, Debug)]
pub struct CycleError<I = QueryStackFrameExtra> {
    /// The query and related span that uses the cycle.
    pub usage: Option<(Span, QueryStackFrame<I>)>,
    pub cycle: Vec<QueryInfo<I>>,
}

impl<I> CycleError<I> {
    fn lift<Qcx: QueryContext<QueryInfo = I>>(&self, qcx: Qcx) -> CycleError<QueryStackFrameExtra> {
        CycleError {
            usage: self.usage.as_ref().map(|(span, frame)| (*span, frame.lift(qcx))),
            cycle: self.cycle.iter().map(|info| info.lift(qcx)).collect(),
        }
    }
}

/// Checks whether there is already a value for this key in the in-memory
/// query cache, returning that value if present.
///
/// (Also performs some associated bookkeeping, if a value was found.)
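///
/// A hypothetical call site (illustrative only):
///
/// ```ignore (illustrative sketch)
/// if let Some(value) = try_get_cached(tcx, &cache, &key) {
///     // Cache hit: the dep-graph read was already recorded above.
///     return value;
/// }
/// // Cache miss: fall through to `try_execute_query`.
/// ```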
#[inline(always)]
pub fn try_get_cached<Tcx, C>(tcx: Tcx, cache: &C, key: &C::Key) -> Option<C::Value>
where
    C: QueryCache,
    Tcx: DepContext,
{
    match cache.lookup(key) {
        Some((value, index)) => {
            tcx.profiler().query_cache_hit(index.into());
            tcx.dep_graph().read_index(index);
            Some(value)
        }
        None => None,
    }
}

#[cold]
#[inline(never)]
fn cycle_error<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    try_execute: QueryJobId,
    span: Span,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // Ensure there were no errors collecting all active jobs.
    // We need the complete map to ensure we find a cycle to break.
    let query_map = qcx.collect_active_jobs(false).ok().expect("failed to collect active queries");

    let error = try_execute.find_cycle_in_stack(query_map, &qcx.current_query_job(), span);
    (mk_cycle(query, qcx, error.lift(qcx)), None)
}

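/// Blocks until the thread that is running the query job for `key` signals
/// completion, then reads the finished result out of the query cache.
///
/// The latch behaves roughly like this self-contained sketch (an assumption
/// about the mechanism; the real `QueryLatch` also participates in cycle
/// detection):
///
/// ```ignore (illustrative sketch)
/// use std::sync::{Condvar, Mutex};
///
/// struct Latch {
///     done: Mutex<bool>,
///     cv: Condvar,
/// }
///
/// impl Latch {
///     /// Called by waiters: parks until `signal` flips the flag.
///     fn wait(&self) {
///         let mut done = self.done.lock().unwrap();
///         while !*done {
///             done = self.cv.wait(done).unwrap();
///         }
///     }
///     /// Called once by the job owner when the result is in the cache.
///     fn signal(&self) {
///         *self.done.lock().unwrap() = true;
///         self.cv.notify_all();
///     }
/// }
/// ```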
#[inline(always)]
fn wait_for_query<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    span: Span,
    key: Q::Key,
    latch: QueryLatch<Qcx::QueryInfo>,
    current: Option<QueryJobId>,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // For parallel queries, we'll block and wait until the query running
    // in another thread has completed. Record how long we wait in the
    // self-profiler.
    let query_blocked_prof_timer = qcx.dep_context().profiler().query_blocked();

    // With parallel queries we might just have to wait on some other
    // thread.
    let result = latch.wait_on(qcx, current, span);

    match result {
        Ok(()) => {
            let Some((v, index)) = query.query_cache(qcx).lookup(&key) else {
                outline(|| {
                    // We didn't find the query result in the query cache. Check if it was
                    // poisoned due to a panic instead.
                    let key_hash = sharded::make_hash(&key);
                    let shard = query.query_state(qcx).active.lock_shard_by_hash(key_hash);
                    match shard.find(key_hash, equivalent_key(&key)) {
                        // The query we waited on panicked. Continue unwinding here.
                        Some((_, QueryResult::Poisoned)) => FatalError.raise(),
                        _ => panic!(
                            "query '{}' result must be in the cache or the query must be poisoned after a wait",
                            query.name()
                        ),
                    }
                })
            };

            qcx.dep_context().profiler().query_cache_hit(index.into());
            query_blocked_prof_timer.finish_with_query_invocation_id(index.into());

            (v, Some(index))
        }
        Err(cycle) => (mk_cycle(query, qcx, cycle.lift(qcx)), None),
    }
}

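/// Coordinates the cache lookup, job registration, and cycle/wait handling for
/// a single query invocation. The concurrency shape is a double-checked
/// "cache, then in-flight state" pattern; a minimal self-contained sketch with
/// a hypothetical memoizer (the real code records an in-flight job and drops
/// the lock before computing, instead of computing under the lock):
///
/// ```ignore (illustrative sketch)
/// use std::collections::HashMap;
/// use std::sync::Mutex;
///
/// fn get_or_compute(cache: &Mutex<HashMap<u32, u64>>, key: u32) -> u64 {
///     if let Some(&v) = cache.lock().unwrap().get(&key) {
///         return v; // first check (sharded and cheaper in the real code)
///     }
///     let mut guard = cache.lock().unwrap();
///     if let Some(&v) = guard.get(&key) {
///         return v; // second check: another thread finished while we raced
///     }
///     let v = u64::from(key) * 2; // stand-in for running the provider
///     guard.insert(key, v);
///     v
/// }
/// ```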
#[inline(never)]
fn try_execute_query<Q, Qcx, const INCR: bool>(
    query: Q,
    qcx: Qcx,
    span: Span,
    key: Q::Key,
    dep_node: Option<DepNode>,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    let state = query.query_state(qcx);
    let key_hash = sharded::make_hash(&key);
    let mut state_lock = state.active.lock_shard_by_hash(key_hash);

    // For the parallel compiler we need to check both the query cache and query state structures
    // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
    // query is not still executing. Without checking the query cache here, we can end up
    // re-executing the query, since `try_start` only checks that the query is not currently
    // executing, but another thread may have already completed the query and stored its result
    // in the query cache.
    if qcx.dep_context().sess().threads() > 1 {
        if let Some((value, index)) = query.query_cache(qcx).lookup(&key) {
            qcx.dep_context().profiler().query_cache_hit(index.into());
            return (value, Some(index));
        }
    }

    let current_job_id = qcx.current_query_job();

    match state_lock.entry(key_hash, equivalent_key(&key), |(k, _)| sharded::make_hash(k)) {
        Entry::Vacant(entry) => {
            // Nothing has computed or is computing the query, so we start a new job and insert
            // it in the state map.
            let id = qcx.next_job_id();
            let job = QueryJob::new(id, span, current_job_id);
            entry.insert((key, QueryResult::Started(job)));

            // Drop the lock before we start executing the query
            drop(state_lock);

            execute_job::<_, _, INCR>(query, qcx, state, key, key_hash, id, dep_node)
        }
        Entry::Occupied(mut entry) => {
            match &mut entry.get_mut().1 {
                QueryResult::Started(job) => {
                    if sync::is_dyn_thread_safe() {
                        // Get the latch out
                        let latch = job.latch();
                        drop(state_lock);

                        // Only call `wait_for_query` if we're using a Rayon thread pool
                        // as it will attempt to mark the worker thread as blocked.
                        return wait_for_query(query, qcx, span, key, latch, current_job_id);
                    }

                    let id = job.id;
                    drop(state_lock);

                    // If we are single-threaded we know that we have a cycle error,
                    // so we just return the error.
                    cycle_error(query, qcx, id, span)
                }
                QueryResult::Poisoned => FatalError.raise(),
            }
        }
    }
}

#[inline(always)]
fn execute_job<Q, Qcx, const INCR: bool>(
    query: Q,
    qcx: Qcx,
    state: &QueryState<Q::Key, Qcx::QueryInfo>,
    key: Q::Key,
    key_hash: u64,
    id: QueryJobId,
    dep_node: Option<DepNode>,
) -> (Q::Value, Option<DepNodeIndex>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // Use `JobOwner` so the query will be poisoned if executing it panics.
    let job_owner = JobOwner { state, key };

    debug_assert_eq!(qcx.dep_context().dep_graph().is_fully_enabled(), INCR);

    let (result, dep_node_index) = if INCR {
        execute_job_incr(
            query,
            qcx,
            qcx.dep_context().dep_graph().data().unwrap(),
            key,
            dep_node,
            id,
        )
    } else {
        execute_job_non_incr(query, qcx, key, id)
    };

    let cache = query.query_cache(qcx);
    if query.feedable() {
        // We should not compute queries that also got a value via feeding.
        // This can't happen, as query feeding adds the very same dependencies to the fed query
        // as its feeding query had. So if the fed query is red, so is its feeder, which will
        // get evaluated first, and re-feed the query.
        if let Some((cached_result, _)) = cache.lookup(&key) {
            let Some(hasher) = query.hash_result() else {
                panic!(
                    "no_hash fed query later has its value computed.\n\
                    Remove `no_hash` modifier to allow recomputation.\n\
                    The already cached value: {}",
                    (query.format_value())(&cached_result)
                );
            };

            let (old_hash, new_hash) = qcx.dep_context().with_stable_hashing_context(|mut hcx| {
                (hasher(&mut hcx, &cached_result), hasher(&mut hcx, &result))
            });
            let formatter = query.format_value();
            if old_hash != new_hash {
                // We have an inconsistency. This can happen if one of the two
                // results is tainted by errors.
                assert!(
                    qcx.dep_context().sess().dcx().has_errors().is_some(),
                    "Computed query value for {:?}({:?}) is inconsistent with fed value,\n\
                        computed={:#?}\nfed={:#?}",
                    query.dep_kind(),
                    key,
                    formatter(&result),
                    formatter(&cached_result),
                );
            }
        }
    }
    job_owner.complete(cache, key_hash, result, dep_node_index);

    (result, Some(dep_node_index))
}

// Fast path for when incr. comp. is off.
#[inline(always)]
fn execute_job_non_incr<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    key: Q::Key,
    job_id: QueryJobId,
) -> (Q::Value, DepNodeIndex)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    debug_assert!(!qcx.dep_context().dep_graph().is_fully_enabled());

    // Fingerprint the key, just to assert that it doesn't
    // have anything we don't consider hashable
    if cfg!(debug_assertions) {
        let _ = key.to_fingerprint(*qcx.dep_context());
    }

    let prof_timer = qcx.dep_context().profiler().query_provider();
    let result = qcx.start_query(job_id, query.depth_limit(), || query.compute(qcx, key));
    let dep_node_index = qcx.dep_context().dep_graph().next_virtual_depnode_index();
    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    // Similarly, fingerprint the result to assert that
    // it doesn't have anything not considered hashable.
    if cfg!(debug_assertions)
        && let Some(hash_result) = query.hash_result()
    {
        qcx.dep_context().with_stable_hashing_context(|mut hcx| {
            hash_result(&mut hcx, &result);
        });
    }

    (result, dep_node_index)
}

#[inline(always)]
fn execute_job_incr<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    dep_graph_data: &DepGraphData<Qcx::Deps>,
    key: Q::Key,
    mut dep_node_opt: Option<DepNode>,
    job_id: QueryJobId,
) -> (Q::Value, DepNodeIndex)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    if !query.anon() && !query.eval_always() {
        // `to_dep_node` is expensive for some `DepKind`s.
        let dep_node =
            dep_node_opt.get_or_insert_with(|| query.construct_dep_node(*qcx.dep_context(), &key));

        // The diagnostics for this query will be promoted to the current session during
        // `try_mark_green()`, so we can ignore them here.
        if let Some(ret) = qcx.start_query(job_id, false, || {
            try_load_from_disk_and_cache_in_memory(query, dep_graph_data, qcx, &key, dep_node)
        }) {
            return ret;
        }
    }

    let prof_timer = qcx.dep_context().profiler().query_provider();

    let (result, dep_node_index) = qcx.start_query(job_id, query.depth_limit(), || {
        if query.anon() {
            return dep_graph_data.with_anon_task_inner(
                *qcx.dep_context(),
                query.dep_kind(),
                || query.compute(qcx, key),
            );
        }

        // `to_dep_node` is expensive for some `DepKind`s.
        let dep_node =
            dep_node_opt.unwrap_or_else(|| query.construct_dep_node(*qcx.dep_context(), &key));

        dep_graph_data.with_task(
            dep_node,
            (qcx, query),
            key,
            |(qcx, query), key| query.compute(qcx, key),
            query.hash_result(),
        )
    });

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    (result, dep_node_index)
}

#[inline(always)]
fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
    query: Q,
    dep_graph_data: &DepGraphData<Qcx::Deps>,
    qcx: Qcx,
    key: &Q::Key,
    dep_node: &DepNode,
) -> Option<(Q::Value, DepNodeIndex)>
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // Note that this function can be called concurrently for the same query.
    // We must ensure that this is handled correctly.

    let (prev_dep_node_index, dep_node_index) = dep_graph_data.try_mark_green(qcx, dep_node)?;

    debug_assert!(dep_graph_data.is_index_green(prev_dep_node_index));

    // First we try to load the result from the on-disk cache.
    // Some things are never cached on disk.
    if let Some(result) = query.try_load_from_disk(qcx, key, prev_dep_node_index, dep_node_index) {
        if std::intrinsics::unlikely(qcx.dep_context().sess().opts.unstable_opts.query_dep_graph) {
            dep_graph_data.mark_debug_loaded_from_disk(*dep_node)
        }

        let prev_fingerprint = dep_graph_data.prev_fingerprint_of(prev_dep_node_index);
        // If `-Zincremental-verify-ich` is specified, re-hash results from
        // the cache and make sure that they have the expected fingerprint.
        //
        // If not, we still seek to verify a subset of fingerprints loaded
        // from disk. Re-hashing results is fairly expensive, so we can't
        // currently afford to verify every hash. This subset should still
        // give us some coverage of potential bugs though.
        let try_verify = prev_fingerprint.split().1.as_u64().is_multiple_of(32);
        if std::intrinsics::unlikely(
            try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
        ) {
            incremental_verify_ich(
                *qcx.dep_context(),
                dep_graph_data,
                &result,
                prev_dep_node_index,
                query.hash_result(),
                query.format_value(),
            );
        }

        return Some((result, dep_node_index));
    }

    // We always expect to find a cached result for things that
    // can be forced from `DepNode`.
    debug_assert!(
        !query.cache_on_disk(*qcx.dep_context(), key)
            || !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
        "missing on-disk cache entry for {dep_node:?}"
    );

    // Sanity check for the logic in `ensure`: if the node is green and the result loadable,
    // we should actually be able to load it.
    debug_assert!(
        !query.loadable_from_disk(qcx, key, prev_dep_node_index),
        "missing on-disk cache entry for loadable {dep_node:?}"
    );

    // We could not load a result from the on-disk cache, so
    // recompute.
    let prof_timer = qcx.dep_context().profiler().query_provider();

    // The dep-graph for this computation is already in-place.
    let result = qcx.dep_context().dep_graph().with_ignore(|| query.compute(qcx, *key));

    prof_timer.finish_with_query_invocation_id(dep_node_index.into());

    // Verify that re-running the query produced a result with the expected hash.
    // This catches bugs in query implementations, turning them into ICEs.
    // For example, a query might sort its result by `DefId` - since `DefId`s are
    // not stable across compilation sessions, the result could end up getting sorted
    // in a different order when the query is re-run, even though all of the inputs
    // (e.g. `DefPathHash` values) were green.
    //
    // See issue #82920 for an example of a miscompilation that would get turned into
    // an ICE by this check.
    incremental_verify_ich(
        *qcx.dep_context(),
        dep_graph_data,
        &result,
        prev_dep_node_index,
        query.hash_result(),
        query.format_value(),
    );

    Some((result, dep_node_index))
}

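/// Conceptually, this re-hashes `result` with the stable hasher and compares
/// it against the fingerprint recorded for this node in the previous session,
/// roughly (hypothetical helper names):
///
/// ```ignore (illustrative sketch)
/// let new_hash = stable_hash_of(&result);         // recomputed right now
/// let old_hash = prev_fingerprint_of(prev_index); // loaded from disk
/// if new_hash != old_hash {
///     panic!("found unstable fingerprints"); // see `incremental_verify_ich_failed`
/// }
/// ```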
#[inline]
#[instrument(skip(tcx, dep_graph_data, result, hash_result, format_value), level = "debug")]
pub(crate) fn incremental_verify_ich<Tcx, V>(
    tcx: Tcx,
    dep_graph_data: &DepGraphData<Tcx::Deps>,
    result: &V,
    prev_index: SerializedDepNodeIndex,
    hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
    format_value: fn(&V) -> String,
) where
    Tcx: DepContext,
{
    if !dep_graph_data.is_index_green(prev_index) {
        incremental_verify_ich_not_green(tcx, prev_index)
    }

    let new_hash = hash_result.map_or(Fingerprint::ZERO, |f| {
        tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result))
    });

    let old_hash = dep_graph_data.prev_fingerprint_of(prev_index);

    if new_hash != old_hash {
        incremental_verify_ich_failed(tcx, prev_index, &|| format_value(result));
    }
}

#[cold]
#[inline(never)]
fn incremental_verify_ich_not_green<Tcx>(tcx: Tcx, prev_index: SerializedDepNodeIndex)
where
    Tcx: DepContext,
{
    panic!(
        "fingerprint for green query instance not loaded from cache: {:?}",
        tcx.dep_graph().data().unwrap().prev_node_of(prev_index)
    )
}

// Note that this is marked #[cold] and intentionally takes a `dyn Fn` closure for `result`,
// as we want to avoid generating a bunch of different implementations for LLVM to
// chew on (and filling up the final binary, too).
#[cold]
#[inline(never)]
fn incremental_verify_ich_failed<Tcx>(
    tcx: Tcx,
    prev_index: SerializedDepNodeIndex,
    result: &dyn Fn() -> String,
) where
    Tcx: DepContext,
{
    // When we emit an error message and panic, we try to debug-print the `DepNode`
    // and query result. Unfortunately, this can cause us to run additional queries,
    // which may result in another fingerprint mismatch while we're in the middle
    // of processing this one. To avoid a double-panic (which kills the process
    // before we can print out the query static), we print out a terse
    // but 'safe' message if we detect a reentrant call to this method.
    thread_local! {
        static INSIDE_VERIFY_PANIC: Cell<bool> = const { Cell::new(false) };
    }

    let old_in_panic = INSIDE_VERIFY_PANIC.replace(true);

    if old_in_panic {
        tcx.sess().dcx().emit_err(crate::error::Reentrant);
    } else {
        let run_cmd = if let Some(crate_name) = &tcx.sess().opts.crate_name {
            format!("`cargo clean -p {crate_name}` or `cargo clean`")
        } else {
            "`cargo clean`".to_string()
        };

        let dep_node = tcx.dep_graph().data().unwrap().prev_node_of(prev_index);
        tcx.sess().dcx().emit_err(crate::error::IncrementCompilation {
            run_cmd,
            dep_node: format!("{dep_node:?}"),
        });
        panic!("Found unstable fingerprints for {dep_node:?}: {}", result());
    }

    INSIDE_VERIFY_PANIC.set(old_in_panic);
}

/// Ensure that either this query has all green inputs or has been executed.
/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
/// Returns `true` if the query should still run.
///
/// This function is particularly useful when executing passes for their
/// side-effects -- e.g., in order to report errors for erroneous programs.
///
/// Note: The optimization is only available during incr. comp.
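///
/// A hypothetical caller (illustrative only):
///
/// ```ignore (illustrative sketch)
/// let (must_run, dep_node) = ensure_must_run(query, qcx, &key, check_cache);
/// if !must_run {
///     return None; // all inputs green and the result is loadable: skip execution
/// }
/// // Otherwise fall through and actually execute the query.
/// ```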
#[inline(never)]
fn ensure_must_run<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    key: &Q::Key,
    check_cache: bool,
) -> (bool, Option<DepNode>)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    if query.eval_always() {
        return (true, None);
    }

    // Ensuring an anonymous query makes no sense
    assert!(!query.anon());

    let dep_node = query.construct_dep_node(*qcx.dep_context(), key);

    let dep_graph = qcx.dep_context().dep_graph();
    let serialized_dep_node_index = match dep_graph.try_mark_green(qcx, &dep_node) {
        None => {
            // A `None` return from `try_mark_green` means that this is either
            // a new dep node or that the dep node has already been marked red.
            // Either way, we can't call `dep_graph.read()` as we don't have the
            // DepNodeIndex. We must invoke the query itself. The performance cost
            // this introduces should be negligible as we'll immediately hit the
            // in-memory cache, or another query down the line will.
            return (true, Some(dep_node));
        }
        Some((serialized_dep_node_index, dep_node_index)) => {
            dep_graph.read_index(dep_node_index);
            qcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
            serialized_dep_node_index
        }
    };

    // We do not need the value at all, so do not check the cache.
    if !check_cache {
        return (false, None);
    }

    let loadable = query.loadable_from_disk(qcx, key, serialized_dep_node_index);
    (!loadable, Some(dep_node))
}

#[derive(Debug)]
pub enum QueryMode {
    Get,
    Ensure { check_cache: bool },
}

#[inline(always)]
pub fn get_query_non_incr<Q, Qcx>(query: Q, qcx: Qcx, span: Span, key: Q::Key) -> Q::Value
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    debug_assert!(!qcx.dep_context().dep_graph().is_fully_enabled());

    ensure_sufficient_stack(|| try_execute_query::<Q, Qcx, false>(query, qcx, span, key, None).0)
}

#[inline(always)]
pub fn get_query_incr<Q, Qcx>(
    query: Q,
    qcx: Qcx,
    span: Span,
    key: Q::Key,
    mode: QueryMode,
) -> Option<Q::Value>
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    debug_assert!(qcx.dep_context().dep_graph().is_fully_enabled());

    let dep_node = if let QueryMode::Ensure { check_cache } = mode {
        let (must_run, dep_node) = ensure_must_run(query, qcx, &key, check_cache);
        if !must_run {
            return None;
        }
        dep_node
    } else {
        None
    };

    let (result, dep_node_index) = ensure_sufficient_stack(|| {
        try_execute_query::<_, _, true>(query, qcx, span, key, dep_node)
    });
    if let Some(dep_node_index) = dep_node_index {
        qcx.dep_context().dep_graph().read_index(dep_node_index)
    }
    Some(result)
}

pub fn force_query<Q, Qcx>(query: Q, qcx: Qcx, key: Q::Key, dep_node: DepNode)
where
    Q: QueryConfig<Qcx>,
    Qcx: QueryContext,
{
    // We may be concurrently trying to both execute and force a query.
    // Ensure that only one of them runs the query.
    if let Some((_, index)) = query.query_cache(qcx).lookup(&key) {
        qcx.dep_context().profiler().query_cache_hit(index.into());
        return;
    }

    debug_assert!(!query.anon());

    ensure_sufficient_stack(|| {
        try_execute_query::<_, _, true>(query, qcx, DUMMY_SP, key, Some(dep_node))
    });
}