// rustc_query_system/query/plumbing.rs

1//! The implementation of the query system itself. This defines the macros that
2//! generate the actual methods on tcx which find and execute the provider,
3//! manage the caches, and so forth.
4
5use std::cell::Cell;
6use std::fmt::Debug;
7use std::hash::Hash;
8use std::mem;
9
10use rustc_data_structures::fingerprint::Fingerprint;
11use rustc_data_structures::hash_table::{self, Entry, HashTable};
12use rustc_data_structures::sharded::{self, Sharded};
13use rustc_data_structures::stack::ensure_sufficient_stack;
14use rustc_data_structures::{outline, sync};
15use rustc_errors::{Diag, FatalError, StashKey};
16use rustc_span::{DUMMY_SP, Span};
17use tracing::instrument;
18
19use super::{QueryDispatcher, QueryStackDeferred, QueryStackFrameExtra};
20use crate::dep_graph::{
21    DepContext, DepGraphData, DepNode, DepNodeIndex, DepNodeKey, HasDepContext,
22};
23use crate::ich::StableHashingContext;
24use crate::query::caches::QueryCache;
25use crate::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryLatch, report_cycle};
26use crate::query::{
27    CycleErrorHandling, QueryContext, QueryMap, QueryStackFrame, SerializedDepNodeIndex,
28};
29
/// Builds an equivalence predicate that matches a stored `(key, value)`
/// tuple by its key only.
///
/// Used with the raw `HashTable` find/entry APIs, which take a predicate
/// over the stored tuples rather than a key.
#[inline]
fn equivalent_key<K: Eq, V>(k: &K) -> impl Fn(&(K, V)) -> bool + '_ {
    move |x| x.0 == *k
}
34
35/// For a particular query, keeps track of "active" keys, i.e. keys whose
36/// evaluation has started but has not yet finished successfully.
37///
38/// (Successful query evaluation for a key is represented by an entry in the
39/// query's in-memory cache.)
40pub struct QueryState<'tcx, K> {
41    active: Sharded<hash_table::HashTable<(K, ActiveKeyStatus<'tcx>)>>,
42}
43
44/// For a particular query and key, tracks the status of a query evaluation
45/// that has started, but has not yet finished successfully.
46///
47/// (Successful query evaluation for a key is represented by an entry in the
48/// query's in-memory cache.)
49enum ActiveKeyStatus<'tcx> {
50    /// Some thread is already evaluating the query for this key.
51    ///
52    /// The enclosed [`QueryJob`] can be used to wait for it to finish.
53    Started(QueryJob<'tcx>),
54
55    /// The query panicked. Queries trying to wait on this will raise a fatal error which will
56    /// silently panic.
57    Poisoned,
58}
59
60impl<'tcx> ActiveKeyStatus<'tcx> {
61    /// Obtains the enclosed [`QueryJob`], or panics if this query evaluation
62    /// was poisoned by a panic.
63    fn expect_job(self) -> QueryJob<'tcx> {
64        match self {
65            Self::Started(job) => job,
66            Self::Poisoned => {
67                {
    ::core::panicking::panic_fmt(format_args!("job for query failed to start and was poisoned"));
}panic!("job for query failed to start and was poisoned")
68            }
69        }
70    }
71}
72
73impl<'tcx, K> QueryState<'tcx, K>
74where
75    K: Eq + Hash + Copy + Debug,
76{
77    pub fn all_inactive(&self) -> bool {
78        self.active.lock_shards().all(|shard| shard.is_empty())
79    }
80
81    /// Internal plumbing for collecting the set of active jobs for this query.
82    ///
83    /// Should only be called from `gather_active_jobs`.
84    pub fn gather_active_jobs_inner<Qcx: Copy>(
85        &self,
86        qcx: Qcx,
87        make_frame: fn(Qcx, K) -> QueryStackFrame<QueryStackDeferred<'tcx>>,
88        jobs: &mut QueryMap<'tcx>,
89        require_complete: bool,
90    ) -> Option<()> {
91        let mut active = Vec::new();
92
93        // Helper to gather active jobs from a single shard.
94        let mut gather_shard_jobs = |shard: &HashTable<(K, ActiveKeyStatus<'tcx>)>| {
95            for (k, v) in shard.iter() {
96                if let ActiveKeyStatus::Started(ref job) = *v {
97                    active.push((*k, job.clone()));
98                }
99            }
100        };
101
102        // Lock shards and gather jobs from each shard.
103        if require_complete {
104            for shard in self.active.lock_shards() {
105                gather_shard_jobs(&shard);
106            }
107        } else {
108            // We use try_lock_shards here since we are called from the
109            // deadlock handler, and this shouldn't be locked.
110            for shard in self.active.try_lock_shards() {
111                let shard = shard?;
112                gather_shard_jobs(&shard);
113            }
114        }
115
116        // Call `make_frame` while we're not holding a `self.active` lock as `make_frame` may call
117        // queries leading to a deadlock.
118        for (key, job) in active {
119            let frame = make_frame(qcx, key);
120            jobs.insert(job.id, QueryJobInfo { frame, job });
121        }
122
123        Some(())
124    }
125}
126
127impl<'tcx, K> Default for QueryState<'tcx, K> {
128    fn default() -> QueryState<'tcx, K> {
129        QueryState { active: Default::default() }
130    }
131}
132
133/// A type representing the responsibility to execute the job in the `job` field.
134/// This will poison the relevant query if dropped.
135struct JobOwner<'tcx, K>
136where
137    K: Eq + Hash + Copy,
138{
139    state: &'tcx QueryState<'tcx, K>,
140    key: K,
141}
142
143#[cold]
144#[inline(never)]
145fn mk_cycle<'tcx, Q>(query: Q, qcx: Q::Qcx, cycle_error: CycleError) -> Q::Value
146where
147    Q: QueryDispatcher<'tcx>,
148{
149    let error = report_cycle(qcx.dep_context().sess(), &cycle_error);
150    handle_cycle_error(query, qcx, &cycle_error, error)
151}
152
153fn handle_cycle_error<'tcx, Q>(
154    query: Q,
155    qcx: Q::Qcx,
156    cycle_error: &CycleError,
157    error: Diag<'_>,
158) -> Q::Value
159where
160    Q: QueryDispatcher<'tcx>,
161{
162    match query.cycle_error_handling() {
163        CycleErrorHandling::Error => {
164            let guar = error.emit();
165            query.value_from_cycle_error(*qcx.dep_context(), cycle_error, guar)
166        }
167        CycleErrorHandling::Fatal => {
168            error.emit();
169            qcx.dep_context().sess().dcx().abort_if_errors();
170            ::core::panicking::panic("internal error: entered unreachable code")unreachable!()
171        }
172        CycleErrorHandling::DelayBug => {
173            let guar = error.delay_as_bug();
174            query.value_from_cycle_error(*qcx.dep_context(), cycle_error, guar)
175        }
176        CycleErrorHandling::Stash => {
177            let guar = if let Some(root) = cycle_error.cycle.first()
178                && let Some(span) = root.frame.info.span
179            {
180                error.stash(span, StashKey::Cycle).unwrap()
181            } else {
182                error.emit()
183            };
184            query.value_from_cycle_error(*qcx.dep_context(), cycle_error, guar)
185        }
186    }
187}
188
189impl<'tcx, K> JobOwner<'tcx, K>
190where
191    K: Eq + Hash + Copy,
192{
193    /// Completes the query by updating the query cache with the `result`,
194    /// signals the waiter and forgets the JobOwner, so it won't poison the query
195    fn complete<C>(self, cache: &C, key_hash: u64, result: C::Value, dep_node_index: DepNodeIndex)
196    where
197        C: QueryCache<Key = K>,
198    {
199        let key = self.key;
200        let state = self.state;
201
202        // Forget ourself so our destructor won't poison the query
203        mem::forget(self);
204
205        // Mark as complete before we remove the job from the active state
206        // so no other thread can re-execute this query.
207        cache.complete(key, result, dep_node_index);
208
209        let job = {
210            // don't keep the lock during the `unwrap()` of the retrieved value, or we taint the
211            // underlying shard.
212            // since unwinding also wants to look at this map, this can also prevent a double
213            // panic.
214            let mut shard = state.active.lock_shard_by_hash(key_hash);
215            match shard.find_entry(key_hash, equivalent_key(&key)) {
216                Err(_) => None,
217                Ok(occupied) => Some(occupied.remove().0.1),
218            }
219        };
220        let job = job.expect("active query job entry").expect_job();
221
222        job.signal_complete();
223    }
224}
225
226impl<'tcx, K> Drop for JobOwner<'tcx, K>
227where
228    K: Eq + Hash + Copy,
229{
230    #[inline(never)]
231    #[cold]
232    fn drop(&mut self) {
233        // Poison the query so jobs waiting on it panic.
234        let state = self.state;
235        let job = {
236            let key_hash = sharded::make_hash(&self.key);
237            let mut shard = state.active.lock_shard_by_hash(key_hash);
238            match shard.find_entry(key_hash, equivalent_key(&self.key)) {
239                Err(_) => ::core::panicking::panic("explicit panic")panic!(),
240                Ok(occupied) => {
241                    let ((key, value), vacant) = occupied.remove();
242                    vacant.insert((key, ActiveKeyStatus::Poisoned));
243                    value.expect_job()
244                }
245            }
246        };
247        // Also signal the completion of the job, so waiters
248        // will continue execution.
249        job.signal_complete();
250    }
251}
252
253#[derive(#[automatically_derived]
impl<I: ::core::clone::Clone> ::core::clone::Clone for CycleError<I> {
    #[inline]
    fn clone(&self) -> CycleError<I> {
        CycleError {
            usage: ::core::clone::Clone::clone(&self.usage),
            cycle: ::core::clone::Clone::clone(&self.cycle),
        }
    }
}Clone, #[automatically_derived]
impl<I: ::core::fmt::Debug> ::core::fmt::Debug for CycleError<I> {
    #[inline]
    fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
        ::core::fmt::Formatter::debug_struct_field2_finish(f, "CycleError",
            "usage", &self.usage, "cycle", &&self.cycle)
    }
}Debug)]
254pub struct CycleError<I = QueryStackFrameExtra> {
255    /// The query and related span that uses the cycle.
256    pub usage: Option<(Span, QueryStackFrame<I>)>,
257    pub cycle: Vec<QueryInfo<I>>,
258}
259
260impl<'tcx> CycleError<QueryStackDeferred<'tcx>> {
261    fn lift(&self) -> CycleError<QueryStackFrameExtra> {
262        CycleError {
263            usage: self.usage.as_ref().map(|(span, frame)| (*span, frame.lift())),
264            cycle: self.cycle.iter().map(|info| info.lift()).collect(),
265        }
266    }
267}
268
269/// Checks whether there is already a value for this key in the in-memory
270/// query cache, returning that value if present.
271///
272/// (Also performs some associated bookkeeping, if a value was found.)
273#[inline(always)]
274pub fn try_get_cached<Tcx, C>(tcx: Tcx, cache: &C, key: &C::Key) -> Option<C::Value>
275where
276    C: QueryCache,
277    Tcx: DepContext,
278{
279    match cache.lookup(key) {
280        Some((value, index)) => {
281            tcx.profiler().query_cache_hit(index.into());
282            tcx.dep_graph().read_index(index);
283            Some(value)
284        }
285        None => None,
286    }
287}
288
289#[cold]
290#[inline(never)]
291fn cycle_error<'tcx, Q>(
292    query: Q,
293    qcx: Q::Qcx,
294    try_execute: QueryJobId,
295    span: Span,
296) -> (Q::Value, Option<DepNodeIndex>)
297where
298    Q: QueryDispatcher<'tcx>,
299{
300    // Ensure there was no errors collecting all active jobs.
301    // We need the complete map to ensure we find a cycle to break.
302    let query_map = qcx
303        .collect_active_jobs_from_all_queries(false)
304        .ok()
305        .expect("failed to collect active queries");
306
307    let error = try_execute.find_cycle_in_stack(query_map, &qcx.current_query_job(), span);
308    (mk_cycle(query, qcx, error.lift()), None)
309}
310
311#[inline(always)]
312fn wait_for_query<'tcx, Q>(
313    query: Q,
314    qcx: Q::Qcx,
315    span: Span,
316    key: Q::Key,
317    latch: QueryLatch<'tcx>,
318    current: Option<QueryJobId>,
319) -> (Q::Value, Option<DepNodeIndex>)
320where
321    Q: QueryDispatcher<'tcx>,
322{
323    // For parallel queries, we'll block and wait until the query running
324    // in another thread has completed. Record how long we wait in the
325    // self-profiler.
326    let query_blocked_prof_timer = qcx.dep_context().profiler().query_blocked();
327
328    // With parallel queries we might just have to wait on some other
329    // thread.
330    let result = latch.wait_on(qcx, current, span);
331
332    match result {
333        Ok(()) => {
334            let Some((v, index)) = query.query_cache(qcx).lookup(&key) else {
335                outline(|| {
336                    // We didn't find the query result in the query cache. Check if it was
337                    // poisoned due to a panic instead.
338                    let key_hash = sharded::make_hash(&key);
339                    let shard = query.query_state(qcx).active.lock_shard_by_hash(key_hash);
340                    match shard.find(key_hash, equivalent_key(&key)) {
341                        // The query we waited on panicked. Continue unwinding here.
342                        Some((_, ActiveKeyStatus::Poisoned)) => FatalError.raise(),
343                        _ => {
    ::core::panicking::panic_fmt(format_args!("query \'{0}\' result must be in the cache or the query must be poisoned after a wait",
            query.name()));
}panic!(
344                            "query '{}' result must be in the cache or the query must be poisoned after a wait",
345                            query.name()
346                        ),
347                    }
348                })
349            };
350
351            qcx.dep_context().profiler().query_cache_hit(index.into());
352            query_blocked_prof_timer.finish_with_query_invocation_id(index.into());
353
354            (v, Some(index))
355        }
356        Err(cycle) => (mk_cycle(query, qcx, cycle.lift()), None),
357    }
358}
359
360#[inline(never)]
361fn try_execute_query<'tcx, Q, const INCR: bool>(
362    query: Q,
363    qcx: Q::Qcx,
364    span: Span,
365    key: Q::Key,
366    dep_node: Option<DepNode>,
367) -> (Q::Value, Option<DepNodeIndex>)
368where
369    Q: QueryDispatcher<'tcx>,
370{
371    let state = query.query_state(qcx);
372    let key_hash = sharded::make_hash(&key);
373    let mut state_lock = state.active.lock_shard_by_hash(key_hash);
374
375    // For the parallel compiler we need to check both the query cache and query state structures
376    // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
377    // query is not still executing. Without checking the query cache here, we can end up
378    // re-executing the query since `try_start` only checks that the query is not currently
379    // executing, but another thread may have already completed the query and stores it result
380    // in the query cache.
381    if qcx.dep_context().sess().threads() > 1 {
382        if let Some((value, index)) = query.query_cache(qcx).lookup(&key) {
383            qcx.dep_context().profiler().query_cache_hit(index.into());
384            return (value, Some(index));
385        }
386    }
387
388    let current_job_id = qcx.current_query_job();
389
390    match state_lock.entry(key_hash, equivalent_key(&key), |(k, _)| sharded::make_hash(k)) {
391        Entry::Vacant(entry) => {
392            // Nothing has computed or is computing the query, so we start a new job and insert it in the
393            // state map.
394            let id = qcx.next_job_id();
395            let job = QueryJob::new(id, span, current_job_id);
396            entry.insert((key, ActiveKeyStatus::Started(job)));
397
398            // Drop the lock before we start executing the query
399            drop(state_lock);
400
401            execute_job::<Q, INCR>(query, qcx, state, key, key_hash, id, dep_node)
402        }
403        Entry::Occupied(mut entry) => {
404            match &mut entry.get_mut().1 {
405                ActiveKeyStatus::Started(job) => {
406                    if sync::is_dyn_thread_safe() {
407                        // Get the latch out
408                        let latch = job.latch();
409                        drop(state_lock);
410
411                        // Only call `wait_for_query` if we're using a Rayon thread pool
412                        // as it will attempt to mark the worker thread as blocked.
413                        return wait_for_query(query, qcx, span, key, latch, current_job_id);
414                    }
415
416                    let id = job.id;
417                    drop(state_lock);
418
419                    // If we are single-threaded we know that we have cycle error,
420                    // so we just return the error.
421                    cycle_error(query, qcx, id, span)
422                }
423                ActiveKeyStatus::Poisoned => FatalError.raise(),
424            }
425        }
426    }
427}
428
429#[inline(always)]
430fn execute_job<'tcx, Q, const INCR: bool>(
431    query: Q,
432    qcx: Q::Qcx,
433    state: &'tcx QueryState<'tcx, Q::Key>,
434    key: Q::Key,
435    key_hash: u64,
436    id: QueryJobId,
437    dep_node: Option<DepNode>,
438) -> (Q::Value, Option<DepNodeIndex>)
439where
440    Q: QueryDispatcher<'tcx>,
441{
442    // Use `JobOwner` so the query will be poisoned if executing it panics.
443    let job_owner = JobOwner { state, key };
444
445    if true {
    match (&qcx.dep_context().dep_graph().is_fully_enabled(), &INCR) {
        (left_val, right_val) => {
            if !(*left_val == *right_val) {
                let kind = ::core::panicking::AssertKind::Eq;
                ::core::panicking::assert_failed(kind, &*left_val,
                    &*right_val, ::core::option::Option::None);
            }
        }
    };
};debug_assert_eq!(qcx.dep_context().dep_graph().is_fully_enabled(), INCR);
446
447    let (result, dep_node_index) = if INCR {
448        execute_job_incr(
449            query,
450            qcx,
451            qcx.dep_context().dep_graph().data().unwrap(),
452            key,
453            dep_node,
454            id,
455        )
456    } else {
457        execute_job_non_incr(query, qcx, key, id)
458    };
459
460    let cache = query.query_cache(qcx);
461    if query.feedable() {
462        // We should not compute queries that also got a value via feeding.
463        // This can't happen, as query feeding adds the very dependencies to the fed query
464        // as its feeding query had. So if the fed query is red, so is its feeder, which will
465        // get evaluated first, and re-feed the query.
466        if let Some((cached_result, _)) = cache.lookup(&key) {
467            let Some(hasher) = query.hash_result() else {
468                {
    ::core::panicking::panic_fmt(format_args!("no_hash fed query later has its value computed.\nRemove `no_hash` modifier to allow recomputation.\nThe already cached value: {0}",
            (query.format_value())(&cached_result)));
};panic!(
469                    "no_hash fed query later has its value computed.\n\
470                    Remove `no_hash` modifier to allow recomputation.\n\
471                    The already cached value: {}",
472                    (query.format_value())(&cached_result)
473                );
474            };
475
476            let (old_hash, new_hash) = qcx.dep_context().with_stable_hashing_context(|mut hcx| {
477                (hasher(&mut hcx, &cached_result), hasher(&mut hcx, &result))
478            });
479            let formatter = query.format_value();
480            if old_hash != new_hash {
481                // We have an inconsistency. This can happen if one of the two
482                // results is tainted by errors.
483                if !qcx.dep_context().sess().dcx().has_errors().is_some() {
    {
        ::core::panicking::panic_fmt(format_args!("Computed query value for {0:?}({1:?}) is inconsistent with fed value,\ncomputed={2:#?}\nfed={3:#?}",
                query.dep_kind(), key, formatter(&result),
                formatter(&cached_result)));
    }
};assert!(
484                    qcx.dep_context().sess().dcx().has_errors().is_some(),
485                    "Computed query value for {:?}({:?}) is inconsistent with fed value,\n\
486                        computed={:#?}\nfed={:#?}",
487                    query.dep_kind(),
488                    key,
489                    formatter(&result),
490                    formatter(&cached_result),
491                );
492            }
493        }
494    }
495    job_owner.complete(cache, key_hash, result, dep_node_index);
496
497    (result, Some(dep_node_index))
498}
499
500// Fast path for when incr. comp. is off.
501#[inline(always)]
502fn execute_job_non_incr<'tcx, Q>(
503    query: Q,
504    qcx: Q::Qcx,
505    key: Q::Key,
506    job_id: QueryJobId,
507) -> (Q::Value, DepNodeIndex)
508where
509    Q: QueryDispatcher<'tcx>,
510{
511    if true {
    if !!qcx.dep_context().dep_graph().is_fully_enabled() {
        ::core::panicking::panic("assertion failed: !qcx.dep_context().dep_graph().is_fully_enabled()")
    };
};debug_assert!(!qcx.dep_context().dep_graph().is_fully_enabled());
512
513    // Fingerprint the key, just to assert that it doesn't
514    // have anything we don't consider hashable
515    if truecfg!(debug_assertions) {
516        let _ = key.to_fingerprint(*qcx.dep_context());
517    }
518
519    let prof_timer = qcx.dep_context().profiler().query_provider();
520    let result = qcx.start_query(job_id, query.depth_limit(), || query.compute(qcx, key));
521    let dep_node_index = qcx.dep_context().dep_graph().next_virtual_depnode_index();
522    prof_timer.finish_with_query_invocation_id(dep_node_index.into());
523
524    // Similarly, fingerprint the result to assert that
525    // it doesn't have anything not considered hashable.
526    if truecfg!(debug_assertions)
527        && let Some(hash_result) = query.hash_result()
528    {
529        qcx.dep_context().with_stable_hashing_context(|mut hcx| {
530            hash_result(&mut hcx, &result);
531        });
532    }
533
534    (result, dep_node_index)
535}
536
537#[inline(always)]
538fn execute_job_incr<'tcx, Q>(
539    query: Q,
540    qcx: Q::Qcx,
541    dep_graph_data: &DepGraphData<<Q::Qcx as HasDepContext>::Deps>,
542    key: Q::Key,
543    mut dep_node_opt: Option<DepNode>,
544    job_id: QueryJobId,
545) -> (Q::Value, DepNodeIndex)
546where
547    Q: QueryDispatcher<'tcx>,
548{
549    if !query.anon() && !query.eval_always() {
550        // `to_dep_node` is expensive for some `DepKind`s.
551        let dep_node =
552            dep_node_opt.get_or_insert_with(|| query.construct_dep_node(*qcx.dep_context(), &key));
553
554        // The diagnostics for this query will be promoted to the current session during
555        // `try_mark_green()`, so we can ignore them here.
556        if let Some(ret) = qcx.start_query(job_id, false, || {
557            try_load_from_disk_and_cache_in_memory(query, dep_graph_data, qcx, &key, dep_node)
558        }) {
559            return ret;
560        }
561    }
562
563    let prof_timer = qcx.dep_context().profiler().query_provider();
564
565    let (result, dep_node_index) = qcx.start_query(job_id, query.depth_limit(), || {
566        if query.anon() {
567            return dep_graph_data.with_anon_task_inner(
568                *qcx.dep_context(),
569                query.dep_kind(),
570                || query.compute(qcx, key),
571            );
572        }
573
574        // `to_dep_node` is expensive for some `DepKind`s.
575        let dep_node =
576            dep_node_opt.unwrap_or_else(|| query.construct_dep_node(*qcx.dep_context(), &key));
577
578        dep_graph_data.with_task(
579            dep_node,
580            (qcx, query),
581            key,
582            |(qcx, query), key| query.compute(qcx, key),
583            query.hash_result(),
584        )
585    });
586
587    prof_timer.finish_with_query_invocation_id(dep_node_index.into());
588
589    (result, dep_node_index)
590}
591
592#[inline(always)]
593fn try_load_from_disk_and_cache_in_memory<'tcx, Q>(
594    query: Q,
595    dep_graph_data: &DepGraphData<<Q::Qcx as HasDepContext>::Deps>,
596    qcx: Q::Qcx,
597    key: &Q::Key,
598    dep_node: &DepNode,
599) -> Option<(Q::Value, DepNodeIndex)>
600where
601    Q: QueryDispatcher<'tcx>,
602{
603    // Note this function can be called concurrently from the same query
604    // We must ensure that this is handled correctly.
605
606    let (prev_dep_node_index, dep_node_index) = dep_graph_data.try_mark_green(qcx, dep_node)?;
607
608    if true {
    if !dep_graph_data.is_index_green(prev_dep_node_index) {
        ::core::panicking::panic("assertion failed: dep_graph_data.is_index_green(prev_dep_node_index)")
    };
};debug_assert!(dep_graph_data.is_index_green(prev_dep_node_index));
609
610    // First we try to load the result from the on-disk cache.
611    // Some things are never cached on disk.
612    if let Some(result) = query.try_load_from_disk(qcx, key, prev_dep_node_index, dep_node_index) {
613        if std::intrinsics::unlikely(qcx.dep_context().sess().opts.unstable_opts.query_dep_graph) {
614            dep_graph_data.mark_debug_loaded_from_disk(*dep_node)
615        }
616
617        let prev_fingerprint = dep_graph_data.prev_fingerprint_of(prev_dep_node_index);
618        // If `-Zincremental-verify-ich` is specified, re-hash results from
619        // the cache and make sure that they have the expected fingerprint.
620        //
621        // If not, we still seek to verify a subset of fingerprints loaded
622        // from disk. Re-hashing results is fairly expensive, so we can't
623        // currently afford to verify every hash. This subset should still
624        // give us some coverage of potential bugs though.
625        let try_verify = prev_fingerprint.split().1.as_u64().is_multiple_of(32);
626        if std::intrinsics::unlikely(
627            try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
628        ) {
629            incremental_verify_ich(
630                *qcx.dep_context(),
631                dep_graph_data,
632                &result,
633                prev_dep_node_index,
634                query.hash_result(),
635                query.format_value(),
636            );
637        }
638
639        return Some((result, dep_node_index));
640    }
641
642    // We always expect to find a cached result for things that
643    // can be forced from `DepNode`.
644    if true {
    if !(!query.will_cache_on_disk_for_key(*qcx.dep_context(), key) ||
                !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible())
        {
        {
            ::core::panicking::panic_fmt(format_args!("missing on-disk cache entry for {0:?}",
                    dep_node));
        }
    };
};debug_assert!(
645        !query.will_cache_on_disk_for_key(*qcx.dep_context(), key)
646            || !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
647        "missing on-disk cache entry for {dep_node:?}"
648    );
649
650    // Sanity check for the logic in `ensure`: if the node is green and the result loadable,
651    // we should actually be able to load it.
652    if true {
    if !!query.is_loadable_from_disk(qcx, key, prev_dep_node_index) {
        {
            ::core::panicking::panic_fmt(format_args!("missing on-disk cache entry for loadable {0:?}",
                    dep_node));
        }
    };
};debug_assert!(
653        !query.is_loadable_from_disk(qcx, key, prev_dep_node_index),
654        "missing on-disk cache entry for loadable {dep_node:?}"
655    );
656
657    // We could not load a result from the on-disk cache, so
658    // recompute.
659    let prof_timer = qcx.dep_context().profiler().query_provider();
660
661    // The dep-graph for this computation is already in-place.
662    let result = qcx.dep_context().dep_graph().with_ignore(|| query.compute(qcx, *key));
663
664    prof_timer.finish_with_query_invocation_id(dep_node_index.into());
665
666    // Verify that re-running the query produced a result with the expected hash
667    // This catches bugs in query implementations, turning them into ICEs.
668    // For example, a query might sort its result by `DefId` - since `DefId`s are
669    // not stable across compilation sessions, the result could get up getting sorted
670    // in a different order when the query is re-run, even though all of the inputs
671    // (e.g. `DefPathHash` values) were green.
672    //
673    // See issue #82920 for an example of a miscompilation that would get turned into
674    // an ICE by this check
675    incremental_verify_ich(
676        *qcx.dep_context(),
677        dep_graph_data,
678        &result,
679        prev_dep_node_index,
680        query.hash_result(),
681        query.format_value(),
682    );
683
684    Some((result, dep_node_index))
685}
686
687#[inline]
688#[allow(clippy :: suspicious_else_formatting)]
{
    let __tracing_attr_span;
    let __tracing_attr_guard;
    if ::tracing::Level::DEBUG <= ::tracing::level_filters::STATIC_MAX_LEVEL
                &&
                ::tracing::Level::DEBUG <=
                    ::tracing::level_filters::LevelFilter::current() ||
            { false } {
        __tracing_attr_span =
            {
                use ::tracing::__macro_support::Callsite as _;
                static __CALLSITE: ::tracing::callsite::DefaultCallsite =
                    {
                        static META: ::tracing::Metadata<'static> =
                            {
                                ::tracing_core::metadata::Metadata::new("incremental_verify_ich",
                                    "rustc_query_system::query::plumbing",
                                    ::tracing::Level::DEBUG,
                                    ::tracing_core::__macro_support::Option::Some("compiler/rustc_query_system/src/query/plumbing.rs"),
                                    ::tracing_core::__macro_support::Option::Some(688u32),
                                    ::tracing_core::__macro_support::Option::Some("rustc_query_system::query::plumbing"),
                                    ::tracing_core::field::FieldSet::new(&["prev_index"],
                                        ::tracing_core::callsite::Identifier(&__CALLSITE)),
                                    ::tracing::metadata::Kind::SPAN)
                            };
                        ::tracing::callsite::DefaultCallsite::new(&META)
                    };
                let mut interest = ::tracing::subscriber::Interest::never();
                if ::tracing::Level::DEBUG <=
                                    ::tracing::level_filters::STATIC_MAX_LEVEL &&
                                ::tracing::Level::DEBUG <=
                                    ::tracing::level_filters::LevelFilter::current() &&
                            { interest = __CALLSITE.interest(); !interest.is_never() }
                        &&
                        ::tracing::__macro_support::__is_enabled(__CALLSITE.metadata(),
                            interest) {
                    let meta = __CALLSITE.metadata();
                    ::tracing::Span::new(meta,
                        &{
                                #[allow(unused_imports)]
                                use ::tracing::field::{debug, display, Value};
                                let mut iter = meta.fields().iter();
                                meta.fields().value_set(&[(&::tracing::__macro_support::Iterator::next(&mut iter).expect("FieldSet corrupted (this is a bug)"),
                                                    ::tracing::__macro_support::Option::Some(&::tracing::field::debug(&prev_index)
                                                            as &dyn Value))])
                            })
                } else {
                    let span =
                        ::tracing::__macro_support::__disabled_span(__CALLSITE.metadata());
                    {};
                    span
                }
            };
        __tracing_attr_guard = __tracing_attr_span.enter();
    }

    #[warn(clippy :: suspicious_else_formatting)]
    {

        #[allow(unknown_lints, unreachable_code, clippy ::
        diverging_sub_expression, clippy :: empty_loop, clippy ::
        let_unit_value, clippy :: let_with_type_underscore, clippy ::
        needless_return, clippy :: unreachable)]
        if false {
            let __tracing_attr_fake_return: () = loop {};
            return __tracing_attr_fake_return;
        }
        {
            if !dep_graph_data.is_index_green(prev_index) {
                incremental_verify_ich_not_green(tcx, prev_index)
            }
            let new_hash =
                hash_result.map_or(Fingerprint::ZERO,
                    |f|
                        {
                            tcx.with_stable_hashing_context(|mut hcx|
                                    f(&mut hcx, result))
                        });
            let old_hash = dep_graph_data.prev_fingerprint_of(prev_index);
            if new_hash != old_hash {
                incremental_verify_ich_failed(tcx, prev_index,
                    &|| format_value(result));
            }
        }
    }
}#[instrument(skip(tcx, dep_graph_data, result, hash_result, format_value), level = "debug")]
689pub(crate) fn incremental_verify_ich<Tcx, V>(
690    tcx: Tcx,
691    dep_graph_data: &DepGraphData<Tcx::Deps>,
692    result: &V,
693    prev_index: SerializedDepNodeIndex,
694    hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
695    format_value: fn(&V) -> String,
696) where
697    Tcx: DepContext,
698{
699    if !dep_graph_data.is_index_green(prev_index) {
700        incremental_verify_ich_not_green(tcx, prev_index)
701    }
702
703    let new_hash = hash_result.map_or(Fingerprint::ZERO, |f| {
704        tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result))
705    });
706
707    let old_hash = dep_graph_data.prev_fingerprint_of(prev_index);
708
709    if new_hash != old_hash {
710        incremental_verify_ich_failed(tcx, prev_index, &|| format_value(result));
711    }
712}
713
714#[cold]
715#[inline(never)]
716fn incremental_verify_ich_not_green<Tcx>(tcx: Tcx, prev_index: SerializedDepNodeIndex)
717where
718    Tcx: DepContext,
719{
720    {
    ::core::panicking::panic_fmt(format_args!("fingerprint for green query instance not loaded from cache: {0:?}",
            tcx.dep_graph().data().unwrap().prev_node_of(prev_index)));
}panic!(
721        "fingerprint for green query instance not loaded from cache: {:?}",
722        tcx.dep_graph().data().unwrap().prev_node_of(prev_index)
723    )
724}
725
726// Note that this is marked #[cold] and intentionally takes `dyn Debug` for `result`,
727// as we want to avoid generating a bunch of different implementations for LLVM to
728// chew on (and filling up the final binary, too).
729#[cold]
730#[inline(never)]
731fn incremental_verify_ich_failed<Tcx>(
732    tcx: Tcx,
733    prev_index: SerializedDepNodeIndex,
734    result: &dyn Fn() -> String,
735) where
736    Tcx: DepContext,
737{
738    // When we emit an error message and panic, we try to debug-print the `DepNode`
739    // and query result. Unfortunately, this can cause us to run additional queries,
740    // which may result in another fingerprint mismatch while we're in the middle
741    // of processing this one. To avoid a double-panic (which kills the process
742    // before we can print out the query static), we print out a terse
743    // but 'safe' message if we detect a reentrant call to this method.
744    const INSIDE_VERIFY_PANIC: ::std::thread::LocalKey<Cell<bool>> =
    {
        const __RUST_STD_INTERNAL_INIT: Cell<bool> = { Cell::new(false) };
        unsafe {
            ::std::thread::LocalKey::new(const {
                        if ::std::mem::needs_drop::<Cell<bool>>() {
                            |_|
                                {
                                    #[thread_local]
                                    static __RUST_STD_INTERNAL_VAL:
                                        ::std::thread::local_impl::EagerStorage<Cell<bool>> =
                                        ::std::thread::local_impl::EagerStorage::new(__RUST_STD_INTERNAL_INIT);
                                    __RUST_STD_INTERNAL_VAL.get()
                                }
                        } else {
                            |_|
                                {
                                    #[thread_local]
                                    static __RUST_STD_INTERNAL_VAL: Cell<bool> =
                                        __RUST_STD_INTERNAL_INIT;
                                    &__RUST_STD_INTERNAL_VAL
                                }
                        }
                    })
        }
    };thread_local! {
745        static INSIDE_VERIFY_PANIC: Cell<bool> = const { Cell::new(false) };
746    };
747
748    let old_in_panic = INSIDE_VERIFY_PANIC.replace(true);
749
750    if old_in_panic {
751        tcx.sess().dcx().emit_err(crate::error::Reentrant);
752    } else {
753        let run_cmd = if let Some(crate_name) = &tcx.sess().opts.crate_name {
754            ::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("`cargo clean -p {0}` or `cargo clean`",
                crate_name))
    })format!("`cargo clean -p {crate_name}` or `cargo clean`")
755        } else {
756            "`cargo clean`".to_string()
757        };
758
759        let dep_node = tcx.dep_graph().data().unwrap().prev_node_of(prev_index);
760        tcx.sess().dcx().emit_err(crate::error::IncrementCompilation {
761            run_cmd,
762            dep_node: ::alloc::__export::must_use({
        ::alloc::fmt::format(format_args!("{0:?}", dep_node))
    })format!("{dep_node:?}"),
763        });
764        {
    ::core::panicking::panic_fmt(format_args!("Found unstable fingerprints for {1:?}: {0}",
            result(), dep_node));
};panic!("Found unstable fingerprints for {dep_node:?}: {}", result());
765    }
766
767    INSIDE_VERIFY_PANIC.set(old_in_panic);
768}
769
770/// Ensure that either this query has all green inputs or been executed.
771/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
772/// Returns true if the query should still run.
773///
774/// This function is particularly useful when executing passes for their
775/// side-effects -- e.g., in order to report errors for erroneous programs.
776///
777/// Note: The optimization is only available during incr. comp.
778#[inline(never)]
779fn ensure_must_run<'tcx, Q>(
780    query: Q,
781    qcx: Q::Qcx,
782    key: &Q::Key,
783    check_cache: bool,
784) -> (bool, Option<DepNode>)
785where
786    Q: QueryDispatcher<'tcx>,
787{
788    if query.eval_always() {
789        return (true, None);
790    }
791
792    // Ensuring an anonymous query makes no sense
793    if !!query.anon() {
    ::core::panicking::panic("assertion failed: !query.anon()")
};assert!(!query.anon());
794
795    let dep_node = query.construct_dep_node(*qcx.dep_context(), key);
796
797    let dep_graph = qcx.dep_context().dep_graph();
798    let serialized_dep_node_index = match dep_graph.try_mark_green(qcx, &dep_node) {
799        None => {
800            // A None return from `try_mark_green` means that this is either
801            // a new dep node or that the dep node has already been marked red.
802            // Either way, we can't call `dep_graph.read()` as we don't have the
803            // DepNodeIndex. We must invoke the query itself. The performance cost
804            // this introduces should be negligible as we'll immediately hit the
805            // in-memory cache, or another query down the line will.
806            return (true, Some(dep_node));
807        }
808        Some((serialized_dep_node_index, dep_node_index)) => {
809            dep_graph.read_index(dep_node_index);
810            qcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
811            serialized_dep_node_index
812        }
813    };
814
815    // We do not need the value at all, so do not check the cache.
816    if !check_cache {
817        return (false, None);
818    }
819
820    let loadable = query.is_loadable_from_disk(qcx, key, serialized_dep_node_index);
821    (!loadable, Some(dep_node))
822}
823
/// How a query is being requested: `Get` retrieves the value, while `Ensure`
/// only guarantees the query has been executed (or that its inputs are green),
/// optionally checking the on-disk cache (see `ensure_must_run`).
#[derive(Debug)]
pub enum QueryMode {
    Get,
    Ensure { check_cache: bool },
}
829
830#[inline(always)]
831pub fn get_query_non_incr<'tcx, Q>(query: Q, qcx: Q::Qcx, span: Span, key: Q::Key) -> Q::Value
832where
833    Q: QueryDispatcher<'tcx>,
834{
835    if true {
    if !!qcx.dep_context().dep_graph().is_fully_enabled() {
        ::core::panicking::panic("assertion failed: !qcx.dep_context().dep_graph().is_fully_enabled()")
    };
};debug_assert!(!qcx.dep_context().dep_graph().is_fully_enabled());
836
837    ensure_sufficient_stack(|| try_execute_query::<Q, false>(query, qcx, span, key, None).0)
838}
839
840#[inline(always)]
841pub fn get_query_incr<'tcx, Q>(
842    query: Q,
843    qcx: Q::Qcx,
844    span: Span,
845    key: Q::Key,
846    mode: QueryMode,
847) -> Option<Q::Value>
848where
849    Q: QueryDispatcher<'tcx>,
850{
851    if true {
    if !qcx.dep_context().dep_graph().is_fully_enabled() {
        ::core::panicking::panic("assertion failed: qcx.dep_context().dep_graph().is_fully_enabled()")
    };
};debug_assert!(qcx.dep_context().dep_graph().is_fully_enabled());
852
853    let dep_node = if let QueryMode::Ensure { check_cache } = mode {
854        let (must_run, dep_node) = ensure_must_run(query, qcx, &key, check_cache);
855        if !must_run {
856            return None;
857        }
858        dep_node
859    } else {
860        None
861    };
862
863    let (result, dep_node_index) =
864        ensure_sufficient_stack(|| try_execute_query::<Q, true>(query, qcx, span, key, dep_node));
865    if let Some(dep_node_index) = dep_node_index {
866        qcx.dep_context().dep_graph().read_index(dep_node_index)
867    }
868    Some(result)
869}
870
871pub fn force_query<'tcx, Q>(query: Q, qcx: Q::Qcx, key: Q::Key, dep_node: DepNode)
872where
873    Q: QueryDispatcher<'tcx>,
874{
875    // We may be concurrently trying both execute and force a query.
876    // Ensure that only one of them runs the query.
877    if let Some((_, index)) = query.query_cache(qcx).lookup(&key) {
878        qcx.dep_context().profiler().query_cache_hit(index.into());
879        return;
880    }
881
882    if true {
    if !!query.anon() {
        ::core::panicking::panic("assertion failed: !query.anon()")
    };
};debug_assert!(!query.anon());
883
884    ensure_sufficient_stack(|| {
885        try_execute_query::<Q, true>(query, qcx, DUMMY_SP, key, Some(dep_node))
886    });
887}