rustc_query_system/dep_graph/graph.rs

use std::assert_matches::assert_matches;
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use std::sync::Arc;
use std::sync::atomic::{AtomicU32, Ordering};

use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::outline;
use rustc_data_structures::profiling::QueryInvocationId;
use rustc_data_structures::sharded::{self, ShardedHashMap};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::{AtomicU64, Lock};
use rustc_data_structures::unord::UnordMap;
use rustc_errors::DiagInner;
use rustc_index::IndexVec;
use rustc_macros::{Decodable, Encodable};
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use rustc_session::Session;
use tracing::{debug, instrument};
#[cfg(debug_assertions)]
use {super::debug::EdgeFilter, std::env};

use super::query::DepGraphQuery;
use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
use super::{DepContext, DepKind, DepNode, Deps, HasDepContext, WorkProductId};
use crate::dep_graph::edges::EdgesVec;
use crate::ich::StableHashingContext;
use crate::query::{QueryContext, QuerySideEffect};

#[derive(Clone)]
pub struct DepGraph<D: Deps> {
    data: Option<Arc<DepGraphData<D>>>,

    /// This field is used for assigning DepNodeIndices when running in
    /// non-incremental mode. Even in non-incremental mode we make sure that
    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
    /// ID is used for self-profiling.
    virtual_dep_node_index: Arc<AtomicU32>,
}

rustc_index::newtype_index! {
    pub struct DepNodeIndex {}
}

// We store a large collection of these in `prev_index_to_index` during
// non-full incremental builds, and want to ensure that the element size
// doesn't inadvertently increase.
rustc_data_structures::static_assert_size!(Option<DepNodeIndex>, 4);

impl DepNodeIndex {
    const SINGLETON_DEPENDENCYLESS_ANON_NODE: DepNodeIndex = DepNodeIndex::ZERO;
    pub const FOREVER_RED_NODE: DepNodeIndex = DepNodeIndex::from_u32(1);
}

impl From<DepNodeIndex> for QueryInvocationId {
    #[inline(always)]
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())
    }
}

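/// A frame in the chain of dep-nodes currently being recursively marked green.
/// `print_markframe_trace` walks these frames to print the marking stack when a
/// panic occurs while forcing a node.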
pub struct MarkFrame<'a> {
    index: SerializedDepNodeIndex,
    parent: Option<&'a MarkFrame<'a>>,
}

pub(super) enum DepNodeColor {
    Red,
    Green(DepNodeIndex),
}

impl DepNodeColor {
    #[inline]
    fn is_green(self) -> bool {
        match self {
            DepNodeColor::Red => false,
            DepNodeColor::Green(_) => true,
        }
    }
}

pub(crate) struct DepGraphData<D: Deps> {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: we don't merge the previous dep-graph into
    /// the current one anymore, but we do reference shared data to save space.
    current: CurrentDepGraph<D>,

    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: Arc<SerializedDepGraph>,

    colors: DepNodeColorMap,

    /// When we load, there may be `.o` files, cached MIR, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
    previous_work_products: WorkProductMap,

    dep_node_debug: Lock<FxHashMap<DepNode, String>>,

    /// Used by incremental compilation tests to assert that
    /// a particular query result was decoded from disk
    /// (not just marked green).
    debug_loaded_from_disk: Lock<FxHashSet<DepNode>>,
}

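/// Hashes a query result with a fresh `StableHasher`. The resulting
/// `Fingerprint` is compared against the fingerprint recorded in the previous
/// session to decide whether a re-executed node is green or red.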
pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
where
    R: for<'a> HashStable<StableHashingContext<'a>>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);
    stable_hasher.finish()
}

impl<D: Deps> DepGraph<D> {
    pub fn new(
        session: &Session,
        prev_graph: Arc<SerializedDepGraph>,
        prev_work_products: WorkProductMap,
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
    ) -> DepGraph<D> {
        let prev_graph_node_count = prev_graph.node_count();

        let current = CurrentDepGraph::new(
            session,
            prev_graph_node_count,
            encoder,
            record_graph,
            record_stats,
            Arc::clone(&prev_graph),
        );

        let colors = DepNodeColorMap::new(prev_graph_node_count);

        // Instantiate a dependency-less node only once for anonymous queries.
        let _green_node_index = current.alloc_node(
            DepNode { kind: D::DEP_KIND_NULL, hash: current.anon_id_seed.into() },
            EdgesVec::new(),
            Fingerprint::ZERO,
        );
        assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE);

        // Instantiate a dependency-less red node only once for anonymous queries.
        let red_node_index = current.alloc_node(
            DepNode { kind: D::DEP_KIND_RED, hash: Fingerprint::ZERO.into() },
            EdgesVec::new(),
            Fingerprint::ZERO,
        );
        assert_eq!(red_node_index, DepNodeIndex::FOREVER_RED_NODE);
        if prev_graph_node_count > 0 {
            colors.insert(
                SerializedDepNodeIndex::from_u32(DepNodeIndex::FOREVER_RED_NODE.as_u32()),
                DepNodeColor::Red,
            );
        }

        DepGraph {
            data: Some(Arc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current,
                previous: prev_graph,
                colors,
                debug_loaded_from_disk: Default::default(),
            })),
            virtual_dep_node_index: Arc::new(AtomicU32::new(0)),
        }
    }

    pub fn new_disabled() -> DepGraph<D> {
        DepGraph { data: None, virtual_dep_node_index: Arc::new(AtomicU32::new(0)) }
    }

    #[inline]
    pub(crate) fn data(&self) -> Option<&DepGraphData<D>> {
        self.data.as_deref()
    }

    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }

    pub fn with_query(&self, f: impl Fn(&DepGraphQuery)) {
        if let Some(data) = &self.data {
            data.current.encoder.with_query(f)
        }
    }

    pub fn assert_ignored(&self) {
        if let Some(..) = self.data {
            D::read_deps(|task_deps| {
                assert_matches!(
                    task_deps,
                    TaskDepsRef::Ignore,
                    "expected no task dependency tracking"
                );
            })
        }
    }

    pub fn with_ignore<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        D::with_deps(TaskDepsRef::Ignore, op)
    }

    /// Used to wrap the deserialization of a query result from disk.
    /// This method enforces that no new `DepNodes` are created during
    /// query result deserialization.
    ///
    /// Enforcing this makes the query dep graph simpler - all nodes
    /// must be created during the query execution, and should be
    /// created from inside the 'body' of a query (the implementation
    /// provided by a particular compiler crate).
    ///
    /// Consider the case of three queries `A`, `B`, and `C`, where
    /// `A` invokes `B` and `B` invokes `C`:
    ///
    /// `A -> B -> C`
    ///
    /// Suppose that decoding the result of query `B` required re-computing
    /// the query `C`. If we did not create a fresh `TaskDeps` when
    /// decoding `B`, we would still be using the `TaskDeps` for query `A`
    /// (if we needed to re-execute `A`). This would cause us to create
    /// a new edge `A -> C`. If this edge did not previously
    /// exist in the `DepGraph`, then we could end up with a different
    /// `DepGraph` at the end of compilation, even if there were no
    /// meaningful changes to the overall program (e.g. a newline was added).
    /// In addition, this edge might cause a subsequent compilation run
    /// to try to force `C` before marking other necessary nodes green. If
    /// `C` did not exist in the new compilation session, then we could
    /// get an ICE. Normally, we would have tried (and failed) to mark
    /// some other query green (e.g. `item_children`) which was used
    /// to obtain `C`, which would prevent us from ever trying to force
    /// a nonexistent `C`.
    ///
    /// It might be possible to enforce that all `DepNode`s read during
    /// deserialization already exist in the previous `DepGraph`. In
    /// the above example, we would invoke `C` during the deserialization
    /// of `B`. Since we correctly create a new `TaskDeps` from the decoding
    /// of `B`, this would result in an edge `B -> C`. If that edge already
    /// existed (with the same `DepPathHash`es), then it should be correct
    /// to allow the invocation of the query to proceed during deserialization
    /// of a query result. We would merely assert that the dep-graph fragment
    /// that would have been added by invoking `C` while decoding `B`
    /// is equivalent to the dep-graph fragment that we already instantiated for `B`
    /// (at the point where we successfully marked `B` as green).
    ///
    /// However, this would require additional complexity
    /// in the query infrastructure, and is not currently needed by the
    /// decoding of any query results. Should the need arise in the future,
    /// we should consider extending the query system with this functionality.
    pub fn with_query_deserialization<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        D::with_deps(TaskDepsRef::Forbid, op)
    }

    #[inline(always)]
    pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
        &self,
        key: DepNode,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        match self.data() {
            Some(data) => data.with_task(key, cx, arg, task, hash_result),
            None => (task(cx, arg), self.next_virtual_depnode_index()),
        }
    }

    pub fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
        &self,
        cx: Tcx,
        dep_kind: DepKind,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        match self.data() {
            Some(data) => {
                let (result, index) = data.with_anon_task_inner(cx, dep_kind, op);
                self.read_index(index);
                (result, index)
            }
            None => (op(), self.next_virtual_depnode_index()),
        }
    }
}

impl<D: Deps> DepGraphData<D> {
    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
    /// dep-graph -- see the [rustc dev guide] for more details on
    /// the dep-graph). To this end, the task function gets exactly two
    /// pieces of state: the context `cx` and an argument `arg`. Both
    /// of these bits of state must be of some type that implements
    /// `DepGraphSafe` and hence does not leak.
    ///
    /// The choice of two arguments is not fundamental. One argument
    /// would work just as well, since multiple values can be
    /// collected using tuples. However, using two arguments works out
    /// to be quite convenient, since it is common to need a context
    /// (`cx`) and some argument (e.g., a `DefId` identifying what
    /// item to process).
    ///
    /// For cases where you need some other number of arguments:
    ///
    /// - If you only need one argument, just use `()` for the `arg`
    ///   parameter.
    /// - If you need 3+ arguments, use a tuple for the
    ///   `arg` parameter.
    ///
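    /// A sketch of a typical call site, assuming a hypothetical context `tcx`,
    /// query key `def_id`, and provider function `type_of_provider`
    /// (illustrative only, not an actual API of this crate):
    ///
    /// ```ignore (illustrative)
    /// let (result, dep_node_index) =
    ///     data.with_task(dep_node, tcx, def_id, type_of_provider, Some(hash_result));
    /// ```
    ///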
    /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/queries/incremental-compilation.html
    #[inline(always)]
    pub(crate) fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
        &self,
        key: DepNode,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        // If the following assertion triggers, it can have two reasons:
        // 1. Something is wrong with DepNode creation, either here or
        //    in `DepGraph::try_mark_green()`.
        // 2. Two distinct query keys get mapped to the same `DepNode`
        //    (see for example #48923).
        self.assert_dep_node_not_yet_allocated_in_current_session(&key, || {
            format!(
                "forcing query with already existing `DepNode`\n\
                 - query-key: {arg:?}\n\
                 - dep-node: {key:?}"
            )
        });

        let with_deps = |task_deps| D::with_deps(task_deps, || task(cx, arg));
        let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
            (with_deps(TaskDepsRef::EvalAlways), EdgesVec::new())
        } else {
            let task_deps = Lock::new(TaskDeps {
                #[cfg(debug_assertions)]
                node: Some(key),
                reads: EdgesVec::new(),
                read_set: Default::default(),
                phantom_data: PhantomData,
            });
            (with_deps(TaskDepsRef::Allow(&task_deps)), task_deps.into_inner().reads)
        };

        let dcx = cx.dep_context();
        let dep_node_index = self.hash_result_and_alloc_node(dcx, key, edges, &result, hash_result);

        (result, dep_node_index)
    }

    /// Executes something within an "anonymous" task, that is, a task the
    /// `DepNode` of which is determined by the list of inputs it read from.
    ///
    /// NOTE: this does not actually count as a read of the DepNode here.
    /// Using the result of this task without reading the DepNode will result
    /// in untracked dependencies which may lead to ICEs as nodes are
    /// incorrectly marked green.
    ///
    /// FIXME: This could perhaps return a `WithDepNode` to ensure that the
    /// user of this function actually performs the read; we'll have to see
    /// how to make that work with `anon` in `execute_job_incr`, though.
    pub(crate) fn with_anon_task_inner<Tcx: DepContext<Deps = D>, OP, R>(
        &self,
        cx: Tcx,
        dep_kind: DepKind,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        debug_assert!(!cx.is_eval_always(dep_kind));

        let task_deps = Lock::new(TaskDeps::default());
        let result = D::with_deps(TaskDepsRef::Allow(&task_deps), op);
        let task_deps = task_deps.into_inner();
        let task_deps = task_deps.reads;

        let dep_node_index = match task_deps.len() {
            0 => {
                // Because the dep-node id of anon nodes is computed from the set of its
                // dependencies, we already know what the ID of this dependency-less node is
                // going to be (i.e. equal to the precomputed
                // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
                // a `StableHasher` and sending the node through interning.
                DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
            }
            1 => {
                // When there is only one dependency, don't bother creating a node.
                task_deps[0]
            }
            _ => {
                // The dep node indices are hashed here instead of hashing the dep nodes of the
                // dependencies. These indices may refer to different nodes per session, but this
                // isn't a problem here because we ensure that the final dep node hash is unique
                // per session by combining it with the per-session random number `anon_id_seed`.
                // This hash only needs to map the dependencies to a single value on a per-session
                // basis.
                let mut hasher = StableHasher::new();
                task_deps.hash(&mut hasher);

                let target_dep_node = DepNode {
                    kind: dep_kind,
                    // Fingerprint::combine() is faster than sending Fingerprint
                    // through the StableHasher (at least as long as StableHasher
                    // is so slow).
                    hash: self.current.anon_id_seed.combine(hasher.finish()).into(),
                };

                // The DepNodes generated by the process above are not unique: two queries could
                // have exactly the same dependencies. However, deserialization does not handle
                // duplicated nodes, so we do the deduplication here directly.
                //
                // As anonymous nodes are a small quantity compared to the full dep-graph, the
                // memory impact of this `anon_node_to_index` map remains tolerable, and helps
                // us avoid useless growth of the graph with almost-equivalent nodes.
                self.current.anon_node_to_index.get_or_insert_with(target_dep_node, || {
                    self.current.alloc_node(target_dep_node, task_deps, Fingerprint::ZERO)
                })
            }
        };

        (result, dep_node_index)
    }

    /// Interns the new `DepNode` with the dependencies recorded up to now.
    fn hash_result_and_alloc_node<Ctxt: DepContext<Deps = D>, R>(
        &self,
        cx: &Ctxt,
        node: DepNode,
        edges: EdgesVec,
        result: &R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> DepNodeIndex {
        let hashing_timer = cx.profiler().incr_result_hashing();
        let current_fingerprint = hash_result.map(|hash_result| {
            cx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, result))
        });
        let dep_node_index = self.alloc_and_color_node(node, edges, current_fingerprint);
        hashing_timer.finish_with_query_invocation_id(dep_node_index.into());
        dep_node_index
    }
}

impl<D: Deps> DepGraph<D> {
    #[inline]
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            D::read_deps(|task_deps| {
                let mut task_deps = match task_deps {
                    TaskDepsRef::Allow(deps) => deps.lock(),
                    TaskDepsRef::EvalAlways => {
                        // We don't need to record dependencies of eval_always
                        // queries. They are re-evaluated unconditionally anyway.
                        return;
                    }
                    TaskDepsRef::Ignore => return,
                    TaskDepsRef::Forbid => {
                        // Reading is forbidden in this context. ICE with a useful error message.
                        panic_on_forbidden_read(data, dep_node_index)
                    }
                };
                let task_deps = &mut *task_deps;

                if cfg!(debug_assertions) {
                    data.current.total_read_count.fetch_add(1, Ordering::Relaxed);
                }

                // As long as we only have a low number of reads, we can avoid doing a hash
                // insert and potentially allocating/reallocating the hashmap.
                let new_read = if task_deps.reads.len() < EdgesVec::INLINE_CAPACITY {
                    task_deps.reads.iter().all(|other| *other != dep_node_index)
                } else {
                    task_deps.read_set.insert(dep_node_index)
                };
                if new_read {
                    task_deps.reads.push(dep_node_index);
                    if task_deps.reads.len() == EdgesVec::INLINE_CAPACITY {
                        // Fill `read_set` with what we have so far so we can use the hashset
                        // next time.
                        task_deps.read_set.extend(task_deps.reads.iter().copied());
                    }

                    #[cfg(debug_assertions)]
                    {
                        if let Some(target) = task_deps.node {
                            if let Some(ref forbidden_edge) = data.current.forbidden_edge {
                                let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
                                if forbidden_edge.test(&src, &target) {
                                    panic!("forbidden edge {:?} -> {:?} created", src, target)
                                }
                            }
                        }
                    }
                } else if cfg!(debug_assertions) {
                    data.current.total_duplicate_read_count.fetch_add(1, Ordering::Relaxed);
                }
            })
        }
    }

    /// This encodes a diagnostic by creating a node with a unique index and associating
    /// `diagnostic` with it, for use in the next session.
    #[inline]
    pub fn record_diagnostic<Qcx: QueryContext>(&self, qcx: Qcx, diagnostic: &DiagInner) {
        if let Some(ref data) = self.data {
            D::read_deps(|task_deps| match task_deps {
                TaskDepsRef::EvalAlways | TaskDepsRef::Ignore => return,
                TaskDepsRef::Forbid | TaskDepsRef::Allow(..) => {
                    self.read_index(data.encode_diagnostic(qcx, diagnostic));
                }
            })
        }
    }

    /// This forces a diagnostic node green by running its side effect. `prev_index` would
    /// refer to a node created using `encode_diagnostic` in the previous session.
    #[inline]
    pub fn force_diagnostic_node<Qcx: QueryContext>(
        &self,
        qcx: Qcx,
        prev_index: SerializedDepNodeIndex,
    ) {
        if let Some(ref data) = self.data {
            data.force_diagnostic_node(qcx, prev_index);
        }
    }

    /// Create a node when we force-feed a value into the query cache.
    /// This is used to remove cycles during type-checking const generic parameters.
    ///
    /// As usual in the query system, we consider that the current state of the calling query
    /// depends only on the list of dependencies up to now. As a consequence, the value
    /// that this query gives us can only depend on those dependencies too. Therefore,
    /// it is sound to use the current dependency set for the created node.
    ///
    /// During replay, the order of the nodes is relevant in the dependency graph.
    /// So the unchanged replay will mark the caller query before trying to mark this one.
    /// If there is a change to report, the caller query will be re-executed before this one.
    ///
    /// FIXME: If the code is changed enough for this node to be marked before requiring the
    /// caller's node, we suppose that those changes will be enough to mark this node red and
    /// force a recomputation using the "normal" way.
    pub fn with_feed_task<Ctxt: DepContext<Deps = D>, R: Debug>(
        &self,
        node: DepNode,
        cx: Ctxt,
        result: &R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> DepNodeIndex {
        if let Some(data) = self.data.as_ref() {
            // The caller query has more dependencies than the node we are creating. We may
            // encounter a case where this created node is marked as green, but the caller query is
            // subsequently marked as red or recomputed. In this case, we will end up feeding a
            // value to an existing node.
            //
            // For sanity, we still check that the loaded stable hash and the new one match.
            if let Some(prev_index) = data.previous.node_to_index_opt(&node) {
                let dep_node_index = data.colors.current(prev_index);
                if let Some(dep_node_index) = dep_node_index {
                    crate::query::incremental_verify_ich(
                        cx,
                        data,
                        result,
                        prev_index,
                        hash_result,
                        |value| format!("{value:?}"),
                    );

                    #[cfg(debug_assertions)]
                    if hash_result.is_some() {
                        data.current.record_edge(
                            dep_node_index,
                            node,
                            data.prev_fingerprint_of(prev_index),
                        );
                    }

                    return dep_node_index;
                }
            }

            let mut edges = EdgesVec::new();
            D::read_deps(|task_deps| match task_deps {
                TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
                TaskDepsRef::EvalAlways => {
                    edges.push(DepNodeIndex::FOREVER_RED_NODE);
                }
                TaskDepsRef::Ignore => {}
                TaskDepsRef::Forbid => {
                    panic!("Cannot summarize when dependencies are not recorded.")
                }
            });

            data.hash_result_and_alloc_node(&cx, node, edges, result, hash_result)
        } else {
            // Incremental compilation is turned off. We just execute the task
            // without tracking. We still provide a dep-node index that uniquely
            // identifies the task so that we have a cheap way of referring to
            // the query for self-profiling.
            self.next_virtual_depnode_index()
        }
    }
}

impl<D: Deps> DepGraphData<D> {
    fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
        &self,
        dep_node: &DepNode,
        msg: impl FnOnce() -> S,
    ) {
        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
            let current = self.colors.get(prev_index);
            assert!(current.is_none(), "{}", msg())
        } else if let Some(nodes_in_current_session) = &self.current.nodes_in_current_session {
            outline(|| {
                let seen = nodes_in_current_session.lock().contains_key(dep_node);
                assert!(!seen, "{}", msg());
            });
        }
    }

    fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
            self.colors.get(prev_index)
        } else {
            // This is a node that did not exist in the previous compilation session.
            None
        }
    }

    /// Returns true if the given node has been marked as green during the
    /// current compilation session. Used in various assertions.
    #[inline]
    pub(crate) fn is_index_green(&self, prev_index: SerializedDepNodeIndex) -> bool {
        self.colors.get(prev_index).is_some_and(|c| c.is_green())
    }

    #[inline]
    pub(crate) fn prev_fingerprint_of(&self, prev_index: SerializedDepNodeIndex) -> Fingerprint {
        self.previous.fingerprint_by_index(prev_index)
    }

    #[inline]
    pub(crate) fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> DepNode {
        self.previous.index_to_node(prev_index)
    }

    pub(crate) fn mark_debug_loaded_from_disk(&self, dep_node: DepNode) {
        self.debug_loaded_from_disk.lock().insert(dep_node);
    }

    /// This encodes a diagnostic by creating a node with a unique index and associating
    /// `diagnostic` with it, for use in the next session.
    #[inline]
    fn encode_diagnostic<Qcx: QueryContext>(
        &self,
        qcx: Qcx,
        diagnostic: &DiagInner,
    ) -> DepNodeIndex {
        // Use `send` so we get a unique index, even though the dep node is not unique.
        let dep_node_index = self.current.encoder.send(
            DepNode {
                kind: D::DEP_KIND_SIDE_EFFECT,
                hash: PackedFingerprint::from(Fingerprint::ZERO),
            },
            Fingerprint::ZERO,
            // We want the side effect node to always be red so it will be forced and emit the
            // diagnostic.
            std::iter::once(DepNodeIndex::FOREVER_RED_NODE).collect(),
        );
        let side_effect = QuerySideEffect::Diagnostic(diagnostic.clone());
        qcx.store_side_effect(dep_node_index, side_effect);
        dep_node_index
    }

    /// This forces a diagnostic node green by running its side effect. `prev_index` would
    /// refer to a node created using `encode_diagnostic` in the previous session.
    #[inline]
    fn force_diagnostic_node<Qcx: QueryContext>(
        &self,
        qcx: Qcx,
        prev_index: SerializedDepNodeIndex,
    ) {
        D::with_deps(TaskDepsRef::Ignore, || {
            let side_effect = qcx.load_side_effect(prev_index).unwrap();

            match &side_effect {
                QuerySideEffect::Diagnostic(diagnostic) => {
                    qcx.dep_context().sess().dcx().emit_diagnostic(diagnostic.clone());
                }
            }

            // Manually recreate the node as `promote_node_and_deps_to_current` expects all
            // green dependencies.
            let dep_node_index = self.current.encoder.send(
                DepNode {
                    kind: D::DEP_KIND_SIDE_EFFECT,
                    hash: PackedFingerprint::from(Fingerprint::ZERO),
                },
                Fingerprint::ZERO,
                std::iter::once(DepNodeIndex::FOREVER_RED_NODE).collect(),
            );
            qcx.store_side_effect(dep_node_index, side_effect);

            // Mark the node as green.
            self.colors.insert(prev_index, DepNodeColor::Green(dep_node_index));
        })
    }

    fn alloc_and_color_node(
        &self,
        key: DepNode,
        edges: EdgesVec,
        fingerprint: Option<Fingerprint>,
    ) -> DepNodeIndex {
        let dep_node_index =
            self.current.alloc_node(key, edges, fingerprint.unwrap_or(Fingerprint::ZERO));

        if let Some(prev_index) = self.previous.node_to_index_opt(&key) {
            // Determine the color and index of the new `DepNode`.
            let color = if let Some(fingerprint) = fingerprint {
                if fingerprint == self.previous.fingerprint_by_index(prev_index) {
                    // This is a green node: it existed in the previous compilation,
                    // its query was re-executed, and it has the same result as before.
                    DepNodeColor::Green(dep_node_index)
                } else {
                    // This is a red node: it existed in the previous compilation, its query
                    // was re-executed, but it has a different result from before.
                    DepNodeColor::Red
                }
            } else {
                // This is a red node, effectively: it existed in the previous compilation
                // session, its query was re-executed, but it doesn't compute a result hash
                // (i.e. it represents a `no_hash` query), so we have no way of determining
                // whether or not the result was the same as before.
                DepNodeColor::Red
            };

            debug_assert!(
                self.colors.get(prev_index).is_none(),
                "DepGraph::with_task() - Duplicate DepNodeColor insertion for {key:?}",
            );

            self.colors.insert(prev_index, color);
        }

        dep_node_index
    }

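    /// Promotes a node that was green in the previous session into the current
    /// graph by handing it to the encoder's `send_promoted`, carrying its data
    /// over from the previous graph instead of re-hashing and re-allocating it.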
    fn promote_node_and_deps_to_current(&self, prev_index: SerializedDepNodeIndex) -> DepNodeIndex {
        self.current.debug_assert_not_in_new_nodes(&self.previous, prev_index);

        let dep_node_index = self.current.encoder.send_promoted(prev_index, &self.colors);

        #[cfg(debug_assertions)]
        self.current.record_edge(
            dep_node_index,
            self.previous.index_to_node(prev_index),
            self.previous.fingerprint_by_index(prev_index),
        );

        dep_node_index
    }
}

impl<D: Deps> DepGraph<D> {
    /// Checks whether a previous work product exists for `v` and, if
    /// so, returns the path that leads to it. Used to skip doing work.
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
    }

    /// Access the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph.
    pub fn previous_work_products(&self) -> &WorkProductMap {
        &self.data.as_ref().unwrap().previous_work_products
    }

    pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode) -> bool {
        self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
    }

    #[cfg(debug_assertions)]
    #[inline(always)]
    pub(crate) fn register_dep_node_debug_str<F>(&self, dep_node: DepNode, debug_str_gen: F)
    where
        F: FnOnce() -> String,
    {
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {
            return;
        }
        let debug_str = self.with_ignore(debug_str_gen);
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
    }

    pub fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }

    fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
        if let Some(ref data) = self.data {
            return data.node_color(dep_node);
        }

        None
    }

    pub fn try_mark_green<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        self.data().and_then(|data| data.try_mark_green(qcx, dep_node))
    }
}

impl<D: Deps> DepGraphData<D> {
    /// Try to mark a node index for the node `dep_node`.
    ///
    /// A node will have an index when it has already been marked green, or when we can mark
    /// it green. This function will mark the current task as a reader of the specified node,
    /// when a node index can be found for that node.
    pub(crate) fn try_mark_green<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));

        // Return None if the dep node didn't exist in the previous session.
        let prev_index = self.previous.node_to_index_opt(dep_node)?;

        match self.colors.get(prev_index) {
            Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
            Some(DepNodeColor::Red) => None,
            None => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(qcx, prev_index, dep_node, None)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }

    #[instrument(skip(self, qcx, parent_dep_node_index, frame), level = "debug")]
    fn try_mark_parent_green<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        parent_dep_node_index: SerializedDepNodeIndex,
        frame: Option<&MarkFrame<'_>>,
    ) -> Option<()> {
        let dep_dep_node_color = self.colors.get(parent_dep_node_index);
        let dep_dep_node = &self.previous.index_to_node(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
                // This dependency has been marked as green before, we are
                // still fine and can continue with checking the other
                // dependencies.
                debug!("dependency {dep_dep_node:?} was immediately green");
                return Some(());
            }
            Some(DepNodeColor::Red) => {
                // We found a dependency the value of which has changed
                // compared to the previous compilation session. We cannot
                // mark the DepNode as green and also don't need to bother
                // with checking any of the other dependencies.
                debug!("dependency {dep_dep_node:?} was immediately red");
                return None;
            }
            None => {}
        }

        // We don't know the state of this dependency. If it isn't
        // an eval_always node, let's try to mark it green recursively.
        if !qcx.dep_context().is_eval_always(dep_dep_node.kind) {
            debug!(
                "state of dependency {:?} ({}) is unknown, trying to mark it green",
                dep_dep_node, dep_dep_node.hash,
            );

            let node_index =
                self.try_mark_previous_green(qcx, parent_dep_node_index, dep_dep_node, frame);

            if node_index.is_some() {
                debug!("managed to MARK dependency {dep_dep_node:?} as green");
                return Some(());
            }
        }

        // We failed to mark it green, so we try to force the query.
        debug!("trying to force dependency {dep_dep_node:?}");
        if !qcx.dep_context().try_force_from_dep_node(*dep_dep_node, parent_dep_node_index, frame) {
            // The DepNode could not be forced.
            debug!("dependency {dep_dep_node:?} could not be forced");
            return None;
        }

        let dep_dep_node_color = self.colors.get(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
                debug!("managed to FORCE dependency {dep_dep_node:?} to green");
                return Some(());
            }
            Some(DepNodeColor::Red) => {
                debug!("dependency {dep_dep_node:?} was red after forcing");
                return None;
            }
            None => {}
        }

        if qcx.dep_context().sess().dcx().has_errors_or_delayed_bugs().is_none() {
            panic!("try_mark_previous_green() - Forcing the DepNode should have set its color")
        }

        // If the query we just forced has resulted in
        // some kind of compilation error, we cannot rely on
        // the dep-node color having been properly updated.
        // This means that the query system has reached an
        // invalid state. We let the compiler continue (by
        // returning `None`) so it can emit error messages
        // and wind down, but rely on the fact that this
        // invalid state will not be persisted to the
        // incremental compilation cache because of
        // compilation errors being present.
        debug!("dependency {dep_dep_node:?} resulted in compilation error");
        None
    }

    /// Try to mark a dep-node which existed in the previous compilation session as green.
    #[instrument(skip(self, qcx, prev_dep_node_index, frame), level = "debug")]
    fn try_mark_previous_green<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        prev_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode,
        frame: Option<&MarkFrame<'_>>,
    ) -> Option<DepNodeIndex> {
        let frame = MarkFrame { index: prev_dep_node_index, parent: frame };

        // We never try to mark eval_always nodes as green.
        debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));

        debug_assert_eq!(self.previous.index_to_node(prev_dep_node_index), *dep_node);

        let prev_deps = self.previous.edge_targets_from(prev_dep_node_index);

        for dep_dep_node_index in prev_deps {
            self.try_mark_parent_green(qcx, dep_dep_node_index, Some(&frame))?;
        }

        // If we got here without hitting a `return` that means that all
        // dependencies of this DepNode could be marked as green. Therefore we
        // can also mark this DepNode as green.

        // There may be multiple threads trying to mark the same dep node green concurrently.

        // We allocate an entry for the node in the current dependency graph and
        // add all the appropriate edges imported from the previous graph...
        let dep_node_index = self.promote_node_and_deps_to_current(prev_dep_node_index);

        // ... and finally storing a "Green" entry in the color map.
        // Multiple threads can all write the same color here.

        debug!("successfully marked {dep_node:?} as green");
        Some(dep_node_index)
    }
}

impl<D: Deps> DepGraph<D> {
    /// Returns true if the given node has been marked as red during the
    /// current compilation session. Used in various assertions.
    pub fn is_red(&self, dep_node: &DepNode) -> bool {
        matches!(self.node_color(dep_node), Some(DepNodeColor::Red))
    }

    /// Returns true if the given node has been marked as green during the
    /// current compilation session. Used in various assertions.
    pub fn is_green(&self, dep_node: &DepNode) -> bool {
        self.node_color(dep_node).is_some_and(|c| c.is_green())
    }

    pub fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
        &self,
        dep_node: &DepNode,
        msg: impl FnOnce() -> S,
    ) {
        if let Some(data) = &self.data {
            data.assert_dep_node_not_yet_allocated_in_current_session(dep_node, msg)
        }
    }

    /// This method loads all on-disk cacheable query results into memory, so
    /// they can be written out to the new cache file again. Most query results
    /// will already be in memory, but in the case where we marked something as
    /// green but then did not need the value, that value will never have been
    /// loaded from disk.
    ///
    /// This method will only load queries that will end up in the disk cache.
    /// Other queries will not be executed.
    pub fn exec_cache_promotions<Tcx: DepContext>(&self, tcx: Tcx) {
        let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");

        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Green(_)) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    tcx.try_load_from_on_disk_cache(dep_node);
                }
                None | Some(DepNodeColor::Red) => {
                    // We can skip red nodes because a node can only be marked
                    // as red if the query result was recomputed and thus is
                    // already in memory.
                }
            }
        }
    }

    pub fn print_incremental_info(&self) {
        if let Some(data) = &self.data {
            data.current.encoder.print_incremental_info(
                data.current.total_read_count.load(Ordering::Relaxed),
                data.current.total_duplicate_read_count.load(Ordering::Relaxed),
            )
        }
    }

    pub fn finish_encoding(&self) -> FileEncodeResult {
        if let Some(data) = &self.data { data.current.encoder.finish() } else { Ok(0) }
    }

    pub(crate) fn next_virtual_depnode_index(&self) -> DepNodeIndex {
        debug_assert!(self.data.is_none());
        let index = self.virtual_dep_node_index.fetch_add(1, Ordering::Relaxed);
        DepNodeIndex::from_u32(index)
    }
}

/// A "work product" is an intermediate result that we save into the
/// incremental directory for later re-use. The primary examples are
/// the object files that we save for each partition at code
/// generation time.
///
/// Each work product is associated with a dep-node, representing the
/// process that produced the work-product. If that dep-node is found
/// to be dirty when we load up, then we will delete the work-product
/// at load time. If the work-product is found to be clean, then we
/// will keep a record in the `previous_work_products` list.
///
/// In addition, work products have an associated hash. This hash is
/// an extra hash that can be used to decide if the work-product from
/// a previous compilation can be re-used (in addition to the dirty
/// edges check).
///
/// As the primary example, consider the object files we generate for
/// each partition. In the first run, we create partitions based on
/// the symbols that need to be compiled. For each partition P, we
/// hash the symbols in P and create a `WorkProduct` record associated
/// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
/// in P.
///
/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
/// judged to be clean (which means none of the things we read to
/// generate the partition were found to be dirty), it will be loaded
/// into previous work products. We will then regenerate the set of
/// symbols in the partition P and hash them (note that new symbols
/// may be added -- for example, new monomorphizations -- even if
/// nothing in P changed!). We will compare that hash against the
/// previous hash. If it matches up, we can reuse the object file.
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct WorkProduct {
    pub cgu_name: String,
    /// Saved files associated with this CGU. In each key/value pair, the value is the path to the
    /// saved file and the key is some identifier for the type of file being saved.
    ///
    /// By convention, file extensions are currently used as identifiers, i.e. the key "o" maps to
    /// the object file's path, and "dwo" to the dwarf object file's path.
    pub saved_files: UnordMap<String, String>,
}

pub type WorkProductMap = UnordMap<WorkProductId, WorkProduct>;

// Index type for `DepNodeData`'s edges.
rustc_index::newtype_index! {
    struct EdgeIndex {}
}

/// `CurrentDepGraph` stores the dependency graph for the current session. It
/// will be populated as we run queries or tasks. We never remove nodes from the
/// graph: they are only added.
///
/// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes
/// in memory. This is important, because these graph structures are some of the
/// largest in the compiler.
///
/// For this reason, we avoid storing `DepNode`s more than once as map
/// keys. The `anon_node_to_index` map only contains nodes of anonymous queries not in the previous
/// graph, and we map nodes in the previous graph to indices via a two-step
/// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
/// and the `prev_index_to_index` vector (which is more compact and faster than
/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
///
/// This struct uses three locks internally. The `data`, `anon_node_to_index`,
/// and `prev_index_to_index` fields are locked separately. Operations that take
/// a `DepNodeIndex` typically just access the `data` field.
///
/// We only need to manipulate at most two locks simultaneously:
/// `anon_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
/// manipulating both, we acquire `anon_node_to_index` or `prev_index_to_index`
/// first, and `data` second.
pub(super) struct CurrentDepGraph<D: Deps> {
    encoder: GraphEncoder<D>,
    anon_node_to_index: ShardedHashMap<DepNode, DepNodeIndex>,

    /// This is used to verify that fingerprints do not change between the creation of a node
    /// and its recomputation.
    #[cfg(debug_assertions)]
    fingerprints: Lock<IndexVec<DepNodeIndex, Option<Fingerprint>>>,

    /// Used to trap when a specific edge is added to the graph.
    /// This is used for debug purposes and is only active with `debug_assertions`.
    #[cfg(debug_assertions)]
    forbidden_edge: Option<EdgeFilter>,

    /// Used to verify the absence of hash collisions among DepNodes.
    /// This field is only `Some` if the `-Z incremental_verify_ich` option is present
    /// or if `debug_assertions` are enabled.
    ///
    /// The map contains all DepNodes that have been allocated in the current session so far.
    nodes_in_current_session: Option<Lock<FxHashMap<DepNode, DepNodeIndex>>>,

    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple anonymous
    /// nodes can be coalesced into one without changing the semantics of the
    /// dependency graph. However, the merging of nodes can lead to a subtle
    /// problem during red-green marking: The color of an anonymous node from
    /// the current session might "shadow" the color of the node with the same
    /// ID from the previous session. In order to side-step this problem, we make
    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
    /// This is implemented by mixing a session-key into the ID fingerprint of
    /// each anon node. The session-key is just a random number generated when
    /// the `DepGraph` is created.
    anon_id_seed: Fingerprint,

    /// These are simple counters that are for profiling and
    /// debugging and only active with `debug_assertions`.
    total_read_count: AtomicU64,
    total_duplicate_read_count: AtomicU64,
}

impl<D: Deps> CurrentDepGraph<D> {
    fn new(
        session: &Session,
        prev_graph_node_count: usize,
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
        previous: Arc<SerializedDepGraph>,
    ) -> Self {
        use std::time::{SystemTime, UNIX_EPOCH};

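        // Derive the per-session `anon_id_seed` from the current time, so that
        // anonymous `DepNode` hashes differ between sessions (see the
        // `anon_id_seed` field documentation above).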
        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_nanos();
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);
        let anon_id_seed = stable_hasher.finish();

        #[cfg(debug_assertions)]
        let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
            Ok(s) => match EdgeFilter::new(&s) {
                Ok(f) => Some(f),
                Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
            },
            Err(_) => None,
        };

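        // Estimate the node count of the new graph as that of the previous
        // graph plus ~2% growth and a small constant amount of headroom.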
        let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;

        let new_node_dbg =
            session.opts.unstable_opts.incremental_verify_ich || cfg!(debug_assertions);

        CurrentDepGraph {
            encoder: GraphEncoder::new(
                encoder,
                prev_graph_node_count,
                record_graph,
                record_stats,
                &session.prof,
                previous,
            ),
            anon_node_to_index: ShardedHashMap::with_capacity(
                // FIXME: The count estimate is off as anon nodes are only a portion of the nodes.
                new_node_count_estimate / sharded::shards(),
            ),
            anon_id_seed,
            #[cfg(debug_assertions)]
            forbidden_edge,
            #[cfg(debug_assertions)]
            fingerprints: Lock::new(IndexVec::from_elem_n(None, new_node_count_estimate)),
            nodes_in_current_session: new_node_dbg.then(|| {
                Lock::new(FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate,
                    Default::default(),
                ))
            }),
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
        }
    }

    #[cfg(debug_assertions)]
    fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode, fingerprint: Fingerprint) {
        if let Some(forbidden_edge) = &self.forbidden_edge {
            forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
        }
        let previous = *self.fingerprints.lock().get_or_insert_with(dep_node_index, || fingerprint);
        assert_eq!(previous, fingerprint, "Unstable fingerprints for {:?}", key);
    }

    /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
    /// Assumes that this is a node that has no equivalent in the previous dep-graph.
    #[inline(always)]
    fn alloc_node(
        &self,
        key: DepNode,
        edges: EdgesVec,
        current_fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        let dep_node_index = self.encoder.send(key, current_fingerprint, edges);

        #[cfg(debug_assertions)]
        self.record_edge(dep_node_index, key, current_fingerprint);

        if let Some(ref nodes_in_current_session) = self.nodes_in_current_session {
            outline(|| {
                if nodes_in_current_session.lock().insert(key, dep_node_index).is_some() {
                    panic!("Found duplicate dep-node {key:?}");
                }
            });
        }

        dep_node_index
    }

    #[inline]
    fn debug_assert_not_in_new_nodes(
        &self,
        prev_graph: &SerializedDepGraph,
        prev_index: SerializedDepNodeIndex,
    ) {
        if let Some(ref nodes_in_current_session) = self.nodes_in_current_session {
            debug_assert!(
                !nodes_in_current_session
                    .lock()
                    .contains_key(&prev_graph.index_to_node(prev_index)),
                "node from previous graph present in new node collection"
            );
        }
    }
}

#[derive(Debug, Clone, Copy)]
pub enum TaskDepsRef<'a> {
    /// New dependencies can be added to the
    /// `TaskDeps`. This is used when executing a 'normal' query
    /// (no `eval_always` modifier)
    Allow(&'a Lock<TaskDeps>),
    /// This is used when executing an `eval_always` query. We don't
    /// need to track dependencies for a query that's always
    /// re-executed -- but we need to know that this is an `eval_always`
    /// query in order to emit dependencies to `DepNodeIndex::FOREVER_RED_NODE`
    /// when directly feeding other queries.
    EvalAlways,
    /// New dependencies are ignored. This is also used for `dep_graph.with_ignore`.
    Ignore,
    /// Any attempt to add new dependencies will cause a panic.
    /// This is used when decoding a query result from disk,
    /// to ensure that the decoding process doesn't itself
    /// require the execution of any queries.
    Forbid,
}

#[derive(Debug)]
pub struct TaskDeps {
    #[cfg(debug_assertions)]
    node: Option<DepNode>,
    reads: EdgesVec,
    read_set: FxHashSet<DepNodeIndex>,
    phantom_data: PhantomData<DepNode>,
}

impl Default for TaskDeps {
    fn default() -> Self {
        Self {
            #[cfg(debug_assertions)]
            node: None,
            reads: EdgesVec::new(),
            read_set: FxHashSet::with_capacity_and_hasher(128, Default::default()),
            phantom_data: PhantomData,
        }
    }
}

// A data structure that stores `Option<DepNodeColor>` values as a contiguous
// array, using one u32 per entry.
pub(super) struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

const COMPRESSED_NONE: u32 = u32::MAX;
const COMPRESSED_RED: u32 = u32::MAX - 1;

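// The two constants above pack an `Option<DepNodeColor>` into a single `u32`:
//
//   0 ..= DepNodeIndex::MAX_AS_U32 => Some(Green(DepNodeIndex))
//   COMPRESSED_RED  (u32::MAX - 1) => Some(Red)
//   COMPRESSED_NONE (u32::MAX)     => None
//
// `DepNodeColorMap::new` debug-asserts that `COMPRESSED_RED` is greater than
// `DepNodeIndex::MAX_AS_U32`, so a green index can never collide with either
// sentinel value.
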
impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
        debug_assert!(COMPRESSED_RED > DepNodeIndex::MAX_AS_U32);
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
    }

    #[inline]
    pub(super) fn current(&self, index: SerializedDepNodeIndex) -> Option<DepNodeIndex> {
        let value = self.values[index].load(Ordering::Relaxed);
        if value <= DepNodeIndex::MAX_AS_U32 { Some(DepNodeIndex::from_u32(value)) } else { None }
    }

    #[inline]
    pub(super) fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
        match self.values[index].load(Ordering::Acquire) {
            COMPRESSED_NONE => None,
            COMPRESSED_RED => Some(DepNodeColor::Red),
            value => Some(DepNodeColor::Green(DepNodeIndex::from_u32(value))),
        }
    }

    #[inline]
    pub(super) fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
        self.values[index].store(
            match color {
                DepNodeColor::Red => COMPRESSED_RED,
                DepNodeColor::Green(index) => index.as_u32(),
            },
            Ordering::Release,
        )
    }
}

#[inline(never)]
#[cold]
pub(crate) fn print_markframe_trace<D: Deps>(graph: &DepGraph<D>, frame: Option<&MarkFrame<'_>>) {
    let data = graph.data.as_ref().unwrap();

    eprintln!("there was a panic while trying to force a dep node");
    eprintln!("try_mark_green dep node stack:");

    let mut i = 0;
    let mut current = frame;
    while let Some(frame) = current {
        let node = data.previous.index_to_node(frame.index);
        eprintln!("#{i} {node:?}");
        current = frame.parent;
        i += 1;
    }

    eprintln!("end of try_mark_green dep node stack");
}

#[cold]
#[inline(never)]
fn panic_on_forbidden_read<D: Deps>(data: &DepGraphData<D>, dep_node_index: DepNodeIndex) -> ! {
    // We have to do an expensive reverse-lookup of the DepNode that
    // corresponds to `dep_node_index`, but that's OK since we are about
    // to ICE anyway.
    let mut dep_node = None;

    // First try to find the dep node among those that already existed in the
    // previous session and have been marked green.
    for prev_index in data.colors.values.indices() {
        if data.colors.current(prev_index) == Some(dep_node_index) {
            dep_node = Some(data.previous.index_to_node(prev_index));
            break;
        }
    }

    if dep_node.is_none()
        && let Some(nodes) = &data.current.nodes_in_current_session
    {
        // Try to find it among the nodes allocated so far in this session.
        if let Some((node, _)) = nodes.lock().iter().find(|&(_, index)| *index == dep_node_index) {
            dep_node = Some(*node);
        }
    }

    let dep_node = dep_node.map_or_else(
        || format!("with index {:?}", dep_node_index),
        |dep_node| format!("`{:?}`", dep_node),
    );

    panic!(
        "Error: trying to record dependency on DepNode {dep_node} in a \
         context that does not allow it (e.g. during query deserialization). \
         The most common case of recording a dependency on a DepNode `foo` is \
         when the corresponding query `foo` is invoked. Invoking queries is not \
         allowed as part of loading something from the incremental on-disk cache. \
         See <https://github.com/rust-lang/rust/pull/91919>."
    )
}