rustc_query_system/dep_graph/graph.rs
1use std::assert_matches::assert_matches;
2use std::fmt::Debug;
3use std::hash::Hash;
4use std::marker::PhantomData;
5use std::sync::Arc;
6use std::sync::atomic::{AtomicU32, Ordering};
7
8use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
9use rustc_data_structures::fx::{FxHashMap, FxHashSet};
10use rustc_data_structures::outline;
11use rustc_data_structures::profiling::QueryInvocationId;
12use rustc_data_structures::sharded::{self, ShardedHashMap};
13use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
14use rustc_data_structures::sync::{AtomicU64, Lock, is_dyn_thread_safe};
15use rustc_data_structures::unord::UnordMap;
16use rustc_errors::DiagInner;
17use rustc_index::IndexVec;
18use rustc_macros::{Decodable, Encodable};
19use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
20use rustc_session::Session;
21use tracing::{debug, instrument};
22#[cfg(debug_assertions)]
23use {super::debug::EdgeFilter, std::env};
24
25use super::query::DepGraphQuery;
26use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
27use super::{DepContext, DepKind, DepNode, Deps, HasDepContext, WorkProductId};
28use crate::dep_graph::edges::EdgesVec;
29use crate::ich::StableHashingContext;
30use crate::query::{QueryContext, QuerySideEffect};
31
32#[derive(Clone)]
33pub struct DepGraph<D: Deps> {
34 data: Option<Arc<DepGraphData<D>>>,
35
36 /// This field is used for assigning DepNodeIndices when running in
37 /// non-incremental mode. Even in non-incremental mode we make sure that
38 /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
39 /// ID is used for self-profiling.
40 virtual_dep_node_index: Arc<AtomicU32>,
41}
42
43rustc_index::newtype_index! {
44 pub struct DepNodeIndex {}
45}
46
47// We store a large collection of these in `prev_index_to_index` during
48// non-full incremental builds, and want to ensure that the element size
49// doesn't inadvertently increase.
50rustc_data_structures::static_assert_size!(Option<DepNodeIndex>, 4);
51
52impl DepNodeIndex {
53 const SINGLETON_ZERO_DEPS_ANON_NODE: DepNodeIndex = DepNodeIndex::ZERO;
54 pub const FOREVER_RED_NODE: DepNodeIndex = DepNodeIndex::from_u32(1);
55}
56
57impl From<DepNodeIndex> for QueryInvocationId {
58 #[inline(always)]
59 fn from(dep_node_index: DepNodeIndex) -> Self {
60 QueryInvocationId(dep_node_index.as_u32())
61 }
62}
63
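/// A frame in the chain of nodes currently being marked green. Each frame links back to
/// its parent so that, if forcing a dependency panics, `print_markframe_trace` can print
/// the stack of previous-session nodes that led to the failure.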
64pub struct MarkFrame<'a> {
65 index: SerializedDepNodeIndex,
66 parent: Option<&'a MarkFrame<'a>>,
67}
68
69#[derive(Debug)]
70pub(super) enum DepNodeColor {
71 Red,
72 Green(DepNodeIndex),
73}
74
75impl DepNodeColor {
76 #[inline]
77 fn is_green(self) -> bool {
78 match self {
79 DepNodeColor::Red => false,
80 DepNodeColor::Green(_) => true,
81 }
82 }
83}
84
85pub(crate) struct DepGraphData<D: Deps> {
86 /// The new encoding of the dependency graph, optimized for red/green
87 /// tracking. The `current` field is the dependency graph of only the
88 /// current compilation session: we don't merge the previous dep-graph into
89 /// the current one anymore, but we do reference shared data to save space.
90 current: CurrentDepGraph<D>,
91
92 /// The dep-graph from the previous compilation session. It contains all
93 /// nodes and edges as well as all fingerprints of nodes that have them.
94 previous: Arc<SerializedDepGraph>,
95
96 colors: DepNodeColorMap,
97
98 /// When we load, there may be `.o` files, cached MIR, or other such
99 /// things available to us. If we find that they are not dirty, we
100 /// load the path to the file storing those work-products here into
101 /// this map. We can later look for and extract that data.
102 previous_work_products: WorkProductMap,
103
104 dep_node_debug: Lock<FxHashMap<DepNode, String>>,
105
106 /// Used by incremental compilation tests to assert that
107 /// a particular query result was decoded from disk
108 /// (not just marked green)
109 debug_loaded_from_disk: Lock<FxHashSet<DepNode>>,
110}
111
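/// Hashes a query result with the given `StableHashingContext`. The resulting fingerprint
/// is what the dep-graph compares against the previous session's fingerprint for the same
/// node to decide whether a re-executed node is green or red.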
112pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
113where
114 R: for<'a> HashStable<StableHashingContext<'a>>,
115{
116 let mut stable_hasher = StableHasher::new();
117 result.hash_stable(hcx, &mut stable_hasher);
118 stable_hasher.finish()
119}
120
121impl<D: Deps> DepGraph<D> {
122 pub fn new(
123 session: &Session,
124 prev_graph: Arc<SerializedDepGraph>,
125 prev_work_products: WorkProductMap,
126 encoder: FileEncoder,
127 ) -> DepGraph<D> {
128 let prev_graph_node_count = prev_graph.node_count();
129
130 let current =
131 CurrentDepGraph::new(session, prev_graph_node_count, encoder, Arc::clone(&prev_graph));
132
133 let colors = DepNodeColorMap::new(prev_graph_node_count);
134
135 // Instantiate a node with zero dependencies only once for anonymous queries.
136 let _green_node_index = current.alloc_new_node(
137 DepNode { kind: D::DEP_KIND_ANON_ZERO_DEPS, hash: current.anon_id_seed.into() },
138 EdgesVec::new(),
139 Fingerprint::ZERO,
140 );
141 assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_ZERO_DEPS_ANON_NODE);
142
143 // Instantiate a dependency-less red node only once for anonymous queries.
144 let red_node_index = current.alloc_new_node(
145 DepNode { kind: D::DEP_KIND_RED, hash: Fingerprint::ZERO.into() },
146 EdgesVec::new(),
147 Fingerprint::ZERO,
148 );
149 assert_eq!(red_node_index, DepNodeIndex::FOREVER_RED_NODE);
150 if prev_graph_node_count > 0 {
151 colors.insert(
152 SerializedDepNodeIndex::from_u32(DepNodeIndex::FOREVER_RED_NODE.as_u32()),
153 DepNodeColor::Red,
154 );
155 }
156
157 DepGraph {
158 data: Some(Arc::new(DepGraphData {
159 previous_work_products: prev_work_products,
160 dep_node_debug: Default::default(),
161 current,
162 previous: prev_graph,
163 colors,
164 debug_loaded_from_disk: Default::default(),
165 })),
166 virtual_dep_node_index: Arc::new(AtomicU32::new(0)),
167 }
168 }
169
170 pub fn new_disabled() -> DepGraph<D> {
171 DepGraph { data: None, virtual_dep_node_index: Arc::new(AtomicU32::new(0)) }
172 }
173
174 #[inline]
175 pub(crate) fn data(&self) -> Option<&DepGraphData<D>> {
176 self.data.as_deref()
177 }
178
179 /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
180 #[inline]
181 pub fn is_fully_enabled(&self) -> bool {
182 self.data.is_some()
183 }
184
185 pub fn with_query(&self, f: impl Fn(&DepGraphQuery)) {
186 if let Some(data) = &self.data {
187 data.current.encoder.with_query(f)
188 }
189 }
190
191 pub fn assert_ignored(&self) {
192 if let Some(..) = self.data {
193 D::read_deps(|task_deps| {
194 assert_matches!(
195 task_deps,
196 TaskDepsRef::Ignore,
197 "expected no task dependency tracking"
198 );
199 })
200 }
201 }
202
203 pub fn with_ignore<OP, R>(&self, op: OP) -> R
204 where
205 OP: FnOnce() -> R,
206 {
207 D::with_deps(TaskDepsRef::Ignore, op)
208 }
209
210 /// Used to wrap the deserialization of a query result from disk.
211 /// This method enforces that no new `DepNodes` are created during
212 /// query result deserialization.
213 ///
214 /// Enforcing this makes the query dep graph simpler - all nodes
215 /// must be created during the query execution, and should be
216 /// created from inside the 'body' of a query (the implementation
217 /// provided by a particular compiler crate).
218 ///
219 /// Consider the case of three queries `A`, `B`, and `C`, where
220 /// `A` invokes `B` and `B` invokes `C`:
221 ///
222 /// `A -> B -> C`
223 ///
224 /// Suppose that decoding the result of query `B` required re-computing
225 /// the query `C`. If we did not create a fresh `TaskDeps` when
226 /// decoding `B`, we would still be using the `TaskDeps` for query `A`
227 /// (if we needed to re-execute `A`). This would cause us to create
228 /// a new edge `A -> C`. If this edge did not previously
229 /// exist in the `DepGraph`, then we could end up with a different
230 /// `DepGraph` at the end of compilation, even if there were no
231 /// meaningful changes to the overall program (e.g. a newline was added).
232 /// In addition, this edge might cause a subsequent compilation run
233 /// to try to force `C` before marking other necessary nodes green. If
234 /// `C` did not exist in the new compilation session, then we could
235 /// get an ICE. Normally, we would have tried (and failed) to mark
236 /// some other query green (e.g. `item_children`) which was used
237 /// to obtain `C`, which would prevent us from ever trying to force
238 /// a nonexistent `C`.
239 ///
240 /// It might be possible to enforce that all `DepNode`s read during
241 /// deserialization already exist in the previous `DepGraph`. In
242 /// the above example, we would invoke `C` during the deserialization
243 /// of `B`. Since we correctly create a new `TaskDeps` from the decoding
244 /// of `B`, this would result in an edge `B -> C`. If that edge already
245 /// existed (with the same `DepPathHash`es), then it should be correct
246 /// to allow the invocation of the query to proceed during deserialization
247 /// of a query result. We would merely assert that the dep-graph fragment
248 /// that would have been added by invoking `C` while decoding `B`
249 /// is equivalent to the dep-graph fragment that we already instantiated for `B`
250 /// (at the point where we successfully marked `B` as green).
251 ///
252 /// However, this would require additional complexity
253 /// in the query infrastructure, and is not currently needed by the
254 /// decoding of any query results. Should the need arise in the future,
255 /// we should consider extending the query system with this functionality.
256 pub fn with_query_deserialization<OP, R>(&self, op: OP) -> R
257 where
258 OP: FnOnce() -> R,
259 {
260 D::with_deps(TaskDepsRef::Forbid, op)
261 }
262
263 #[inline(always)]
264 pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
265 &self,
266 key: DepNode,
267 cx: Ctxt,
268 arg: A,
269 task: fn(Ctxt, A) -> R,
270 hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
271 ) -> (R, DepNodeIndex) {
272 match self.data() {
273 Some(data) => data.with_task(key, cx, arg, task, hash_result),
274 None => (task(cx, arg), self.next_virtual_depnode_index()),
275 }
276 }
277
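    /// Executes `op` as an "anonymous" task (see `with_anon_task_inner`) and, unlike that
    /// inner method, also registers a read of the resulting node in the current task.
    ///
    /// A minimal illustrative sketch (`tcx`, `dep_kind`, and `compute_something` are
    /// hypothetical, not an actual call site in the compiler):
    ///
    /// ```ignore (illustrative)
    /// let (result, _dep_node_index) =
    ///     dep_graph.with_anon_task(tcx, dep_kind, || compute_something(tcx));
    /// ```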
278 pub fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
279 &self,
280 cx: Tcx,
281 dep_kind: DepKind,
282 op: OP,
283 ) -> (R, DepNodeIndex)
284 where
285 OP: FnOnce() -> R,
286 {
287 match self.data() {
288 Some(data) => {
289 let (result, index) = data.with_anon_task_inner(cx, dep_kind, op);
290 self.read_index(index);
291 (result, index)
292 }
293 None => (op(), self.next_virtual_depnode_index()),
294 }
295 }
296}
297
298impl<D: Deps> DepGraphData<D> {
299 /// Starts a new dep-graph task. Dep-graph tasks are specified
300 /// using a free function (`task`) and **not** a closure -- this
301 /// is intentional because we want to exercise tight control over
302 /// what state they have access to. In particular, we want to
303 /// prevent implicit 'leaks' of tracked state into the task (which
304 /// could then be read without generating correct edges in the
305 /// dep-graph -- see the [rustc dev guide] for more details on
306 /// the dep-graph). To this end, the task function gets exactly two
307 /// pieces of state: the context `cx` and an argument `arg`. Both
308 /// of these bits of state must be of some type that implements
309 /// `DepGraphSafe` and hence does not leak.
310 ///
311 /// The choice of two arguments is not fundamental. One argument
312 /// would work just as well, since multiple values can be
313 /// collected using tuples. However, using two arguments works out
314 /// to be quite convenient, since it is common to need a context
315 /// (`cx`) and some argument (e.g., a `DefId` identifying what
316 /// item to process).
317 ///
318 /// For cases where you need some other number of arguments:
319 ///
320 /// - If you only need one argument, just use `()` for the `arg`
321 /// parameter.
322 /// - If you need 3+ arguments, use a tuple for the
323 /// `arg` parameter.
324 ///
325 /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/queries/incremental-compilation.html
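    ///
    /// A minimal illustrative sketch (the names `cx`, `key`, `arg`, and `compute` are
    /// hypothetical, not an actual call site in the compiler):
    ///
    /// ```ignore (illustrative)
    /// // `compute` must be a free function, not a closure, so it can only read `cx` and `arg`.
    /// fn compute(cx: Ctxt, arg: Arg) -> Output { /* ... */ }
    ///
    /// let (result, dep_node_index) = data.with_task(key, cx, arg, compute, Some(hash_result));
    /// ```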
326 #[inline(always)]
327 pub(crate) fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
328 &self,
329 key: DepNode,
330 cx: Ctxt,
331 arg: A,
332 task: fn(Ctxt, A) -> R,
333 hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
334 ) -> (R, DepNodeIndex) {
335 // If the following assertion triggers, it can have two reasons:
336 // 1. Something is wrong with DepNode creation, either here or
337 // in `DepGraph::try_mark_green()`.
338 // 2. Two distinct query keys get mapped to the same `DepNode`
339 // (see for example #48923).
340 self.assert_dep_node_not_yet_allocated_in_current_session(&key, || {
341 format!(
342 "forcing query with already existing `DepNode`\n\
343 - query-key: {arg:?}\n\
344 - dep-node: {key:?}"
345 )
346 });
347
348 let with_deps = |task_deps| D::with_deps(task_deps, || task(cx, arg));
349 let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
350 (with_deps(TaskDepsRef::EvalAlways), EdgesVec::new())
351 } else {
352 let task_deps = Lock::new(TaskDeps {
353 #[cfg(debug_assertions)]
354 node: Some(key),
355 reads: EdgesVec::new(),
356 read_set: Default::default(),
357 phantom_data: PhantomData,
358 });
359 (with_deps(TaskDepsRef::Allow(&task_deps)), task_deps.into_inner().reads)
360 };
361
362 let dcx = cx.dep_context();
363 let dep_node_index = self.hash_result_and_alloc_node(dcx, key, edges, &result, hash_result);
364
365 (result, dep_node_index)
366 }
367
368 /// Executes something within an "anonymous" task, that is, a task the
369 /// `DepNode` of which is determined by the list of inputs it read from.
370 ///
371 /// NOTE: this does not actually count as a read of the DepNode here.
372 /// Using the result of this task without reading the DepNode will result
373 /// in untracked dependencies which may lead to ICEs as nodes are
374 /// incorrectly marked green.
375 ///
376 /// FIXME: This could perhaps return a `WithDepNode` to ensure that the
377 /// user of this function actually performs the read; we'll have to see
378 /// how to make that work with `anon` in `execute_job_incr`, though.
379 pub(crate) fn with_anon_task_inner<Tcx: DepContext<Deps = D>, OP, R>(
380 &self,
381 cx: Tcx,
382 dep_kind: DepKind,
383 op: OP,
384 ) -> (R, DepNodeIndex)
385 where
386 OP: FnOnce() -> R,
387 {
388 debug_assert!(!cx.is_eval_always(dep_kind));
389
390 let task_deps = Lock::new(TaskDeps::default());
391 let result = D::with_deps(TaskDepsRef::Allow(&task_deps), op);
392 let task_deps = task_deps.into_inner();
393 let task_deps = task_deps.reads;
394
395 let dep_node_index = match task_deps.len() {
396 0 => {
397 // Because the dep-node ID of an anon node is computed from the set of its
398 // dependencies, we already know what the ID of this dependency-less node is
399 // going to be (i.e. equal to the precomputed
400 // `SINGLETON_ZERO_DEPS_ANON_NODE`). As a consequence we can skip creating
401 // a `StableHasher` and sending the node through interning.
402 DepNodeIndex::SINGLETON_ZERO_DEPS_ANON_NODE
403 }
404 1 => {
405 // When there is only one dependency, don't bother creating a node.
406 task_deps[0]
407 }
408 _ => {
409 // The dep node indices are hashed here instead of hashing the dep nodes of the
410 // dependencies. These indices may refer to different nodes per session, but this isn't
411 // a problem here because we ensure that the final dep node hash is unique per session
412 // by combining it with the per-session seed `anon_id_seed`. This hash only needs
413 // to map the dependencies to a single value on a per-session basis.
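                // Put differently (illustrative restatement, no additional logic): the final
                // hash is `anon_id_seed.combine(stable_hash(dep_node_indices))`, so equal
                // dependency sets collapse to one anon node within a session, while the seed
                // keeps them from colliding with anon nodes of other sessions.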
414 let mut hasher = StableHasher::new();
415 task_deps.hash(&mut hasher);
416
417 let target_dep_node = DepNode {
418 kind: dep_kind,
419 // Fingerprint::combine() is faster than sending Fingerprint
420 // through the StableHasher (at least as long as StableHasher
421 // is so slow).
422 hash: self.current.anon_id_seed.combine(hasher.finish()).into(),
423 };
424
425 // The DepNodes generated by the process above are not unique: two queries could
426 // have exactly the same dependencies. However, deserialization does not handle
427 // duplicated nodes, so we do the deduplication here directly.
428 //
429 // As anonymous nodes are a small quantity compared to the full dep-graph, the
430 // memory impact of this `anon_node_to_index` map remains tolerable, and helps
431 // us avoid useless growth of the graph with almost-equivalent nodes.
432 self.current.anon_node_to_index.get_or_insert_with(target_dep_node, || {
433 self.current.alloc_new_node(target_dep_node, task_deps, Fingerprint::ZERO)
434 })
435 }
436 };
437
438 (result, dep_node_index)
439 }
440
441 /// Intern the new `DepNode` with the dependencies up-to-now.
442 fn hash_result_and_alloc_node<Ctxt: DepContext<Deps = D>, R>(
443 &self,
444 cx: &Ctxt,
445 node: DepNode,
446 edges: EdgesVec,
447 result: &R,
448 hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
449 ) -> DepNodeIndex {
450 let hashing_timer = cx.profiler().incr_result_hashing();
451 let current_fingerprint = hash_result.map(|hash_result| {
452 cx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, result))
453 });
454 let dep_node_index = self.alloc_and_color_node(node, edges, current_fingerprint);
455 hashing_timer.finish_with_query_invocation_id(dep_node_index.into());
456 dep_node_index
457 }
458}
459
460impl<D: Deps> DepGraph<D> {
461 #[inline]
462 pub fn read_index(&self, dep_node_index: DepNodeIndex) {
463 if let Some(ref data) = self.data {
464 D::read_deps(|task_deps| {
465 let mut task_deps = match task_deps {
466 TaskDepsRef::Allow(deps) => deps.lock(),
467 TaskDepsRef::EvalAlways => {
468 // We don't need to record dependencies of eval_always
469 // queries. They are re-evaluated unconditionally anyway.
470 return;
471 }
472 TaskDepsRef::Ignore => return,
473 TaskDepsRef::Forbid => {
474 // Reading is forbidden in this context. ICE with a useful error message.
475 panic_on_forbidden_read(data, dep_node_index)
476 }
477 };
478 let task_deps = &mut *task_deps;
479
480 if cfg!(debug_assertions) {
481 data.current.total_read_count.fetch_add(1, Ordering::Relaxed);
482 }
483
484 // As long as we only have a low number of reads we can avoid doing a hash
485 // insert and potentially allocating/reallocating the hashmap
486 let new_read = if task_deps.reads.len() < EdgesVec::INLINE_CAPACITY {
487 task_deps.reads.iter().all(|other| *other != dep_node_index)
488 } else {
489 task_deps.read_set.insert(dep_node_index)
490 };
491 if new_read {
492 task_deps.reads.push(dep_node_index);
493 if task_deps.reads.len() == EdgesVec::INLINE_CAPACITY {
494 // Fill `read_set` with what we have so far so we can use the hashset
495 // next time
496 task_deps.read_set.extend(task_deps.reads.iter().copied());
497 }
498
499 #[cfg(debug_assertions)]
500 {
501 if let Some(target) = task_deps.node {
502 if let Some(ref forbidden_edge) = data.current.forbidden_edge {
503 let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
504 if forbidden_edge.test(&src, &target) {
505 panic!("forbidden edge {:?} -> {:?} created", src, target)
506 }
507 }
508 }
509 }
510 } else if cfg!(debug_assertions) {
511 data.current.total_duplicate_read_count.fetch_add(1, Ordering::Relaxed);
512 }
513 })
514 }
515 }
516
517 /// This encodes a diagnostic by creating a node with a unique index and associating
518 /// `diagnostic` with it, for use in the next session.
519 #[inline]
520 pub fn record_diagnostic<Qcx: QueryContext>(&self, qcx: Qcx, diagnostic: &DiagInner) {
521 if let Some(ref data) = self.data {
522 D::read_deps(|task_deps| match task_deps {
523 TaskDepsRef::EvalAlways | TaskDepsRef::Ignore => return,
524 TaskDepsRef::Forbid | TaskDepsRef::Allow(..) => {
525 self.read_index(data.encode_diagnostic(qcx, diagnostic));
526 }
527 })
528 }
529 }
530 /// This forces a diagnostic node green by running its side effect. `prev_index` would
531 /// refer to a node created by `encode_diagnostic` in the previous session.
532 #[inline]
533 pub fn force_diagnostic_node<Qcx: QueryContext>(
534 &self,
535 qcx: Qcx,
536 prev_index: SerializedDepNodeIndex,
537 ) {
538 if let Some(ref data) = self.data {
539 data.force_diagnostic_node(qcx, prev_index);
540 }
541 }
542
543 /// Create a node when we force-feed a value into the query cache.
544 /// This is used to remove cycles during type-checking const generic parameters.
545 ///
546 /// As usual in the query system, we consider that the current state of the calling query
547 /// depends only on the list of dependencies recorded up to now. As a consequence, the value
548 /// that this query gives us can only depend on those dependencies too. Therefore,
549 /// it is sound to use the current dependency set for the created node.
550 ///
551 /// During replay, the order of the nodes is relevant in the dependency graph.
552 /// So the unchanged replay will mark the caller query before trying to mark this one.
553 /// If there is a change to report, the caller query will be re-executed before this one.
554 ///
555 /// FIXME: If the code is changed enough for this node to be marked before requiring the
556 /// caller's node, we suppose that those changes will be enough to mark this node red and
557 /// force a recomputation using the "normal" way.
558 pub fn with_feed_task<Ctxt: DepContext<Deps = D>, R: Debug>(
559 &self,
560 node: DepNode,
561 cx: Ctxt,
562 result: &R,
563 hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
564 ) -> DepNodeIndex {
565 if let Some(data) = self.data.as_ref() {
566 // The caller query has more dependencies than the node we are creating. We may
567 // encounter a case where this created node is marked as green, but the caller query is
568 // subsequently marked as red or recomputed. In this case, we will end up feeding a
569 // value to an existing node.
570 //
571 // For sanity, we still check that the loaded stable hash and the new one match.
572 if let Some(prev_index) = data.previous.node_to_index_opt(&node) {
573 let dep_node_index = data.colors.current(prev_index);
574 if let Some(dep_node_index) = dep_node_index {
575 crate::query::incremental_verify_ich(
576 cx,
577 data,
578 result,
579 prev_index,
580 hash_result,
581 |value| format!("{value:?}"),
582 );
583
584 #[cfg(debug_assertions)]
585 if hash_result.is_some() {
586 data.current.record_edge(
587 dep_node_index,
588 node,
589 data.prev_fingerprint_of(prev_index),
590 );
591 }
592
593 return dep_node_index;
594 }
595 }
596
597 let mut edges = EdgesVec::new();
598 D::read_deps(|task_deps| match task_deps {
599 TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
600 TaskDepsRef::EvalAlways => {
601 edges.push(DepNodeIndex::FOREVER_RED_NODE);
602 }
603 TaskDepsRef::Ignore => {}
604 TaskDepsRef::Forbid => {
605 panic!("Cannot summarize when dependencies are not recorded.")
606 }
607 });
608
609 data.hash_result_and_alloc_node(&cx, node, edges, result, hash_result)
610 } else {
611 // Incremental compilation is turned off. We just execute the task
612 // without tracking. We still provide a dep-node index that uniquely
613 // identifies the task so that we have a cheap way of referring to
614 // the query for self-profiling.
615 self.next_virtual_depnode_index()
616 }
617 }
618}
619
620impl<D: Deps> DepGraphData<D> {
621 fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
622 &self,
623 dep_node: &DepNode,
624 msg: impl FnOnce() -> S,
625 ) {
626 if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
627 let current = self.colors.get(prev_index);
628 assert!(current.is_none(), "{}", msg())
629 } else if let Some(nodes_in_current_session) = &self.current.nodes_in_current_session {
630 outline(|| {
631 let seen = nodes_in_current_session.lock().contains_key(dep_node);
632 assert!(!seen, "{}", msg());
633 });
634 }
635 }
636
637 fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
638 if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
639 self.colors.get(prev_index)
640 } else {
641 // This is a node that did not exist in the previous compilation session.
642 None
643 }
644 }
645
646 /// Returns true if the given node has been marked as green during the
647 /// current compilation session. Used in various assertions
648 #[inline]
649 pub(crate) fn is_index_green(&self, prev_index: SerializedDepNodeIndex) -> bool {
650 self.colors.get(prev_index).is_some_and(|c| c.is_green())
651 }
652
653 #[inline]
654 pub(crate) fn prev_fingerprint_of(&self, prev_index: SerializedDepNodeIndex) -> Fingerprint {
655 self.previous.fingerprint_by_index(prev_index)
656 }
657
658 #[inline]
659 pub(crate) fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> DepNode {
660 self.previous.index_to_node(prev_index)
661 }
662
663 pub(crate) fn mark_debug_loaded_from_disk(&self, dep_node: DepNode) {
664 self.debug_loaded_from_disk.lock().insert(dep_node);
665 }
666
667 /// This encodes a diagnostic by creating a node with a unique index and associating
668 /// `diagnostic` with it, for use in the next session.
669 #[inline]
670 fn encode_diagnostic<Qcx: QueryContext>(
671 &self,
672 qcx: Qcx,
673 diagnostic: &DiagInner,
674 ) -> DepNodeIndex {
675 // Use `send_new` so we get a unique index, even though the dep node is not unique.
676 let dep_node_index = self.current.encoder.send_new(
677 DepNode {
678 kind: D::DEP_KIND_SIDE_EFFECT,
679 hash: PackedFingerprint::from(Fingerprint::ZERO),
680 },
681 Fingerprint::ZERO,
682 // We want the side effect node to always be red so it will be forced and emit the
683 // diagnostic.
684 std::iter::once(DepNodeIndex::FOREVER_RED_NODE).collect(),
685 );
686 let side_effect = QuerySideEffect::Diagnostic(diagnostic.clone());
687 qcx.store_side_effect(dep_node_index, side_effect);
688 dep_node_index
689 }
690
691 /// This forces a diagnostic node green by running its side effect. `prev_index` would
692 /// refer to a node created by `encode_diagnostic` in the previous session.
693 #[inline]
694 fn force_diagnostic_node<Qcx: QueryContext>(
695 &self,
696 qcx: Qcx,
697 prev_index: SerializedDepNodeIndex,
698 ) {
699 D::with_deps(TaskDepsRef::Ignore, || {
700 let side_effect = qcx.load_side_effect(prev_index).unwrap();
701
702 match &side_effect {
703 QuerySideEffect::Diagnostic(diagnostic) => {
704 qcx.dep_context().sess().dcx().emit_diagnostic(diagnostic.clone());
705 }
706 }
707
708 // Use `send_and_color` as `promote_node_and_deps_to_current` expects all
709 // green dependencies. `send_and_color` will also prevent multiple nodes
710 // from being encoded for concurrent calls.
711 let dep_node_index = self.current.encoder.send_and_color(
712 prev_index,
713 &self.colors,
714 DepNode {
715 kind: D::DEP_KIND_SIDE_EFFECT,
716 hash: PackedFingerprint::from(Fingerprint::ZERO),
717 },
718 Fingerprint::ZERO,
719 std::iter::once(DepNodeIndex::FOREVER_RED_NODE).collect(),
720 true,
721 );
722 // This will just overwrite the same value for concurrent calls.
723 qcx.store_side_effect(dep_node_index, side_effect);
724 })
725 }
726
727 fn alloc_and_color_node(
728 &self,
729 key: DepNode,
730 edges: EdgesVec,
731 fingerprint: Option<Fingerprint>,
732 ) -> DepNodeIndex {
733 if let Some(prev_index) = self.previous.node_to_index_opt(&key) {
734 // Determine the color and index of the new `DepNode`.
735 let is_green = if let Some(fingerprint) = fingerprint {
736 if fingerprint == self.previous.fingerprint_by_index(prev_index) {
737 // This is a green node: it existed in the previous compilation,
738 // its query was re-executed, and it has the same result as before.
739 true
740 } else {
741 // This is a red node: it existed in the previous compilation, its query
742 // was re-executed, but it has a different result from before.
743 false
744 }
745 } else {
746 // This is a red node, effectively: it existed in the previous compilation
747 // session, its query was re-executed, but it doesn't compute a result hash
748 // (i.e. it represents a `no_hash` query), so we have no way of determining
749 // whether or not the result was the same as before.
750 false
751 };
752
753 let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO);
754
755 let dep_node_index = self.current.encoder.send_and_color(
756 prev_index,
757 &self.colors,
758 key,
759 fingerprint,
760 edges,
761 is_green,
762 );
763
764 self.current.record_node(dep_node_index, key, fingerprint);
765
766 dep_node_index
767 } else {
768 self.current.alloc_new_node(key, edges, fingerprint.unwrap_or(Fingerprint::ZERO))
769 }
770 }
771
772 fn promote_node_and_deps_to_current(&self, prev_index: SerializedDepNodeIndex) -> DepNodeIndex {
773 self.current.debug_assert_not_in_new_nodes(&self.previous, prev_index);
774
775 let dep_node_index = self.current.encoder.send_promoted(prev_index, &self.colors);
776
777 #[cfg(debug_assertions)]
778 self.current.record_edge(
779 dep_node_index,
780 self.previous.index_to_node(prev_index),
781 self.previous.fingerprint_by_index(prev_index),
782 );
783
784 dep_node_index
785 }
786}
787
788impl<D: Deps> DepGraph<D> {
789 /// Checks whether a previous work product exists for `v` and, if
790 /// so, returns the path that leads to it. Used to skip doing work.
791 pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
792 self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
793 }
794
795 /// Access the map of work-products created during the cached run. Only
796 /// used during saving of the dep-graph.
797 pub fn previous_work_products(&self) -> &WorkProductMap {
798 &self.data.as_ref().unwrap().previous_work_products
799 }
800
801 pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode) -> bool {
802 self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
803 }
804
805 #[cfg(debug_assertions)]
806 #[inline(always)]
807 pub(crate) fn register_dep_node_debug_str<F>(&self, dep_node: DepNode, debug_str_gen: F)
808 where
809 F: FnOnce() -> String,
810 {
811 let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;
812
813 if dep_node_debug.borrow().contains_key(&dep_node) {
814 return;
815 }
816 let debug_str = self.with_ignore(debug_str_gen);
817 dep_node_debug.borrow_mut().insert(dep_node, debug_str);
818 }
819
820 pub fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
821 self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
822 }
823
824 fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
825 if let Some(ref data) = self.data {
826 return data.node_color(dep_node);
827 }
828
829 None
830 }
831
832 pub fn try_mark_green<Qcx: QueryContext<Deps = D>>(
833 &self,
834 qcx: Qcx,
835 dep_node: &DepNode,
836 ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
837 self.data().and_then(|data| data.try_mark_green(qcx, dep_node))
838 }
839}
840
841impl<D: Deps> DepGraphData<D> {
842 /// Try to mark a node index for the node `dep_node`.
843 ///
844 /// A node will have an index when it has already been marked green, or when we can mark it
845 /// green. This function will mark the current task as a reader of the specified node when
846 /// a node index can be found for that node.
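    ///
    /// Illustrative usage sketch (hypothetical caller, not an actual call site):
    ///
    /// ```ignore (illustrative)
    /// if let Some((prev_index, dep_node_index)) = data.try_mark_green(qcx, &dep_node) {
    ///     // The node and, transitively, its dependencies are green: the cached result
    ///     // from the previous session can be reused.
    /// } else {
    ///     // The node is red, unknown, or could not be marked green: the query has to
    ///     // be re-executed.
    /// }
    /// ```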
847 pub(crate) fn try_mark_green<Qcx: QueryContext<Deps = D>>(
848 &self,
849 qcx: Qcx,
850 dep_node: &DepNode,
851 ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
852 debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
853
854 // Return None if the dep node didn't exist in the previous session
855 let prev_index = self.previous.node_to_index_opt(dep_node)?;
856
857 match self.colors.get(prev_index) {
858 Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
859 Some(DepNodeColor::Red) => None,
860 None => {
861 // This DepNode and the corresponding query invocation existed
862 // in the previous compilation session too, so we can try to
863 // mark it as green by recursively marking all of its
864 // dependencies green.
865 self.try_mark_previous_green(qcx, prev_index, dep_node, None)
866 .map(|dep_node_index| (prev_index, dep_node_index))
867 }
868 }
869 }
870
871 #[instrument(skip(self, qcx, parent_dep_node_index, frame), level = "debug")]
872 fn try_mark_parent_green<Qcx: QueryContext<Deps = D>>(
873 &self,
874 qcx: Qcx,
875 parent_dep_node_index: SerializedDepNodeIndex,
876 frame: Option<&MarkFrame<'_>>,
877 ) -> Option<()> {
878 let dep_dep_node_color = self.colors.get(parent_dep_node_index);
879 let dep_dep_node = &self.previous.index_to_node(parent_dep_node_index);
880
881 match dep_dep_node_color {
882 Some(DepNodeColor::Green(_)) => {
883 // This dependency has been marked as green before, we are
884 // still fine and can continue with checking the other
885 // dependencies.
886 debug!("dependency {dep_dep_node:?} was immediately green");
887 return Some(());
888 }
889 Some(DepNodeColor::Red) => {
890 // We found a dependency the value of which has changed
891 // compared to the previous compilation session. We cannot
892 // mark the DepNode as green and also don't need to bother
893 // with checking any of the other dependencies.
894 debug!("dependency {dep_dep_node:?} was immediately red");
895 return None;
896 }
897 None => {}
898 }
899
900 // We don't know the state of this dependency. If it isn't
901 // an eval_always node, let's try to mark it green recursively.
902 if !qcx.dep_context().is_eval_always(dep_dep_node.kind) {
903 debug!(
904 "state of dependency {:?} ({}) is unknown, trying to mark it green",
905 dep_dep_node, dep_dep_node.hash,
906 );
907
908 let node_index =
909 self.try_mark_previous_green(qcx, parent_dep_node_index, dep_dep_node, frame);
910
911 if node_index.is_some() {
912 debug!("managed to MARK dependency {dep_dep_node:?} as green");
913 return Some(());
914 }
915 }
916
917 // We failed to mark it green, so we try to force the query.
918 debug!("trying to force dependency {dep_dep_node:?}");
919 if !qcx.dep_context().try_force_from_dep_node(*dep_dep_node, parent_dep_node_index, frame) {
920 // The DepNode could not be forced.
921 debug!("dependency {dep_dep_node:?} could not be forced");
922 return None;
923 }
924
925 let dep_dep_node_color = self.colors.get(parent_dep_node_index);
926
927 match dep_dep_node_color {
928 Some(DepNodeColor::Green(_)) => {
929 debug!("managed to FORCE dependency {dep_dep_node:?} to green");
930 return Some(());
931 }
932 Some(DepNodeColor::Red) => {
933 debug!("dependency {dep_dep_node:?} was red after forcing");
934 return None;
935 }
936 None => {}
937 }
938
939 if let None = qcx.dep_context().sess().dcx().has_errors_or_delayed_bugs() {
940 panic!("try_mark_previous_green() - Forcing the DepNode should have set its color")
941 }
942
943 // If the query we just forced has resulted in
944 // some kind of compilation error, we cannot rely on
945 // the dep-node color having been properly updated.
946 // This means that the query system has reached an
947 // invalid state. We let the compiler continue (by
948 // returning `None`) so it can emit error messages
949 // and wind down, but rely on the fact that this
950 // invalid state will not be persisted to the
951 // incremental compilation cache because of
952 // compilation errors being present.
953 debug!("dependency {dep_dep_node:?} resulted in compilation error");
954 return None;
955 }
956
957 /// Try to mark a dep-node which existed in the previous compilation session as green.
958 #[instrument(skip(self, qcx, prev_dep_node_index, frame), level = "debug")]
959 fn try_mark_previous_green<Qcx: QueryContext<Deps = D>>(
960 &self,
961 qcx: Qcx,
962 prev_dep_node_index: SerializedDepNodeIndex,
963 dep_node: &DepNode,
964 frame: Option<&MarkFrame<'_>>,
965 ) -> Option<DepNodeIndex> {
966 let frame = MarkFrame { index: prev_dep_node_index, parent: frame };
967
968 // We never try to mark eval_always nodes as green
969 debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
970
971 debug_assert_eq!(self.previous.index_to_node(prev_dep_node_index), *dep_node);
972
973 let prev_deps = self.previous.edge_targets_from(prev_dep_node_index);
974
975 for dep_dep_node_index in prev_deps {
976 self.try_mark_parent_green(qcx, dep_dep_node_index, Some(&frame))?;
977 }
978
979 // If we got here without hitting a `return` that means that all
980 // dependencies of this DepNode could be marked as green. Therefore we
981 // can also mark this DepNode as green.
982
983 // There may be multiple threads trying to mark the same dep node green concurrently
984
985 // We allocate an entry for the node in the current dependency graph and
986 // add all the appropriate edges imported from the previous graph.
987 let dep_node_index = self.promote_node_and_deps_to_current(prev_dep_node_index);
988
989 // ... and finally store a "Green" entry in the color map.
990 // Multiple threads can all write the same color here.
991
992 debug!("successfully marked {dep_node:?} as green");
993 Some(dep_node_index)
994 }
995}
996
997impl<D: Deps> DepGraph<D> {
998 /// Returns true if the given node has been marked as red during the
999 /// current compilation session. Used in various assertions
1000 pub fn is_red(&self, dep_node: &DepNode) -> bool {
1001 matches!(self.node_color(dep_node), Some(DepNodeColor::Red))
1002 }
1003
1004 /// Returns true if the given node has been marked as green during the
1005 /// current compilation session. Used in various assertions
1006 pub fn is_green(&self, dep_node: &DepNode) -> bool {
1007 self.node_color(dep_node).is_some_and(|c| c.is_green())
1008 }
1009
1010 pub fn assert_dep_node_not_yet_allocated_in_current_session<S: std::fmt::Display>(
1011 &self,
1012 dep_node: &DepNode,
1013 msg: impl FnOnce() -> S,
1014 ) {
1015 if let Some(data) = &self.data {
1016 data.assert_dep_node_not_yet_allocated_in_current_session(dep_node, msg)
1017 }
1018 }
1019
1020 /// This method loads all on-disk cacheable query results into memory, so
1021 /// they can be written out to the new cache file again. Most query results
1022 /// will already be in memory but in the case where we marked something as
1023 /// green but then did not need the value, that value will never have been
1024 /// loaded from disk.
1025 ///
1026 /// This method will only load queries that will end up in the disk cache.
1027 /// Other queries will not be executed.
1028 pub fn exec_cache_promotions<Tcx: DepContext>(&self, tcx: Tcx) {
1029 let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");
1030
1031 let data = self.data.as_ref().unwrap();
1032 for prev_index in data.colors.values.indices() {
1033 match data.colors.get(prev_index) {
1034 Some(DepNodeColor::Green(_)) => {
1035 let dep_node = data.previous.index_to_node(prev_index);
1036 tcx.try_load_from_on_disk_cache(dep_node);
1037 }
1038 None | Some(DepNodeColor::Red) => {
1039 // We can skip red nodes because a node can only be marked
1040 // as red if the query result was recomputed and thus is
1041 // already in memory.
1042 }
1043 }
1044 }
1045 }
1046
1047 pub fn finish_encoding(&self) -> FileEncodeResult {
1048 if let Some(data) = &self.data { data.current.encoder.finish(&data.current) } else { Ok(0) }
1049 }
1050
1051 pub(crate) fn next_virtual_depnode_index(&self) -> DepNodeIndex {
1052 debug_assert!(self.data.is_none());
1053 let index = self.virtual_dep_node_index.fetch_add(1, Ordering::Relaxed);
1054 DepNodeIndex::from_u32(index)
1055 }
1056}
1057
1058/// A "work product" is an intermediate result that we save into the
1059/// incremental directory for later re-use. The primary example is
1060/// the object files that we save for each partition at code
1061/// generation time.
1062///
1063/// Each work product is associated with a dep-node, representing the
1064/// process that produced the work-product. If that dep-node is found
1065/// to be dirty when we load up, then we will delete the work-product
1066/// at load time. If the work-product is found to be clean, then we
1067/// will keep a record in the `previous_work_products` list.
1068///
1069/// In addition, work products have an associated hash. This hash is
1070/// an extra hash that can be used to decide if the work-product from
1071/// a previous compilation can be re-used (in addition to the dirty
1072/// edges check).
1073///
1074/// As the primary example, consider the object files we generate for
1075/// each partition. In the first run, we create partitions based on
1076/// the symbols that need to be compiled. For each partition P, we
1077/// hash the symbols in P and create a `WorkProduct` record associated
1078/// with `DepNode::CodegenUnit(P)`; the hash is computed from the set of symbols
1079/// in P.
1080///
1081/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
1082/// judged to be clean (which means none of the things we read to
1083/// generate the partition were found to be dirty), it will be loaded
1084/// into previous work products. We will then regenerate the set of
1085/// symbols in the partition P and hash them (note that new symbols
1086/// may be added -- for example, new monomorphizations -- even if
1087/// nothing in P changed!). We will compare that hash against the
1088/// previous hash. If it matches up, we can reuse the object file.
1089#[derive(Clone, Debug, Encodable, Decodable)]
1090pub struct WorkProduct {
1091 pub cgu_name: String,
1092 /// Saved files associated with this CGU. In each key/value pair, the value is the path to the
1093 /// saved file and the key is some identifier for the type of file being saved.
1094 ///
1095 /// By convention, file extensions are currently used as identifiers, i.e. the key "o" maps to
1096 /// the object file's path, and "dwo" to the dwarf object file's path.
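    ///
    /// For example (illustrative file names only): `{"o": "foo.cgu-0.o", "dwo": "foo.cgu-0.dwo"}`.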
1097 pub saved_files: UnordMap<String, String>,
1098}
1099
1100pub type WorkProductMap = UnordMap<WorkProductId, WorkProduct>;
1101
1102// Index type for `DepNodeData`'s edges.
1103rustc_index::newtype_index! {
1104 struct EdgeIndex {}
1105}
1106
1107/// `CurrentDepGraph` stores the dependency graph for the current session. It
1108/// will be populated as we run queries or tasks. We never remove nodes from the
1109/// graph: they are only added.
1110///
1111/// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes
1112/// in memory. This is important, because these graph structures are some of the
1113/// largest in the compiler.
1114///
1115/// For this reason, we avoid storing `DepNode`s more than once as map
1116/// keys. The `anon_node_to_index` map only contains nodes of anonymous queries not in the previous
1117/// graph, and we map nodes in the previous graph to indices via a two-step
1118/// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
1119/// and the `prev_index_to_index` vector (which is more compact and faster than
1120/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
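///
/// Illustrative sketch of that two-step lookup (hedged; in the current code the second
/// step goes through the color map, cf. `DepNodeColorMap::current`):
///
/// ```ignore (illustrative)
/// let prev_index: SerializedDepNodeIndex = previous.node_to_index_opt(&dep_node)?;
/// let current_index: Option<DepNodeIndex> = colors.current(prev_index);
/// ```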
1121///
1122/// This struct uses three locks internally. The `data`, `anon_node_to_index`,
1123/// and `prev_index_to_index` fields are locked separately. Operations that take
1124/// a `DepNodeIndex` typically just access the `data` field.
1125///
1126/// We only need to manipulate at most two locks simultaneously:
1127/// `anon_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
1128/// manipulating both, we acquire `anon_node_to_index` or `prev_index_to_index`
1129/// first, and `data` second.
1130pub(super) struct CurrentDepGraph<D: Deps> {
1131 encoder: GraphEncoder<D>,
1132 anon_node_to_index: ShardedHashMap<DepNode, DepNodeIndex>,
1133
1134 /// This is used to verify that fingerprints do not change between the creation of a node
1135 /// and its recomputation.
1136 #[cfg(debug_assertions)]
1137 fingerprints: Lock<IndexVec<DepNodeIndex, Option<Fingerprint>>>,
1138
1139 /// Used to trap when a specific edge is added to the graph.
1140 /// This is used for debug purposes and is only active with `debug_assertions`.
1141 #[cfg(debug_assertions)]
1142 forbidden_edge: Option<EdgeFilter>,
1143
1144 /// Used to verify the absence of hash collisions among DepNodes.
1145 /// This field is only `Some` if the `-Z incremental_verify_ich` option is present
1146 /// or if `debug_assertions` are enabled.
1147 ///
1148 /// The map contains all DepNodes that have been allocated in the current session so far.
1149 nodes_in_current_session: Option<Lock<FxHashMap<DepNode, DepNodeIndex>>>,
1150
1151 /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
1152 /// their edges. This has the beneficial side-effect that multiple anonymous
1153 /// nodes can be coalesced into one without changing the semantics of the
1154 /// dependency graph. However, the merging of nodes can lead to a subtle
1155 /// problem during red-green marking: The color of an anonymous node from
1156 /// the current session might "shadow" the color of the node with the same
1157 /// ID from the previous session. In order to side-step this problem, we make
1158 /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
1159 /// This is implemented by mixing a session-key into the ID fingerprint of
1160 /// each anon node. The session-key is a hash of the number of previous sessions.
1161 anon_id_seed: Fingerprint,
1162
1163 /// These are simple counters that are for profiling and
1164 /// debugging and only active with `debug_assertions`.
1165 pub(super) total_read_count: AtomicU64,
1166 pub(super) total_duplicate_read_count: AtomicU64,
1167}
1168
1169impl<D: Deps> CurrentDepGraph<D> {
1170 fn new(
1171 session: &Session,
1172 prev_graph_node_count: usize,
1173 encoder: FileEncoder,
1174 previous: Arc<SerializedDepGraph>,
1175 ) -> Self {
1176 let mut stable_hasher = StableHasher::new();
1177 previous.session_count().hash(&mut stable_hasher);
1178 let anon_id_seed = stable_hasher.finish();
1179
1180 #[cfg(debug_assertions)]
1181 let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
1182 Ok(s) => match EdgeFilter::new(&s) {
1183 Ok(f) => Some(f),
1184 Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
1185 },
1186 Err(_) => None,
1187 };
1188
1189 let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;
1190
1191 let new_node_dbg =
1192 session.opts.unstable_opts.incremental_verify_ich || cfg!(debug_assertions);
1193
1194 CurrentDepGraph {
1195 encoder: GraphEncoder::new(session, encoder, prev_graph_node_count, previous),
1196 anon_node_to_index: ShardedHashMap::with_capacity(
1197 // FIXME: The count estimate is off as anon nodes are only a portion of the nodes.
1198 new_node_count_estimate / sharded::shards(),
1199 ),
1200 anon_id_seed,
1201 #[cfg(debug_assertions)]
1202 forbidden_edge,
1203 #[cfg(debug_assertions)]
1204 fingerprints: Lock::new(IndexVec::from_elem_n(None, new_node_count_estimate)),
1205 nodes_in_current_session: new_node_dbg.then(|| {
1206 Lock::new(FxHashMap::with_capacity_and_hasher(
1207 new_node_count_estimate,
1208 Default::default(),
1209 ))
1210 }),
1211 total_read_count: AtomicU64::new(0),
1212 total_duplicate_read_count: AtomicU64::new(0),
1213 }
1214 }
1215
1216 #[cfg(debug_assertions)]
1217 fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode, fingerprint: Fingerprint) {
1218 if let Some(forbidden_edge) = &self.forbidden_edge {
1219 forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
1220 }
1221 let previous = *self.fingerprints.lock().get_or_insert_with(dep_node_index, || fingerprint);
1222 assert_eq!(previous, fingerprint, "Unstable fingerprints for {:?}", key);
1223 }
1224
1225 #[inline(always)]
1226 fn record_node(
1227 &self,
1228 dep_node_index: DepNodeIndex,
1229 key: DepNode,
1230 _current_fingerprint: Fingerprint,
1231 ) {
1232 #[cfg(debug_assertions)]
1233 self.record_edge(dep_node_index, key, _current_fingerprint);
1234
1235 if let Some(ref nodes_in_current_session) = self.nodes_in_current_session {
1236 outline(|| {
1237 if nodes_in_current_session.lock().insert(key, dep_node_index).is_some() {
1238 panic!("Found duplicate dep-node {key:?}");
1239 }
1240 });
1241 }
1242 }
1243
1244 /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
1245 /// Assumes that this is a node that has no equivalent in the previous dep-graph.
1246 #[inline(always)]
1247 fn alloc_new_node(
1248 &self,
1249 key: DepNode,
1250 edges: EdgesVec,
1251 current_fingerprint: Fingerprint,
1252 ) -> DepNodeIndex {
1253 let dep_node_index = self.encoder.send_new(key, current_fingerprint, edges);
1254
1255 self.record_node(dep_node_index, key, current_fingerprint);
1256
1257 dep_node_index
1258 }
1259
1260 #[inline]
1261 fn debug_assert_not_in_new_nodes(
1262 &self,
1263 prev_graph: &SerializedDepGraph,
1264 prev_index: SerializedDepNodeIndex,
1265 ) {
1266 if let Some(ref nodes_in_current_session) = self.nodes_in_current_session {
1267 debug_assert!(
1268 !nodes_in_current_session
1269 .lock()
1270 .contains_key(&prev_graph.index_to_node(prev_index)),
1271 "node from previous graph present in new node collection"
1272 );
1273 }
1274 }
1275}
1276
1277#[derive(Debug, Clone, Copy)]
1278pub enum TaskDepsRef<'a> {
1279 /// New dependencies can be added to the
1280 /// `TaskDeps`. This is used when executing a 'normal' query
1281 /// (no `eval_always` modifier)
1282 Allow(&'a Lock<TaskDeps>),
1283 /// This is used when executing an `eval_always` query. We don't
1284 /// need to track dependencies for a query that's always
1285 /// re-executed -- but we need to know that this is an `eval_always`
1286 /// query in order to emit dependencies to `DepNodeIndex::FOREVER_RED_NODE`
1287 /// when directly feeding other queries.
1288 EvalAlways,
1289 /// New dependencies are ignored. This is also used for `dep_graph.with_ignore`.
1290 Ignore,
1291 /// Any attempt to add new dependencies will cause a panic.
1292 /// This is used when decoding a query result from disk,
1293 /// to ensure that the decoding process doesn't itself
1294 /// require the execution of any queries.
1295 Forbid,
1296}
1297
1298#[derive(Debug)]
1299pub struct TaskDeps {
1300 #[cfg(debug_assertions)]
1301 node: Option<DepNode>,
1302 reads: EdgesVec,
1303 read_set: FxHashSet<DepNodeIndex>,
1304 phantom_data: PhantomData<DepNode>,
1305}
1306
1307impl Default for TaskDeps {
1308 fn default() -> Self {
1309 Self {
1310 #[cfg(debug_assertions)]
1311 node: None,
1312 reads: EdgesVec::new(),
1313 read_set: FxHashSet::with_capacity_and_hasher(128, Default::default()),
1314 phantom_data: PhantomData,
1315 }
1316 }
1317}
1318// A data structure that stores `Option<DepNodeColor>` values as a contiguous
1319// array, using one `u32` per entry.
1320pub(super) struct DepNodeColorMap {
1321 values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
1322 sync: bool,
1323}
1324
1325const COMPRESSED_NONE: u32 = u32::MAX;
1326const COMPRESSED_RED: u32 = u32::MAX - 1;
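
// Illustrative summary of the packed encoding (matches `get` and `insert` below):
//
//     stored u32 value       decoded color
//     ----------------       ----------------------------------------------------
//     COMPRESSED_NONE        None (no color recorded yet)
//     COMPRESSED_RED         Some(DepNodeColor::Red)
//     n <= MAX_AS_U32        Some(DepNodeColor::Green(DepNodeIndex::from_u32(n)))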
1327
1328impl DepNodeColorMap {
1329 fn new(size: usize) -> DepNodeColorMap {
1330 debug_assert!(COMPRESSED_RED > DepNodeIndex::MAX_AS_U32);
1331 DepNodeColorMap {
1332 values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect(),
1333 sync: is_dyn_thread_safe(),
1334 }
1335 }
1336
1337 #[inline]
1338 pub(super) fn current(&self, index: SerializedDepNodeIndex) -> Option<DepNodeIndex> {
1339 let value = self.values[index].load(Ordering::Relaxed);
1340 if value <= DepNodeIndex::MAX_AS_U32 { Some(DepNodeIndex::from_u32(value)) } else { None }
1341 }
1342
1343 /// This tries to atomically mark a node green and assign `index` as the new
1344 /// index. This returns `Ok` if `index` gets assigned, otherwise it returns
1345/// the already allocated index in `Err`.
1346 #[inline]
1347 pub(super) fn try_mark_green(
1348 &self,
1349 prev_index: SerializedDepNodeIndex,
1350 index: DepNodeIndex,
1351 ) -> Result<(), DepNodeIndex> {
1352 let value = &self.values[prev_index];
1353 if self.sync {
1354 match value.compare_exchange(
1355 COMPRESSED_NONE,
1356 index.as_u32(),
1357 Ordering::Relaxed,
1358 Ordering::Relaxed,
1359 ) {
1360 Ok(_) => Ok(()),
1361 Err(v) => Err(DepNodeIndex::from_u32(v)),
1362 }
1363 } else {
1364 let v = value.load(Ordering::Relaxed);
1365 if v == COMPRESSED_NONE {
1366 value.store(index.as_u32(), Ordering::Relaxed);
1367 Ok(())
1368 } else {
1369 Err(DepNodeIndex::from_u32(v))
1370 }
1371 }
1372 }
1373
1374 #[inline]
1375 pub(super) fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
1376 match self.values[index].load(Ordering::Acquire) {
1377 COMPRESSED_NONE => None,
1378 COMPRESSED_RED => Some(DepNodeColor::Red),
1379 value => Some(DepNodeColor::Green(DepNodeIndex::from_u32(value))),
1380 }
1381 }
1382
1383 #[inline]
1384 pub(super) fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
1385 self.values[index].store(
1386 match color {
1387 DepNodeColor::Red => COMPRESSED_RED,
1388 DepNodeColor::Green(index) => index.as_u32(),
1389 },
1390 Ordering::Release,
1391 )
1392 }
1393}
1394
1395#[inline(never)]
1396#[cold]
1397pub(crate) fn print_markframe_trace<D: Deps>(graph: &DepGraph<D>, frame: Option<&MarkFrame<'_>>) {
1398 let data = graph.data.as_ref().unwrap();
1399
1400 eprintln!("there was a panic while trying to force a dep node");
1401 eprintln!("try_mark_green dep node stack:");
1402
1403 let mut i = 0;
1404 let mut current = frame;
1405 while let Some(frame) = current {
1406 let node = data.previous.index_to_node(frame.index);
1407 eprintln!("#{i} {node:?}");
1408 current = frame.parent;
1409 i += 1;
1410 }
1411
1412 eprintln!("end of try_mark_green dep node stack");
1413}
1414
1415#[cold]
1416#[inline(never)]
1417fn panic_on_forbidden_read<D: Deps>(data: &DepGraphData<D>, dep_node_index: DepNodeIndex) -> ! {
1418 // We have to do an expensive reverse-lookup of the DepNode that
1419 // corresponds to `dep_node_index`, but that's OK since we are about
1420 // to ICE anyway.
1421 let mut dep_node = None;
1422
1423 // First try to find the dep node among those that already existed in the
1424 // previous session and have been marked green.
1425 for prev_index in data.colors.values.indices() {
1426 if data.colors.current(prev_index) == Some(dep_node_index) {
1427 dep_node = Some(data.previous.index_to_node(prev_index));
1428 break;
1429 }
1430 }
1431
1432 if dep_node.is_none()
1433 && let Some(nodes) = &data.current.nodes_in_current_session
1434 {
1435 // Try to find it among the nodes allocated so far in this session
1436 // This is OK, there's only ever one node result possible so this is deterministic.
1437 #[allow(rustc::potential_query_instability)]
1438 if let Some((node, _)) = nodes.lock().iter().find(|&(_, index)| *index == dep_node_index) {
1439 dep_node = Some(*node);
1440 }
1441 }
1442
1443 let dep_node = dep_node.map_or_else(
1444 || format!("with index {:?}", dep_node_index),
1445 |dep_node| format!("`{:?}`", dep_node),
1446 );
1447
1448 panic!(
1449 "Error: trying to record dependency on DepNode {dep_node} in a \
1450 context that does not allow it (e.g. during query deserialization). \
1451 The most common case of recording a dependency on a DepNode `foo` is \
1452 when the corresponding query `foo` is invoked. Invoking queries is not \
1453 allowed as part of loading something from the incremental on-disk cache. \
1454 See <https://github.com/rust-lang/rust/pull/91919>."
1455 )
1456}