rustc_query_system/query/job.rs

use std::fmt::Debug;
use std::hash::Hash;
use std::io::Write;
use std::iter;
use std::num::NonZero;
use std::sync::Arc;

use parking_lot::{Condvar, Mutex};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_errors::{Diag, DiagCtxtHandle};
use rustc_hir::def::DefKind;
use rustc_session::Session;
use rustc_span::{DUMMY_SP, Span};

use super::QueryStackFrameExtra;
use crate::dep_graph::DepContext;
use crate::error::CycleStack;
use crate::query::plumbing::CycleError;
use crate::query::{QueryContext, QueryStackFrame};

/// Represents a span and a query key.
#[derive(Clone, Debug)]
pub struct QueryInfo<I> {
    /// The span corresponding to the reason for which this query was required.
    pub span: Span,
    pub query: QueryStackFrame<I>,
}

impl<I> QueryInfo<I> {
    pub(crate) fn lift<Qcx: QueryContext<QueryInfo = I>>(
        &self,
        qcx: Qcx,
    ) -> QueryInfo<QueryStackFrameExtra> {
        QueryInfo { span: self.span, query: self.query.lift(qcx) }
    }
}

pub type QueryMap<I> = FxHashMap<QueryJobId, QueryJobInfo<I>>;

/// A value uniquely identifying an active query job.
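///
/// Wrapping a `NonZero<u64>` means `Option<QueryJobId>` (used for parent
/// links below) is the same size as `QueryJobId` itself.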
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
pub struct QueryJobId(pub NonZero<u64>);

impl QueryJobId {
    fn query<I: Clone>(self, map: &QueryMap<I>) -> QueryStackFrame<I> {
        map.get(&self).unwrap().query.clone()
    }

    fn span<I>(self, map: &QueryMap<I>) -> Span {
        map.get(&self).unwrap().job.span
    }

    fn parent<I>(self, map: &QueryMap<I>) -> Option<QueryJobId> {
        map.get(&self).unwrap().job.parent
    }

    fn latch<I>(self, map: &QueryMap<I>) -> Option<&QueryLatch<I>> {
        map.get(&self).unwrap().job.latch.as_ref()
    }
}

#[derive(Clone, Debug)]
pub struct QueryJobInfo<I> {
    pub query: QueryStackFrame<I>,
    pub job: QueryJob<I>,
}

/// Represents an active query job.
#[derive(Debug)]
pub struct QueryJob<I> {
    pub id: QueryJobId,

    /// The span corresponding to the reason for which this query was required.
    pub span: Span,

    /// The parent query job which created this job and is implicitly waiting on it.
    pub parent: Option<QueryJobId>,

    /// The latch that is used to wait on this job.
    latch: Option<QueryLatch<I>>,
}

impl<I> Clone for QueryJob<I> {
    fn clone(&self) -> Self {
        Self { id: self.id, span: self.span, parent: self.parent, latch: self.latch.clone() }
    }
}

impl<I> QueryJob<I> {
    /// Creates a new query job.
    #[inline]
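    /// A hypothetical construction site (not a doctest; ids are handed out
    /// elsewhere in the query plumbing, and a latch is only created lazily
    /// once a waiter shows up):
    /// ```ignore
    /// let job = QueryJob::new(id, span, parent_id);
    /// ```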
    pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
        QueryJob { id, span, parent, latch: None }
    }

    pub(super) fn latch(&mut self) -> QueryLatch<I> {
        if self.latch.is_none() {
            self.latch = Some(QueryLatch::new());
        }
        self.latch.as_ref().unwrap().clone()
    }

    /// Signals to waiters that the query is complete.
    ///
    /// This does nothing for single-threaded rustc,
    /// as there are no concurrent jobs which could be waiting on us.
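    ///
    /// A minimal sketch of the protocol (hypothetical, not a doctest):
    /// ```ignore
    /// let latch = job.latch();  // other threads clone this and block in `wait_on`
    /// // ... execute the query ...
    /// job.signal_complete();    // wakes every thread parked on the latch
    /// ```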
    #[inline]
    pub fn signal_complete(self) {
        if let Some(latch) = self.latch {
            latch.set();
        }
    }
}

impl QueryJobId {
    pub(super) fn find_cycle_in_stack<I: Clone>(
        &self,
        query_map: QueryMap<I>,
        current_job: &Option<QueryJobId>,
        span: Span,
    ) -> CycleError<I> {
        // Find the waitee amongst `current_job`'s parents
        let mut cycle = Vec::new();
        let mut current_job = Option::clone(current_job);

        while let Some(job) = current_job {
            let info = query_map.get(&job).unwrap();
            cycle.push(QueryInfo { span: info.job.span, query: info.query.clone() });

            if job == *self {
                cycle.reverse();

                // This is the end of the cycle. The span entry we included was
                // for the usage of the cycle itself, and not part of the cycle.
                // Replace it with the span which caused the cycle to form.
                cycle[0].span = span;
                // Find out why the cycle itself was used
                let usage = info
                    .job
                    .parent
                    .as_ref()
                    .map(|parent| (info.job.span, parent.query(&query_map)));
                return CycleError { usage, cycle };
            }

            current_job = info.job.parent;
        }

        panic!("did not find a cycle")
    }

    #[cold]
    #[inline(never)]
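    /// Walks up the parent chain from `self` and returns the highest ancestor
    /// that has the same `dep_kind` as `self`, together with a count of the
    /// same-kind queries seen on the way up (including `self`).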
    pub fn find_dep_kind_root<I: Clone>(&self, query_map: QueryMap<I>) -> (QueryJobInfo<I>, usize) {
        let mut depth = 1;
        let info = query_map.get(&self).unwrap();
        let dep_kind = info.query.dep_kind;
        let mut current_id = info.job.parent;
        let mut last_layout = (info.clone(), depth);

        while let Some(id) = current_id {
            let info = query_map.get(&id).unwrap();
            if info.query.dep_kind == dep_kind {
                depth += 1;
                last_layout = (info.clone(), depth);
            }
            current_id = info.job.parent;
        }
        last_layout
    }
}

#[derive(Debug)]
struct QueryWaiter<I> {
    query: Option<QueryJobId>,
    condvar: Condvar,
    span: Span,
    cycle: Mutex<Option<CycleError<I>>>,
}

#[derive(Debug)]
struct QueryLatchInfo<I> {
    complete: bool,
    waiters: Vec<Arc<QueryWaiter<I>>>,
}

#[derive(Debug)]
pub(super) struct QueryLatch<I> {
    info: Arc<Mutex<QueryLatchInfo<I>>>,
}

impl<I> Clone for QueryLatch<I> {
    fn clone(&self) -> Self {
        Self { info: Arc::clone(&self.info) }
    }
}

impl<I> QueryLatch<I> {
    fn new() -> Self {
        QueryLatch {
            info: Arc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
        }
    }

    /// Waits for the query job to complete.
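    ///
    /// Returns `Err` if the wait was broken by the deadlock handler, which
    /// stores a `CycleError` in this waiter before resuming it. A hypothetical
    /// caller (not a doctest):
    /// ```ignore
    /// match latch.wait_on(qcx, current_job, span) {
    ///     Ok(()) => { /* the job finished; reuse its cached result */ }
    ///     Err(cycle) => { /* this waiter was picked to report the query cycle */ }
    /// }
    /// ```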
    pub(super) fn wait_on(
        &self,
        qcx: impl QueryContext,
        query: Option<QueryJobId>,
        span: Span,
    ) -> Result<(), CycleError<I>> {
        let waiter =
            Arc::new(QueryWaiter { query, span, cycle: Mutex::new(None), condvar: Condvar::new() });
        self.wait_on_inner(qcx, &waiter);
        // FIXME: Get rid of this lock. We have ownership of the QueryWaiter,
        // although another thread may still have an Arc reference, so we
        // cannot use Arc::get_mut.
        let mut cycle = waiter.cycle.lock();
        match cycle.take() {
            None => Ok(()),
            Some(cycle) => Err(cycle),
        }
    }

    /// Waits on this latch, blocking the current thread until it is resumed,
    /// either by `set` or by the deadlock handler.
    fn wait_on_inner(&self, qcx: impl QueryContext, waiter: &Arc<QueryWaiter<I>>) {
        let mut info = self.info.lock();
        if !info.complete {
            // We push the waiter on to the `waiters` list. It can be accessed inside
            // the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
            // Both of these will remove it from the `waiters` list before resuming
            // this thread.
            info.waiters.push(Arc::clone(waiter));

            // If this detects a deadlock and the deadlock handler wants to resume this thread,
            // we have to be in the `wait` call. This is ensured by the deadlock handler
            // getting the `self.info` lock.
            rayon_core::mark_blocked();
            let proxy = qcx.jobserver_proxy();
            proxy.release_thread();
            waiter.condvar.wait(&mut info);
            // Release the lock before we potentially block in `acquire_thread`
            drop(info);
            proxy.acquire_thread();
        }
    }

    /// Sets the latch and resumes all waiters on it
    fn set(&self) {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        info.complete = true;
        let registry = rayon_core::Registry::current();
        for waiter in info.waiters.drain(..) {
            rayon_core::mark_unblocked(&registry);
            waiter.condvar.notify_one();
        }
    }

    /// Removes a single waiter from the list of waiters.
    /// This is used to break query cycles.
    fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter<I>> {
        let mut info = self.info.lock();
        debug_assert!(!info.complete);
        // Remove the waiter from the list of waiters
        info.waiters.remove(waiter)
    }
}

/// A resumable waiter of a query. The `usize` is the index into the `waiters`
/// list of the query's latch.
type Waiter = (QueryJobId, usize);

/// Visits all the non-resumable and resumable waiters of a query.
/// Only waiters in a query are visited.
/// `visit` is called for every waiter and is passed a query waiting on `query_ref`
/// and a span indicating the reason the query waited on `query_ref`.
/// If `visit` returns `Some`, this function returns.
/// For visits of non-resumable waiters it returns the return value of `visit`.
/// For visits of resumable waiters it returns `Some(Some(Waiter))` which has the
/// required information to resume the waiter.
/// If all `visit` calls return `None`, this function also returns `None`.
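///
/// Sketch of the three-valued return protocol (a hypothetical caller, not a
/// doctest):
/// ```ignore
/// match visit_waiters(&query_map, query, |span, waiting_query| { /* ... */ None }) {
///     None => { /* no `visit` call returned `Some` */ }
///     Some(None) => { /* found via a non-resumable (parent) edge */ }
///     Some(Some((job, i))) => { /* waiter `i` in `job`'s latch can be resumed */ }
/// }
/// ```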
fn visit_waiters<I, F>(
    query_map: &QueryMap<I>,
    query: QueryJobId,
    mut visit: F,
) -> Option<Option<Waiter>>
where
    F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
{
    // Visit the parent query which is a non-resumable waiter since it's on the same stack
    if let Some(parent) = query.parent(query_map) {
        if let Some(cycle) = visit(query.span(query_map), parent) {
            return Some(cycle);
        }
    }

    // Visit the explicit waiters which use condvars and are resumable
    if let Some(latch) = query.latch(query_map) {
        for (i, waiter) in latch.info.lock().waiters.iter().enumerate() {
            if let Some(waiter_query) = waiter.query {
                if visit(waiter.span, waiter_query).is_some() {
                    // Return a value which indicates that this waiter can be resumed
                    return Some(Some((query, i)));
                }
            }
        }
    }

    None
}

/// Look for query cycles by doing a depth first search starting at `query`.
/// `span` is the reason for the `query` to execute. This is initially `DUMMY_SP`.
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
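///
/// Illustrative trace for a two-query cycle (names hypothetical):
/// ```ignore
/// // Query A waits on B and B waits back on A. Starting at A:
/// //   cycle_check(A): stack = [(DUMMY_SP, A)]
/// //   cycle_check(B): stack = [(DUMMY_SP, A), (span_b, B)]
/// //   cycle_check(A) again: A is already on the stack, so stack[0].0 is
/// //   replaced with the span with which B waited on A, and `Some(_)`
/// //   propagates back up through `visit_waiters`.
/// ```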
fn cycle_check<I>(
    query_map: &QueryMap<I>,
    query: QueryJobId,
    span: Span,
    stack: &mut Vec<(Span, QueryJobId)>,
    visited: &mut FxHashSet<QueryJobId>,
) -> Option<Option<Waiter>> {
    if !visited.insert(query) {
        return if let Some(p) = stack.iter().position(|q| q.1 == query) {
            // We detected a query cycle, fix up the initial span and return Some

            // Remove previous stack entries
            stack.drain(0..p);
            // Replace the span for the first query with the cycle cause
            stack[0].0 = span;
            Some(None)
        } else {
            None
        };
    }

    // The query was just marked as visited; add it to the stack
    stack.push((span, query));

    // Visit all the waiters
    let r = visit_waiters(query_map, query, |span, successor| {
        cycle_check(query_map, successor, span, stack, visited)
    });

    // Remove the entry in our stack if we didn't find a cycle
    if r.is_none() {
        stack.pop();
    }

    r
}

/// Finds out if there's a path to the compiler root (i.e. code which isn't in a query)
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
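///
/// Note that `visited` doubles as an ignore list: `remove_cycle` below
/// pre-populates it with the members of the cycle, so the search can only
/// escape through edges that leave the cycle.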
fn connected_to_root<I>(
    query_map: &QueryMap<I>,
    query: QueryJobId,
    visited: &mut FxHashSet<QueryJobId>,
) -> bool {
    // We already visited this or we're deliberately ignoring it
    if !visited.insert(query) {
        return false;
    }

    // This query is connected to the root (it has no query parent), return true
    if query.parent(query_map).is_none() {
        return true;
    }

    visit_waiters(query_map, query, |_, successor| {
        connected_to_root(query_map, successor, visited).then_some(None)
    })
    .is_some()
}

// Deterministically pick a query from a list
fn pick_query<'a, I: Clone, T, F>(query_map: &QueryMap<I>, queries: &'a [T], f: F) -> &'a T
where
    F: Fn(&T) -> (Span, QueryJobId),
{
    // Deterministically pick an entry point
    // FIXME: Sort this instead
    queries
        .iter()
        .min_by_key(|v| {
            let (span, query) = f(v);
            let hash = query.query(query_map).hash;
            // Prefer entry points which have valid spans for nicer error messages
            // We add an integer to the tuple ensuring that entry points
            // with valid spans are picked first
            let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
            (span_cmp, hash)
        })
        .unwrap()
}

/// Looks for query cycles starting from the last query in `jobs`.
/// If a cycle is found, all queries in the cycle are removed from `jobs` and
/// the function returns true.
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
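///
/// Callers drain `jobs` by invoking this in a loop, as `break_query_cycles`
/// does below:
/// ```ignore
/// while !jobs.is_empty() {
///     if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
///         found_cycle = true;
///     }
/// }
/// ```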
fn remove_cycle<I: Clone>(
    query_map: &QueryMap<I>,
    jobs: &mut Vec<QueryJobId>,
    wakelist: &mut Vec<Arc<QueryWaiter<I>>>,
) -> bool {
    let mut visited = FxHashSet::default();
    let mut stack = Vec::new();
    // Look for a cycle starting with the last query in `jobs`
    if let Some(waiter) =
        cycle_check(query_map, jobs.pop().unwrap(), DUMMY_SP, &mut stack, &mut visited)
    {
        // The stack is a vector of pairs of spans and queries; reverse it so that
        // the earlier entries require later entries
        let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();

        // Shift the spans so that queries are matched with the span for their waitee
        spans.rotate_right(1);

        // Zip them back together
        let mut stack: Vec<_> = iter::zip(spans, queries).collect();

        // Remove the queries in our cycle from the list of jobs to look at
        for r in &stack {
            if let Some(pos) = jobs.iter().position(|j| j == &r.1) {
                jobs.remove(pos);
            }
        }

        // Find the queries in the cycle which are
        // connected to queries outside the cycle
        let entry_points = stack
            .iter()
            .filter_map(|&(span, query)| {
                if query.parent(query_map).is_none() {
                    // This query is connected to the root (it has no query parent)
                    Some((span, query, None))
                } else {
                    let mut waiters = Vec::new();
                    // Find all the direct waiters who lead to the root
                    visit_waiters(query_map, query, |span, waiter| {
                        // Mark all the other queries in the cycle as already visited
                        let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1));

                        if connected_to_root(query_map, waiter, &mut visited) {
                            waiters.push((span, waiter));
                        }

                        None
                    });
                    if waiters.is_empty() {
                        None
                    } else {
                        // Deterministically pick one of the waiters to show to the user
                        let waiter = *pick_query(query_map, &waiters, |s| *s);
                        Some((span, query, Some(waiter)))
                    }
                }
            })
            .collect::<Vec<(Span, QueryJobId, Option<(Span, QueryJobId)>)>>();

        // Deterministically pick an entry point
        let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));

        // Shift the stack so that our entry point is first
        let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
        if let Some(pos) = entry_point_pos {
            stack.rotate_left(pos);
        }

        let usage = usage.as_ref().map(|(span, query)| (*span, query.query(query_map)));

        // Create the cycle error
        let error = CycleError {
            usage,
            cycle: stack
                .iter()
                .map(|&(s, ref q)| QueryInfo { span: s, query: q.query(query_map) })
                .collect(),
        };

        // We unwrap `waiter` here since there must always be one resumable
        // edge, i.e. one waited on using a query latch
        let (waitee_query, waiter_idx) = waiter.unwrap();

        // Extract the waiter we want to resume
        let waiter = waitee_query.latch(query_map).unwrap().extract_waiter(waiter_idx);

        // Set the cycle error so it will be picked up when resumed
        *waiter.cycle.lock() = Some(error);

        // Put the waiter on the list of things to resume
        wakelist.push(waiter);

        true
    } else {
        false
    }
}

/// Detects query cycles by using depth first search over all active query jobs.
/// If a query cycle is found it will break the cycle by finding an edge which
/// uses a query latch and then resuming that waiter.
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
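///
/// A hypothetical sketch of how a deadlock handler would drive this (the
/// names `qcx` and `registry` are assumptions, not this module's API):
/// ```ignore
/// // Take whatever (possibly partial) snapshot of active jobs we can get.
/// let query_map = match qcx.collect_active_jobs() {
///     Ok(map) | Err(map) => map,
/// };
/// break_query_cycles(query_map, &registry);
/// ```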
pub fn break_query_cycles<I: Clone + Debug>(
    query_map: QueryMap<I>,
    registry: &rayon_core::Registry,
) {
    let mut wakelist = Vec::new();
    // It is OK per the comments:
    // - https://github.com/rust-lang/rust/pull/131200#issuecomment-2798854932
    // - https://github.com/rust-lang/rust/pull/131200#issuecomment-2798866392
    #[allow(rustc::potential_query_instability)]
    let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect();

    let mut found_cycle = false;

    while !jobs.is_empty() {
        if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
            found_cycle = true;
        }
    }

    // Check that a cycle was found. It is possible for a deadlock to occur without
    // a query cycle if a query which can be waited on uses Rayon to do multithreading
    // internally. Such a query (X) may be executing on 2 threads (A and B) and A may
    // wait using Rayon on B. Rayon may then switch to executing another query (Y)
    // which in turn will wait on X causing a deadlock. We have a false dependency from
    // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
    // only considers the true dependency and won't detect a cycle.
    if !found_cycle {
        panic!(
            "deadlock detected as we're unable to find a query cycle to break\n\
            current query map:\n{:#?}",
            query_map
        );
    }

    // Mark all the threads we're about to wake up as unblocked. This needs to be done before
    // we wake the threads up as otherwise Rayon could detect a deadlock if a thread we
    // resumed fell asleep and this thread had yet to mark the remaining threads as unblocked.
    for _ in 0..wakelist.len() {
        rayon_core::mark_unblocked(registry);
    }

    for waiter in wakelist.into_iter() {
        waiter.condvar.notify_one();
    }
}

#[inline(never)]
#[cold]
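/// Builds the user-facing "cycle detected" diagnostic from a `CycleError`.
/// The primary span is derived from the first entry of the cycle stack.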
pub fn report_cycle<'a>(
    sess: &'a Session,
    CycleError { usage, cycle: stack }: &CycleError,
) -> Diag<'a> {
    assert!(!stack.is_empty());

    let span = stack[0].query.info.default_span(stack[1 % stack.len()].span);

    let mut cycle_stack = Vec::new();

    use crate::error::StackCount;
    let stack_count = if stack.len() == 1 { StackCount::Single } else { StackCount::Multiple };

    for i in 1..stack.len() {
        let query = &stack[i].query;
        let span = query.info.default_span(stack[(i + 1) % stack.len()].span);
        cycle_stack.push(CycleStack { span, desc: query.info.description.to_owned() });
    }

    let mut cycle_usage = None;
    if let Some((span, ref query)) = *usage {
        cycle_usage = Some(crate::error::CycleUsage {
            span: query.info.default_span(span),
            usage: query.info.description.to_string(),
        });
    }

    let alias =
        if stack.iter().all(|entry| matches!(entry.query.info.def_kind, Some(DefKind::TyAlias))) {
            Some(crate::error::Alias::Ty)
        } else if stack.iter().all(|entry| entry.query.info.def_kind == Some(DefKind::TraitAlias)) {
            Some(crate::error::Alias::Trait)
        } else {
            None
        };

    let cycle_diag = crate::error::Cycle {
        span,
        cycle_stack,
        stack_bottom: stack[0].query.info.description.to_owned(),
        alias,
        cycle_usage,
        stack_count,
        note_span: (),
    };

    sess.dcx().create_err(cycle_diag)
}
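
/// Prints the stack of active queries, starting at `current_query` and
/// following parent links, both to the diagnostic context (up to
/// `limit_frames` frames) and, if given, to `file` in full. Returns the total
/// number of frames in the stack, which may exceed the number printed.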
pub fn print_query_stack<Qcx: QueryContext>(
    qcx: Qcx,
    mut current_query: Option<QueryJobId>,
    dcx: DiagCtxtHandle<'_>,
    limit_frames: Option<usize>,
    mut file: Option<std::fs::File>,
) -> usize {
    // Be careful relying on global state here: this code is called from
    // a panic hook, which means that the global `DiagCtxt` may be in a weird
    // state if it was responsible for triggering the panic.
    let mut count_printed = 0;
    let mut count_total = 0;

    // Make use of a partial query map if we fail to take locks collecting active queries.
    let query_map = match qcx.collect_active_jobs() {
        Ok(query_map) => query_map,
        Err(query_map) => query_map,
    };

    if let Some(ref mut file) = file {
        let _ = writeln!(file, "\n\nquery stack during panic:");
    }
    while let Some(query) = current_query {
        let Some(query_info) = query_map.get(&query) else {
            break;
        };
        let query_extra = qcx.lift_query_info(&query_info.query.info);
        if Some(count_printed) < limit_frames || limit_frames.is_none() {
            // Only print to stderr as many stack frames as `limit_frames` when present.
            // FIXME: needs translation
            #[allow(rustc::diagnostic_outside_of_impl)]
            #[allow(rustc::untranslatable_diagnostic)]
            dcx.struct_failure_note(format!(
                "#{} [{:?}] {}",
                count_printed, query_info.query.dep_kind, query_extra.description
            ))
            .with_span(query_info.job.span)
            .emit();
            count_printed += 1;
        }

        if let Some(ref mut file) = file {
            let _ = writeln!(
                file,
                "#{} [{}] {}",
                count_total,
                qcx.dep_context().dep_kind_info(query_info.query.dep_kind).name,
                query_extra.description
            );
        }

        current_query = query_info.job.parent;
        count_total += 1;
    }

    if let Some(ref mut file) = file {
        let _ = writeln!(file, "end of query stack");
    }
    count_total
}