rustc_trait_selection/traits/select/mod.rs
1//! Candidate selection. See the [rustc dev guide] for more information on how this works.
2//!
3//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/resolution.html#selection
4
5use std::assert_matches::assert_matches;
6use std::cell::{Cell, RefCell};
7use std::fmt::{self, Display};
8use std::ops::ControlFlow;
9use std::{cmp, iter};
10
11use hir::def::DefKind;
12use rustc_data_structures::fx::{FxIndexMap, FxIndexSet};
13use rustc_data_structures::stack::ensure_sufficient_stack;
14use rustc_errors::{Diag, EmissionGuarantee};
15use rustc_hir as hir;
16use rustc_hir::LangItem;
17use rustc_hir::def_id::DefId;
18use rustc_infer::infer::BoundRegionConversionTime::{self, HigherRankedType};
19use rustc_infer::infer::DefineOpaqueTypes;
20use rustc_infer::infer::at::ToTrace;
21use rustc_infer::infer::relate::TypeRelation;
22use rustc_infer::traits::{PredicateObligations, TraitObligation};
23use rustc_middle::bug;
24use rustc_middle::dep_graph::{DepNodeIndex, dep_kinds};
25pub use rustc_middle::traits::select::*;
26use rustc_middle::ty::abstract_const::NotConstEvaluatable;
27use rustc_middle::ty::error::TypeErrorToStringExt;
28use rustc_middle::ty::print::{PrintTraitRefExt as _, with_no_trimmed_paths};
29use rustc_middle::ty::{
30 self, DeepRejectCtxt, GenericArgsRef, PolyProjectionPredicate, SizedTraitKind, Ty, TyCtxt,
31 TypeFoldable, TypeVisitableExt, TypingMode, Upcast, elaborate,
32};
33use rustc_span::{Symbol, sym};
34use tracing::{debug, instrument, trace};
35
36use self::EvaluationResult::*;
37use self::SelectionCandidate::*;
38use super::coherence::{self, Conflict};
39use super::project::ProjectionTermObligation;
40use super::util::closure_trait_ref_and_return_type;
41use super::{
42 ImplDerivedCause, Normalized, Obligation, ObligationCause, ObligationCauseCode,
43 PolyTraitObligation, PredicateObligation, Selection, SelectionError, SelectionResult,
44 TraitQueryMode, const_evaluatable, project, util, wf,
45};
46use crate::error_reporting::InferCtxtErrorExt;
47use crate::infer::{InferCtxt, InferOk, TypeFreshener};
48use crate::solve::InferCtxtSelectExt as _;
49use crate::traits::normalize::{normalize_with_depth, normalize_with_depth_to};
50use crate::traits::project::{ProjectAndUnifyResult, ProjectionCacheKeyExt};
51use crate::traits::{EvaluateConstErr, ProjectionCacheKey, effects, sizedness_fast_path};
52
53mod _match;
54mod candidate_assembly;
55mod confirmation;
56
57#[derive(Clone, Debug, Eq, PartialEq, Hash)]
58pub enum IntercrateAmbiguityCause<'tcx> {
59 DownstreamCrate { trait_ref: ty::TraitRef<'tcx>, self_ty: Option<Ty<'tcx>> },
60 UpstreamCrateUpdate { trait_ref: ty::TraitRef<'tcx>, self_ty: Option<Ty<'tcx>> },
61 ReservationImpl { message: Symbol },
62}
63
64impl<'tcx> IntercrateAmbiguityCause<'tcx> {
65 /// Emits notes when the overlap is caused by complex intercrate ambiguities.
66 /// See #23980 for details.
67 pub fn add_intercrate_ambiguity_hint<G: EmissionGuarantee>(&self, err: &mut Diag<'_, G>) {
68 err.note(self.intercrate_ambiguity_hint());
69 }
70
71 pub fn intercrate_ambiguity_hint(&self) -> String {
72 with_no_trimmed_paths!(match self {
73 IntercrateAmbiguityCause::DownstreamCrate { trait_ref, self_ty } => {
74 format!(
75 "downstream crates may implement trait `{trait_desc}`{self_desc}",
76 trait_desc = trait_ref.print_trait_sugared(),
77 self_desc = if let Some(self_ty) = self_ty {
78 format!(" for type `{self_ty}`")
79 } else {
80 String::new()
81 }
82 )
83 }
84 IntercrateAmbiguityCause::UpstreamCrateUpdate { trait_ref, self_ty } => {
85 format!(
86 "upstream crates may add a new impl of trait `{trait_desc}`{self_desc} \
87 in future versions",
88 trait_desc = trait_ref.print_trait_sugared(),
89 self_desc = if let Some(self_ty) = self_ty {
90 format!(" for type `{self_ty}`")
91 } else {
92 String::new()
93 }
94 )
95 }
96 IntercrateAmbiguityCause::ReservationImpl { message } => message.to_string(),
97 })
98 }
99}
100
101pub struct SelectionContext<'cx, 'tcx> {
102 pub infcx: &'cx InferCtxt<'tcx>,
103
104 /// Freshener used specifically for entries on the obligation
105 /// stack. This ensures that all entries on the stack at one time
106 /// will have the same set of placeholder entries, which is
107 /// important for checking for trait bounds that recursively
108 /// require themselves.
109 freshener: TypeFreshener<'cx, 'tcx>,
110
111 /// During coherence checking (intercrate mode), we remember predicates which were
112 /// considered ambiguous because of impls potentially added in other crates.
113 /// This is used in coherence to give improved diagnostics.
114 /// We don't do this until we detect a coherence error because it can
115 /// lead to false overflow results (#47139) and because always
116 /// computing it may negatively impact performance.
117 intercrate_ambiguity_causes: Option<FxIndexSet<IntercrateAmbiguityCause<'tcx>>>,
118
119 /// The mode that trait queries run in, which informs our error handling
120 /// policy. In essence, canonicalized queries need their errors propagated
121 /// rather than immediately reported because we do not have accurate spans.
122 query_mode: TraitQueryMode,
123}
124
125// A stack of trait obligations; each frame links back to its parent frame.
126struct TraitObligationStack<'prev, 'tcx> {
127 obligation: &'prev PolyTraitObligation<'tcx>,
128
129 /// The trait predicate from `obligation` but "freshened" with the
130 /// selection-context's freshener. Used to check for recursion.
131 fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
132
133 /// Starts out equal to `depth` -- if, during evaluation, we
134 /// encounter a cycle, then we will set this flag to the minimum
135 /// depth of that cycle for all participants in the cycle. These
136 /// participants will then forego caching their results. This is
137 /// not the most efficient solution, but it addresses #60010. The
138 /// problem we are trying to prevent:
139 ///
140 /// - If you have `A: AutoTrait` requires `B: AutoTrait` and `C: NonAutoTrait`
141 /// - `B: AutoTrait` requires `A: AutoTrait` (coinductive cycle, ok)
142 /// - `C: NonAutoTrait` requires `A: AutoTrait` (non-coinductive cycle, not ok)
143 ///
144 /// you don't want to cache that `B: AutoTrait` or `A: AutoTrait`
145 /// is `EvaluatedToOk`; this is because they were only considered
146 /// ok on the premise that `A: AutoTrait` held, but we indeed
147 /// encountered a problem (later on) with `A: AutoTrait`. So we
148 /// currently set a flag on the stack node for `B: AutoTrait` (as
149 /// well as the second instance of `A: AutoTrait`) to suppress
150 /// caching.
151 ///
152 /// This is a simple, targeted fix. A more-performant fix requires
153 /// deeper changes, but would permit more caching: we could
154 /// basically defer caching until we have fully evaluated the
155 /// tree, and then cache the entire tree at once. In any case, the
156 /// performance impact here shouldn't be so horrible: every time
157 /// this is hit, we do cache at least one trait, so we only
158 /// evaluate each member of a cycle up to N times, where N is the
159 /// length of the cycle. This means the performance impact is
160 /// bounded and we shouldn't have any terrible worst-cases.
161 reached_depth: Cell<usize>,
162
163 previous: TraitObligationStackList<'prev, 'tcx>,
164
165 /// The number of parent frames plus one (thus, the topmost frame has depth 1).
166 depth: usize,
167
168 /// The depth-first number of this node in the search graph -- a
169 /// pre-order index. Basically, a freshly incremented counter.
170 dfn: usize,
171}
172
173struct SelectionCandidateSet<'tcx> {
174 /// A list of candidates that definitely apply to the current
175 /// obligation (meaning: types unify).
176 vec: Vec<SelectionCandidate<'tcx>>,
177
178 /// If `true`, then there were candidates that might or might
179 /// not have applied, but we couldn't tell. This occurs when some
180 /// of the input types are type variables, in which case there are
181 /// various "builtin" rules that might or might not trigger.
182 ambiguous: bool,
183}
184
185#[derive(PartialEq, Eq, Debug, Clone)]
186struct EvaluatedCandidate<'tcx> {
187 candidate: SelectionCandidate<'tcx>,
188 evaluation: EvaluationResult,
189}
190
191/// When does the builtin impl for `T: Trait` apply?
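/// For example (an illustrative sketch, not the exact computation performed
/// elsewhere in this module): for a tuple obligation such as `(A, B): Copy`,
/// the builtin impl applies roughly as `Where([A, B])`; for a type that only
/// has user-written impls the answer is `None`; and for an unresolved
/// inference variable `_: Copy` it is `Ambiguous`.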
192#[derive(Debug)]
193enum BuiltinImplConditions<'tcx> {
194 /// The impl is conditional on `T1, T2, ...: Trait`.
195 Where(ty::Binder<'tcx, Vec<Ty<'tcx>>>),
196 /// There is no built-in impl. There may be some other
197 /// candidate (a where-clause or user-defined impl).
198 None,
199 /// It is unknown whether there is an impl.
200 Ambiguous,
201}
202
203impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
204 pub fn new(infcx: &'cx InferCtxt<'tcx>) -> SelectionContext<'cx, 'tcx> {
205 SelectionContext {
206 infcx,
207 freshener: infcx.freshener(),
208 intercrate_ambiguity_causes: None,
209 query_mode: TraitQueryMode::Standard,
210 }
211 }
212
213 pub fn with_query_mode(
214 infcx: &'cx InferCtxt<'tcx>,
215 query_mode: TraitQueryMode,
216 ) -> SelectionContext<'cx, 'tcx> {
217 debug!(?query_mode, "with_query_mode");
218 SelectionContext { query_mode, ..SelectionContext::new(infcx) }
219 }
220
221 /// Enables tracking of intercrate ambiguity causes. See
222 /// the documentation of [`Self::intercrate_ambiguity_causes`] for more.
223 pub fn enable_tracking_intercrate_ambiguity_causes(&mut self) {
224 assert_matches!(self.infcx.typing_mode(), TypingMode::Coherence);
225 assert!(self.intercrate_ambiguity_causes.is_none());
226 self.intercrate_ambiguity_causes = Some(FxIndexSet::default());
227 debug!("selcx: enable_tracking_intercrate_ambiguity_causes");
228 }
229
230 /// Gets the intercrate ambiguity causes collected since tracking
231 /// was enabled and disables tracking at the same time. If
232 /// tracking is not enabled, just returns an empty set.
233 pub fn take_intercrate_ambiguity_causes(
234 &mut self,
235 ) -> FxIndexSet<IntercrateAmbiguityCause<'tcx>> {
236 assert_matches!(self.infcx.typing_mode(), TypingMode::Coherence);
237 self.intercrate_ambiguity_causes.take().unwrap_or_default()
238 }
239
240 pub fn tcx(&self) -> TyCtxt<'tcx> {
241 self.infcx.tcx
242 }
243
244 ///////////////////////////////////////////////////////////////////////////
245 // Selection
246 //
247 // The selection phase tries to identify *how* an obligation will
248 // be resolved. For example, it will identify which impl or
249 // parameter bound is to be used. The process can be inconclusive
250 // if the self type in the obligation is not fully inferred. Selection
251 // can result in an error in one of two ways:
252 //
253 // 1. If no applicable impl or parameter bound can be found.
254 // 2. If the output type parameters in the obligation do not match
255 // those specified by the impl/bound. For example, if the obligation
256 // is `Vec<Foo>: Iterable<Bar>`, but the impl specifies
257 // `impl<T> Iterable<T> for Vec<T>`, then an error would result.
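//
// A minimal sketch of that second failure mode (hypothetical trait, shown
// purely for illustration):
//
//     trait Iterable<A> {}
//     impl<T> Iterable<T> for Vec<T> {}
//
//     // `Vec<Foo>: Iterable<Bar>` selects the impl above, but confirming it
//     // then requires `Bar == Foo`, which fails unless the two types unify.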
258
259 /// Attempts to satisfy the obligation. If successful, this will affect the surrounding
260 /// type environment by performing unification.
261 #[instrument(level = "debug", skip(self), ret)]
262 pub fn poly_select(
263 &mut self,
264 obligation: &PolyTraitObligation<'tcx>,
265 ) -> SelectionResult<'tcx, Selection<'tcx>> {
266 assert!(!self.infcx.next_trait_solver());
267
268 let candidate = match self.select_from_obligation(obligation) {
269 Err(SelectionError::Overflow(OverflowError::Canonical)) => {
270 // In standard mode, overflow must have been caught and reported
271 // earlier.
272 assert!(self.query_mode == TraitQueryMode::Canonical);
273 return Err(SelectionError::Overflow(OverflowError::Canonical));
274 }
275 Err(e) => {
276 return Err(e);
277 }
278 Ok(None) => {
279 return Ok(None);
280 }
281 Ok(Some(candidate)) => candidate,
282 };
283
284 match self.confirm_candidate(obligation, candidate) {
285 Err(SelectionError::Overflow(OverflowError::Canonical)) => {
286 assert!(self.query_mode == TraitQueryMode::Canonical);
287 Err(SelectionError::Overflow(OverflowError::Canonical))
288 }
289 Err(e) => Err(e),
290 Ok(candidate) => Ok(Some(candidate)),
291 }
292 }
293
294 pub fn select(
295 &mut self,
296 obligation: &TraitObligation<'tcx>,
297 ) -> SelectionResult<'tcx, Selection<'tcx>> {
298 if self.infcx.next_trait_solver() {
299 return self.infcx.select_in_new_trait_solver(obligation);
300 }
301
302 self.poly_select(&Obligation {
303 cause: obligation.cause.clone(),
304 param_env: obligation.param_env,
305 predicate: ty::Binder::dummy(obligation.predicate),
306 recursion_depth: obligation.recursion_depth,
307 })
308 }
309
310 fn select_from_obligation(
311 &mut self,
312 obligation: &PolyTraitObligation<'tcx>,
313 ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
314 debug_assert!(!obligation.predicate.has_escaping_bound_vars());
315
316 let pec = &ProvisionalEvaluationCache::default();
317 let stack = self.push_stack(TraitObligationStackList::empty(pec), obligation);
318
319 self.candidate_from_obligation(&stack)
320 }
321
322 #[instrument(level = "debug", skip(self), ret)]
323 fn candidate_from_obligation<'o>(
324 &mut self,
325 stack: &TraitObligationStack<'o, 'tcx>,
326 ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
327 debug_assert!(!self.infcx.next_trait_solver());
328 // Watch out for overflow. This intentionally bypasses (and does
329 // not update) the cache.
330 self.check_recursion_limit(stack.obligation, stack.obligation)?;
331
332 // Check the cache. Note that we freshen the trait-ref
333 // separately rather than using `stack.fresh_trait_pred` --
334 // this is because we want the unbound variables to be
335 // replaced with fresh types starting from index 0.
336 let cache_fresh_trait_pred = self.infcx.freshen(stack.obligation.predicate);
337 debug!(?cache_fresh_trait_pred);
338 debug_assert!(!stack.obligation.predicate.has_escaping_bound_vars());
339
340 if let Some(c) =
341 self.check_candidate_cache(stack.obligation.param_env, cache_fresh_trait_pred)
342 {
343 debug!("CACHE HIT");
344 return c;
345 }
346
347 // If no match, compute result and insert into cache.
348 //
349 // FIXME(nikomatsakis) -- this cache is not taking into
350 // account cycles that may have occurred in forming the
351 // candidate. I don't know of any specific problems that
352 // result but it seems awfully suspicious.
353 let (candidate, dep_node) =
354 self.in_task(|this| this.candidate_from_obligation_no_cache(stack));
355
356 debug!("CACHE MISS");
357 self.insert_candidate_cache(
358 stack.obligation.param_env,
359 cache_fresh_trait_pred,
360 dep_node,
361 candidate.clone(),
362 );
363 candidate
364 }
365
366 fn candidate_from_obligation_no_cache<'o>(
367 &mut self,
368 stack: &TraitObligationStack<'o, 'tcx>,
369 ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
370 if let Err(conflict) = self.is_knowable(stack) {
371 debug!("coherence stage: not knowable");
372 if self.intercrate_ambiguity_causes.is_some() {
373 debug!("evaluate_stack: intercrate_ambiguity_causes is some");
374 // Heuristics: show the diagnostics when there are no candidates in this crate.
375 if let Ok(candidate_set) = self.assemble_candidates(stack) {
376 let mut no_candidates_apply = true;
377
378 for c in candidate_set.vec.iter() {
379 if self.evaluate_candidate(stack, c)?.may_apply() {
380 no_candidates_apply = false;
381 break;
382 }
383 }
384
385 if !candidate_set.ambiguous && no_candidates_apply {
386 let trait_ref = self.infcx.resolve_vars_if_possible(
387 stack.obligation.predicate.skip_binder().trait_ref,
388 );
389 if !trait_ref.references_error() {
390 let self_ty = trait_ref.self_ty();
391 let self_ty = self_ty.has_concrete_skeleton().then(|| self_ty);
392 let cause = if let Conflict::Upstream = conflict {
393 IntercrateAmbiguityCause::UpstreamCrateUpdate { trait_ref, self_ty }
394 } else {
395 IntercrateAmbiguityCause::DownstreamCrate { trait_ref, self_ty }
396 };
397 debug!(?cause, "evaluate_stack: pushing cause");
398 self.intercrate_ambiguity_causes.as_mut().unwrap().insert(cause);
399 }
400 }
401 }
402 }
403 return Ok(None);
404 }
405
406 let candidate_set = self.assemble_candidates(stack)?;
407
408 if candidate_set.ambiguous {
409 debug!("candidate set contains ambig");
410 return Ok(None);
411 }
412
413 let candidates = candidate_set.vec;
414
415 debug!(?stack, ?candidates, "assembled {} candidates", candidates.len());
416
417 // At this point, we know that each of the entries in the
418 // candidate set is *individually* applicable. Now we have to
419 // figure out if they contain mutual incompatibilities. This
420 // frequently arises if we have an unconstrained input type --
421 // for example, we are looking for `$0: Eq` where `$0` is some
422 // unconstrained type variable. In that case, we'll get a
423 // candidate which assumes `$0 == i32`, one that assumes `$0 ==
424 // usize`, etc. This spells an ambiguity.
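//
// Concretely (illustration only): with `$0` unconstrained, an obligation
// like `$0: Default` is matched by `impl Default for i32`,
// `impl Default for String`, and many others, so no unique impl exists.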
425
426 let mut candidates = self.filter_impls(candidates, stack.obligation);
427
428 // If there is more than one candidate, first winnow them down
429 // by considering extra conditions (nested obligations and so
430 // forth). We don't winnow if there is exactly one
431 // candidate. This is a relatively minor distinction but it
432 // can lead to better inference and error-reporting. An
433 // example would be if there was an impl:
434 //
435 // impl<T:Clone> Vec<T> { fn push_clone(...) { ... } }
436 //
437 // and we were to see some code `foo.push_clone()` where `foo`
438 // is a `Vec<Bar>` and `Bar` does not implement `Clone`. If
439 // we were to winnow, we'd wind up with zero candidates.
440 // Instead, we select the right impl now but report "`Bar` does
441 // not implement `Clone`".
442 if candidates.len() == 1 {
443 return self.filter_reservation_impls(candidates.pop().unwrap());
444 }
445
446 // Winnow, but record the exact outcome of evaluation, which
447 // is needed for specialization. Propagate overflow if it occurs.
448 let candidates = candidates
449 .into_iter()
450 .map(|c| match self.evaluate_candidate(stack, &c) {
451 Ok(eval) if eval.may_apply() => {
452 Ok(Some(EvaluatedCandidate { candidate: c, evaluation: eval }))
453 }
454 Ok(_) => Ok(None),
455 Err(OverflowError::Canonical) => {
456 Err(SelectionError::Overflow(OverflowError::Canonical))
457 }
458 Err(OverflowError::Error(e)) => {
459 Err(SelectionError::Overflow(OverflowError::Error(e)))
460 }
461 })
462 .flat_map(Result::transpose)
463 .collect::<Result<Vec<_>, _>>()?;
464
465 debug!(?stack, ?candidates, "{} potentially applicable candidates", candidates.len());
466 // If there are *NO* candidates, then there are no impls --
467 // that we know of, anyway. Note that in the case where there
468 // are unbound type variables within the obligation, it might
469 // be the case that you could still satisfy the obligation
470 // from another crate by instantiating the type variables with
471 // a type from another crate that does have an impl. This case
472 // is checked for in `evaluate_stack` (and hence users
473 // who might care about this case, like coherence, should use
474 // that function).
475 if candidates.is_empty() {
476 // If there's an error type, 'downgrade' our result from
477 // `Err(Unimplemented)` to `Ok(None)`. This helps us avoid
478 // emitting additional spurious errors, since we're guaranteed
479 // to have emitted at least one.
480 if stack.obligation.predicate.references_error() {
481 debug!(?stack.obligation.predicate, "found error type in predicate, treating as ambiguous");
482 Ok(None)
483 } else {
484 Err(SelectionError::Unimplemented)
485 }
486 } else {
487 let has_non_region_infer = stack.obligation.predicate.has_non_region_infer();
488 if let Some(candidate) = self.winnow_candidates(has_non_region_infer, candidates) {
489 self.filter_reservation_impls(candidate)
490 } else {
491 Ok(None)
492 }
493 }
494 }
495
496 ///////////////////////////////////////////////////////////////////////////
497 // EVALUATION
498 //
499 // Tests whether an obligation can be selected or whether an impl
500 // can be applied to particular types. It skips the "confirmation"
501 // step and hence completely ignores output type parameters.
502 //
503 // The result is "true" if the obligation *may* hold and "false" if
504 // we can be sure it does not.
505
506 /// Evaluates whether the obligation `obligation` can be satisfied
507 /// and returns an `EvaluationResult`. This is meant for the
508 /// *initial* call.
509 ///
510 /// Do not use this directly, use `infcx.evaluate_obligation` instead.
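///
/// A sketch of the intended call path (via the extension method named above,
/// not this function directly):
///
///     // let result = infcx.evaluate_obligation(&obligation);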
511 pub fn evaluate_root_obligation(
512 &mut self,
513 obligation: &PredicateObligation<'tcx>,
514 ) -> Result<EvaluationResult, OverflowError> {
515 debug_assert!(!self.infcx.next_trait_solver());
516 self.evaluation_probe(|this| {
517 let goal =
518 this.infcx.resolve_vars_if_possible((obligation.predicate, obligation.param_env));
519 let mut result = this.evaluate_predicate_recursively(
520 TraitObligationStackList::empty(&ProvisionalEvaluationCache::default()),
521 obligation.clone(),
522 )?;
523 // If the predicate has done any inference, then downgrade the
524 // result to ambiguous.
525 if this.infcx.resolve_vars_if_possible(goal) != goal {
526 result = result.max(EvaluatedToAmbig);
527 }
528 Ok(result)
529 })
530 }
531
532 /// Computes the evaluation result of `op`, discarding any constraints.
533 ///
534 /// This also runs the leak check to allow higher-ranked region errors to impact
535 /// selection. By default it checks for leaks from all universes created inside of
536 /// `op`, but this can be overwritten if necessary.
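///
/// A sketch of a typical call (mirroring how candidates are evaluated in
/// `evaluate_candidate` below; illustration only):
///
///     // let eval = self.evaluation_probe(|this| {
///     //     // evaluation that may constrain inference variables
///     //     Ok(EvaluatedToOk)
///     // })?;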
537 fn evaluation_probe(
538 &mut self,
539 op: impl FnOnce(&mut Self) -> Result<EvaluationResult, OverflowError>,
540 ) -> Result<EvaluationResult, OverflowError> {
541 self.infcx.probe(|snapshot| -> Result<EvaluationResult, OverflowError> {
542 let outer_universe = self.infcx.universe();
543 let result = op(self)?;
544
545 match self.infcx.leak_check(outer_universe, Some(snapshot)) {
546 Ok(()) => {}
547 Err(_) => return Ok(EvaluatedToErr),
548 }
549
550 if self.infcx.opaque_types_added_in_snapshot(snapshot) {
551 return Ok(result.max(EvaluatedToOkModuloOpaqueTypes));
552 }
553
554 if self.infcx.region_constraints_added_in_snapshot(snapshot) {
555 Ok(result.max(EvaluatedToOkModuloRegions))
556 } else {
557 Ok(result)
558 }
559 })
560 }
561
562 /// Evaluates the predicates in `predicates` recursively. This may
563 /// guide inference. If this is not desired, run it inside of an
564 /// inference probe.
566 #[instrument(skip(self, stack), level = "debug")]
567 fn evaluate_predicates_recursively<'o, I>(
568 &mut self,
569 stack: TraitObligationStackList<'o, 'tcx>,
570 predicates: I,
571 ) -> Result<EvaluationResult, OverflowError>
572 where
573 I: IntoIterator<Item = PredicateObligation<'tcx>> + std::fmt::Debug,
574 {
575 let mut result = EvaluatedToOk;
576 for mut obligation in predicates {
577 obligation.set_depth_from_parent(stack.depth());
578 let eval = self.evaluate_predicate_recursively(stack, obligation.clone())?;
579 if let EvaluatedToErr = eval {
580 // fast-path - EvaluatedToErr is the top of the lattice,
581 // so we don't need to look at the other predicates.
582 return Ok(EvaluatedToErr);
583 } else {
584 result = cmp::max(result, eval);
585 }
586 }
587 Ok(result)
588 }
589
590 #[instrument(
591 level = "debug",
592 skip(self, previous_stack),
593 fields(previous_stack = ?previous_stack.head()),
594 ret,
595 )]
596 fn evaluate_predicate_recursively<'o>(
597 &mut self,
598 previous_stack: TraitObligationStackList<'o, 'tcx>,
599 obligation: PredicateObligation<'tcx>,
600 ) -> Result<EvaluationResult, OverflowError> {
601 debug_assert!(!self.infcx.next_trait_solver());
602 // `previous_stack` stores a `PolyTraitObligation`, while `obligation` is
603 // a `PredicateObligation`. These are distinct types, so we can't
604 // use any `Option` combinator method that would force them to be
605 // the same.
606 match previous_stack.head() {
607 Some(h) => self.check_recursion_limit(&obligation, h.obligation)?,
608 None => self.check_recursion_limit(&obligation, &obligation)?,
609 }
610
611 if sizedness_fast_path(self.tcx(), obligation.predicate) {
612 return Ok(EvaluatedToOk);
613 }
614
615 ensure_sufficient_stack(|| {
616 let bound_predicate = obligation.predicate.kind();
617 match bound_predicate.skip_binder() {
618 ty::PredicateKind::Clause(ty::ClauseKind::Trait(t)) => {
619 let t = bound_predicate.rebind(t);
620 debug_assert!(!t.has_escaping_bound_vars());
621 let obligation = obligation.with(self.tcx(), t);
622 self.evaluate_trait_predicate_recursively(previous_stack, obligation)
623 }
624
625 ty::PredicateKind::Clause(ty::ClauseKind::HostEffect(data)) => {
626 self.infcx.enter_forall(bound_predicate.rebind(data), |data| {
627 match effects::evaluate_host_effect_obligation(
628 self,
629 &obligation.with(self.tcx(), data),
630 ) {
631 Ok(nested) => {
632 self.evaluate_predicates_recursively(previous_stack, nested)
633 }
634 Err(effects::EvaluationFailure::Ambiguous) => Ok(EvaluatedToAmbig),
635 Err(effects::EvaluationFailure::NoSolution) => Ok(EvaluatedToErr),
636 }
637 })
638 }
639
640 ty::PredicateKind::Subtype(p) => {
641 let p = bound_predicate.rebind(p);
642 // Does this code ever run?
643 match self.infcx.subtype_predicate(&obligation.cause, obligation.param_env, p) {
644 Ok(Ok(InferOk { obligations, .. })) => {
645 self.evaluate_predicates_recursively(previous_stack, obligations)
646 }
647 Ok(Err(_)) => Ok(EvaluatedToErr),
648 Err(..) => Ok(EvaluatedToAmbig),
649 }
650 }
651
652 ty::PredicateKind::Coerce(p) => {
653 let p = bound_predicate.rebind(p);
654 // Does this code ever run?
655 match self.infcx.coerce_predicate(&obligation.cause, obligation.param_env, p) {
656 Ok(Ok(InferOk { obligations, .. })) => {
657 self.evaluate_predicates_recursively(previous_stack, obligations)
658 }
659 Ok(Err(_)) => Ok(EvaluatedToErr),
660 Err(..) => Ok(EvaluatedToAmbig),
661 }
662 }
663
664 ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(term)) => {
665 if term.is_trivially_wf(self.tcx()) {
666 return Ok(EvaluatedToOk);
667 }
668
669 // So, there is a bit going on here. First, `WellFormed` predicates
670 // are coinductive, like trait predicates with auto traits.
671 // This means that we need to detect if we have recursively
672 // evaluated `WellFormed(X)`. Otherwise, we would run into
673 // a "natural" overflow error.
674 //
675 // Now, the next question is whether we need to do anything
676 // special with caching. Considering the following tree:
677 // - `WF(Foo<T>)`
678 // - `Bar<T>: Send`
679 // - `WF(Foo<T>)`
680 // - `Foo<T>: Trait`
681 // In this case, the innermost `WF(Foo<T>)` should return
682 // `EvaluatedToOk`, since it's coinductive. Then if
683 // `Bar<T>: Send` is resolved to `EvaluatedToOk`, it can be
684 // inserted into a cache (because without thinking about `WF`
685 // goals, it isn't in a cycle). If `Foo<T>: Trait` later doesn't
686 // hold, then `Bar<T>: Send` shouldn't hold. Therefore, we
687 // *do* need to keep track of coinductive cycles.
688
689 let cache = previous_stack.cache;
690 let dfn = cache.next_dfn();
691
692 for stack_term in previous_stack.cache.wf_args.borrow().iter().rev() {
693 if stack_term.0 != term {
694 continue;
695 }
696 debug!("WellFormed({:?}) on stack", term);
697 if let Some(stack) = previous_stack.head {
698 // Okay, let's imagine we have two different stacks:
699 // `T: NonAutoTrait -> WF(T) -> T: NonAutoTrait`
700 // `WF(T) -> T: NonAutoTrait -> WF(T)`
701 // Because of this, we need to check that all
702 // predicates between the WF goals are coinductive.
703 // Otherwise, we can say that `T: NonAutoTrait` is
704 // true.
705 // Let's imagine we have a predicate stack like
706 // `Foo: Bar -> WF(T) -> T: NonAutoTrait -> T: Auto`
707 // depth ^1 ^2 ^3
708 // and the current predicate is `WF(T)`. `wf_args`
709 // would contain `(T, 1)`. We want to check all
710 // trait predicates greater than `1`. The previous
711 // stack would be `T: Auto`.
712 let cycle = stack.iter().take_while(|s| s.depth > stack_term.1);
713 let tcx = self.tcx();
714 let cycle = cycle.map(|stack| stack.obligation.predicate.upcast(tcx));
715 if self.coinductive_match(cycle) {
716 stack.update_reached_depth(stack_term.1);
717 return Ok(EvaluatedToOk);
718 } else {
719 return Ok(EvaluatedToAmbigStackDependent);
720 }
721 }
722 return Ok(EvaluatedToOk);
723 }
724
725 match wf::obligations(
726 self.infcx,
727 obligation.param_env,
728 obligation.cause.body_id,
729 obligation.recursion_depth + 1,
730 term,
731 obligation.cause.span,
732 ) {
733 Some(obligations) => {
734 cache.wf_args.borrow_mut().push((term, previous_stack.depth()));
735 let result =
736 self.evaluate_predicates_recursively(previous_stack, obligations);
737 cache.wf_args.borrow_mut().pop();
738
739 let result = result?;
740
741 if !result.must_apply_modulo_regions() {
742 cache.on_failure(dfn);
743 }
744
745 cache.on_completion(dfn);
746
747 Ok(result)
748 }
749 None => Ok(EvaluatedToAmbig),
750 }
751 }
752
753 ty::PredicateKind::Clause(ty::ClauseKind::TypeOutlives(pred)) => {
754 // A global type with no free lifetimes or generic parameters
755 // outlives anything.
756 if pred.0.has_free_regions()
757 || pred.0.has_bound_regions()
758 || pred.0.has_non_region_infer()
760 {
761 Ok(EvaluatedToOkModuloRegions)
762 } else {
763 Ok(EvaluatedToOk)
764 }
765 }
766
767 ty::PredicateKind::Clause(ty::ClauseKind::RegionOutlives(..)) => {
768 // We do not consider region relationships when evaluating trait matches.
769 Ok(EvaluatedToOkModuloRegions)
770 }
771
772 ty::PredicateKind::DynCompatible(trait_def_id) => {
773 if self.tcx().is_dyn_compatible(trait_def_id) {
774 Ok(EvaluatedToOk)
775 } else {
776 Ok(EvaluatedToErr)
777 }
778 }
779
780 ty::PredicateKind::Clause(ty::ClauseKind::Projection(data)) => {
781 let data = bound_predicate.rebind(data);
782 let project_obligation = obligation.with(self.tcx(), data);
783 match project::poly_project_and_unify_term(self, &project_obligation) {
784 ProjectAndUnifyResult::Holds(mut subobligations) => {
785 'compute_res: {
786 // If we've previously marked this projection as 'complete', then
787 // use the final cached result (either `EvaluatedToOk` or
788 // `EvaluatedToOkModuloRegions`), and skip re-evaluating the
789 // sub-obligations.
790 if let Some(key) =
791 ProjectionCacheKey::from_poly_projection_obligation(
792 self,
793 &project_obligation,
794 )
795 {
796 if let Some(cached_res) = self
797 .infcx
798 .inner
799 .borrow_mut()
800 .projection_cache()
801 .is_complete(key)
802 {
803 break 'compute_res Ok(cached_res);
804 }
805 }
806
807 // Need to explicitly set the depth of nested goals here as
808 // projection obligations can cycle by themselves and in
809 // `evaluate_predicates_recursively` we only add the depth
810 // for parent trait goals because only these get added to the
811 // `TraitObligationStackList`.
812 for subobligation in subobligations.iter_mut() {
813 subobligation.set_depth_from_parent(obligation.recursion_depth);
814 }
815 let res = self.evaluate_predicates_recursively(
816 previous_stack,
817 subobligations,
818 );
819 if let Ok(eval_rslt) = res
820 && (eval_rslt == EvaluatedToOk
821 || eval_rslt == EvaluatedToOkModuloRegions)
822 && let Some(key) =
823 ProjectionCacheKey::from_poly_projection_obligation(
824 self,
825 &project_obligation,
826 )
827 {
828 // If the result is something that we can cache, then mark this
829 // entry as 'complete'. This will allow us to skip evaluating the
830 // subobligations at all the next time we evaluate the projection
831 // predicate.
832 self.infcx
833 .inner
834 .borrow_mut()
835 .projection_cache()
836 .complete(key, eval_rslt);
837 }
838 res
839 }
840 }
841 ProjectAndUnifyResult::FailedNormalization => Ok(EvaluatedToAmbig),
842 ProjectAndUnifyResult::Recursive => Ok(EvaluatedToAmbigStackDependent),
843 ProjectAndUnifyResult::MismatchedProjectionTypes(_) => Ok(EvaluatedToErr),
844 }
845 }
846
847 ty::PredicateKind::Clause(ty::ClauseKind::ConstEvaluatable(uv)) => {
848 match const_evaluatable::is_const_evaluatable(
849 self.infcx,
850 uv,
851 obligation.param_env,
852 obligation.cause.span,
853 ) {
854 Ok(()) => Ok(EvaluatedToOk),
855 Err(NotConstEvaluatable::MentionsInfer) => Ok(EvaluatedToAmbig),
856 Err(NotConstEvaluatable::MentionsParam) => Ok(EvaluatedToErr),
857 Err(_) => Ok(EvaluatedToErr),
858 }
859 }
860
861 ty::PredicateKind::ConstEquate(c1, c2) => {
862 let tcx = self.tcx();
863 assert!(
864 tcx.features().generic_const_exprs(),
865 "`ConstEquate` without a feature gate: {c1:?} {c2:?}",
866 );
867
868 {
869 let c1 = tcx.expand_abstract_consts(c1);
870 let c2 = tcx.expand_abstract_consts(c2);
871 debug!(
872 "evaluate_predicate_recursively: equating consts:\nc1= {:?}\nc2= {:?}",
873 c1, c2
874 );
875
876 use rustc_hir::def::DefKind;
877 match (c1.kind(), c2.kind()) {
878 (ty::ConstKind::Unevaluated(a), ty::ConstKind::Unevaluated(b))
879 if a.def == b.def && tcx.def_kind(a.def) == DefKind::AssocConst =>
880 {
881 if let Ok(InferOk { obligations, value: () }) = self
882 .infcx
883 .at(&obligation.cause, obligation.param_env)
884 // Can define opaque types as this is only reachable with
885 // `generic_const_exprs`
886 .eq(
887 DefineOpaqueTypes::Yes,
888 ty::AliasTerm::from(a),
889 ty::AliasTerm::from(b),
890 )
891 {
892 return self.evaluate_predicates_recursively(
893 previous_stack,
894 obligations,
895 );
896 }
897 }
898 (_, ty::ConstKind::Unevaluated(_))
899 | (ty::ConstKind::Unevaluated(_), _) => (),
900 (_, _) => {
901 if let Ok(InferOk { obligations, value: () }) = self
902 .infcx
903 .at(&obligation.cause, obligation.param_env)
904 // Can define opaque types as this is only reachable with
905 // `generic_const_exprs`
906 .eq(DefineOpaqueTypes::Yes, c1, c2)
907 {
908 return self.evaluate_predicates_recursively(
909 previous_stack,
910 obligations,
911 );
912 }
913 }
914 }
915 }
916
917 let evaluate = |c: ty::Const<'tcx>| {
918 if let ty::ConstKind::Unevaluated(_) = c.kind() {
919 match crate::traits::try_evaluate_const(
920 self.infcx,
921 c,
922 obligation.param_env,
923 ) {
924 Ok(val) => Ok(val),
925 Err(e) => Err(e),
926 }
927 } else {
928 Ok(c)
929 }
930 };
931
932 match (evaluate(c1), evaluate(c2)) {
933 (Ok(c1), Ok(c2)) => {
934 match self.infcx.at(&obligation.cause, obligation.param_env).eq(
935 // Can define opaque types as this is only reachable with
936 // `generic_const_exprs`
937 DefineOpaqueTypes::Yes,
938 c1,
939 c2,
940 ) {
941 Ok(inf_ok) => self.evaluate_predicates_recursively(
942 previous_stack,
943 inf_ok.into_obligations(),
944 ),
945 Err(_) => Ok(EvaluatedToErr),
946 }
947 }
948 (Err(EvaluateConstErr::InvalidConstParamTy(..)), _)
949 | (_, Err(EvaluateConstErr::InvalidConstParamTy(..))) => Ok(EvaluatedToErr),
950 (Err(EvaluateConstErr::EvaluationFailure(..)), _)
951 | (_, Err(EvaluateConstErr::EvaluationFailure(..))) => Ok(EvaluatedToErr),
952 (Err(EvaluateConstErr::HasGenericsOrInfers), _)
953 | (_, Err(EvaluateConstErr::HasGenericsOrInfers)) => {
954 if c1.has_non_region_infer() || c2.has_non_region_infer() {
955 Ok(EvaluatedToAmbig)
956 } else {
957 // Two different constants using generic parameters ~> error.
958 Ok(EvaluatedToErr)
959 }
960 }
961 }
962 }
963 ty::PredicateKind::NormalizesTo(..) => {
964 bug!("NormalizesTo is only used by the new solver")
965 }
966 ty::PredicateKind::AliasRelate(..) => {
967 bug!("AliasRelate is only used by the new solver")
968 }
969 ty::PredicateKind::Ambiguous => Ok(EvaluatedToAmbig),
970 ty::PredicateKind::Clause(ty::ClauseKind::ConstArgHasType(ct, ty)) => {
971 let ct = self.infcx.shallow_resolve_const(ct);
972 let ct_ty = match ct.kind() {
973 ty::ConstKind::Infer(_) => {
974 return Ok(EvaluatedToAmbig);
975 }
976 ty::ConstKind::Error(_) => return Ok(EvaluatedToOk),
977 ty::ConstKind::Value(cv) => cv.ty,
978 ty::ConstKind::Unevaluated(uv) => {
979 self.tcx().type_of(uv.def).instantiate(self.tcx(), uv.args)
980 }
981 // FIXME(generic_const_exprs): See comment in `fulfill.rs`
982 ty::ConstKind::Expr(_) => return Ok(EvaluatedToOk),
983 ty::ConstKind::Placeholder(_) => {
984 bug!("placeholder const {:?} in old solver", ct)
985 }
986 ty::ConstKind::Bound(_, _) => bug!("escaping bound vars in {:?}", ct),
987 ty::ConstKind::Param(param_ct) => {
988 param_ct.find_const_ty_from_env(obligation.param_env)
989 }
990 };
991
992 match self.infcx.at(&obligation.cause, obligation.param_env).eq(
993 // Only really exercised by generic_const_exprs
994 DefineOpaqueTypes::Yes,
995 ct_ty,
996 ty,
997 ) {
998 Ok(inf_ok) => self.evaluate_predicates_recursively(
999 previous_stack,
1000 inf_ok.into_obligations(),
1001 ),
1002 Err(_) => Ok(EvaluatedToErr),
1003 }
1004 }
1005 }
1006 })
1007 }
1008
1009 #[instrument(skip(self, previous_stack), level = "debug", ret)]
1010 fn evaluate_trait_predicate_recursively<'o>(
1011 &mut self,
1012 previous_stack: TraitObligationStackList<'o, 'tcx>,
1013 mut obligation: PolyTraitObligation<'tcx>,
1014 ) -> Result<EvaluationResult, OverflowError> {
1015 if !matches!(self.infcx.typing_mode(), TypingMode::Coherence)
1016 && obligation.is_global()
1017 && obligation.param_env.caller_bounds().iter().all(|bound| bound.has_param())
1018 {
1019 // If a param env has no global bounds, global obligations do not
1020 // depend on its particular value in order to work, so we can clear
1021 // out the param env and get better caching.
1022 debug!("in global");
1023 obligation.param_env = ty::ParamEnv::empty();
1024 }
1025
1026 let stack = self.push_stack(previous_stack, &obligation);
1027 let fresh_trait_pred = stack.fresh_trait_pred;
1028 let param_env = obligation.param_env;
1029
1030 debug!(?fresh_trait_pred);
1031
1032 // If a trait predicate is in the (local or global) evaluation cache,
1033 // then we know it holds without cycles.
1034 if let Some(result) = self.check_evaluation_cache(param_env, fresh_trait_pred) {
1035 debug!("CACHE HIT");
1036 return Ok(result);
1037 }
1038
1039 if let Some(result) = stack.cache().get_provisional(fresh_trait_pred) {
1040 debug!("PROVISIONAL CACHE HIT");
1041 stack.update_reached_depth(result.reached_depth);
1042 return Ok(result.result);
1043 }
1044
1045 // Check if this is a match for something already on the
1046 // stack. If so, we don't want to insert the result into the
1047 // main cache (it is cycle dependent) nor the provisional
1048 // cache (which is meant for things that have completed but
1049 // for a "backedge" -- this result *is* the backedge).
1050 if let Some(cycle_result) = self.check_evaluation_cycle(&stack) {
1051 return Ok(cycle_result);
1052 }
1053
1054 let (result, dep_node) = self.in_task(|this| {
1055 let mut result = this.evaluate_stack(&stack)?;
1056
1057 // Fix for issue #103563: we don't normalize
1058 // nested obligations produced by a `TraitDef` candidate
1059 // (i.e. using bounds on assoc items as assumptions),
1060 // because we don't have enough information to
1061 // normalize these obligations before evaluating.
1062 // So we try to normalize the obligation here and evaluate again.
1063 // This will be replaced by the new solver in the future.
1064 if EvaluationResult::EvaluatedToErr == result
1065 && fresh_trait_pred.has_aliases()
1066 && fresh_trait_pred.is_global()
1067 {
1068 let mut nested_obligations = PredicateObligations::new();
1069 let predicate = normalize_with_depth_to(
1070 this,
1071 param_env,
1072 obligation.cause.clone(),
1073 obligation.recursion_depth + 1,
1074 obligation.predicate,
1075 &mut nested_obligations,
1076 );
1077 if predicate != obligation.predicate {
1078 let mut nested_result = EvaluationResult::EvaluatedToOk;
1079 for obligation in nested_obligations {
1080 nested_result = cmp::max(
1081 this.evaluate_predicate_recursively(previous_stack, obligation)?,
1082 nested_result,
1083 );
1084 }
1085
1086 if nested_result.must_apply_modulo_regions() {
1087 let obligation = obligation.with(this.tcx(), predicate);
1088 result = cmp::max(
1089 nested_result,
1090 this.evaluate_trait_predicate_recursively(previous_stack, obligation)?,
1091 );
1092 }
1093 }
1094 }
1095
1096 Ok::<_, OverflowError>(result)
1097 });
1098
1099 let result = result?;
1100
1101 if !result.must_apply_modulo_regions() {
1102 stack.cache().on_failure(stack.dfn);
1103 }
1104
1105 let reached_depth = stack.reached_depth.get();
1106 if reached_depth >= stack.depth {
1107 debug!("CACHE MISS");
1108 self.insert_evaluation_cache(param_env, fresh_trait_pred, dep_node, result);
1109 stack.cache().on_completion(stack.dfn);
1110 } else {
1111 debug!("PROVISIONAL");
1112 debug!(
1113 "caching provisionally because {:?} \
1114 is a cycle participant (at depth {}, reached depth {})",
1115 fresh_trait_pred, stack.depth, reached_depth,
1116 );
1117
1118 stack.cache().insert_provisional(stack.dfn, reached_depth, fresh_trait_pred, result);
1119 }
1120
1121 Ok(result)
1122 }
1123
1124 /// If there is any previous entry on the stack that precisely
1125 /// matches this obligation, then we can assume that the
1126 /// obligation is satisfied for now (still all other conditions
1127 /// must be met of course). One obvious case this comes up is
1128 /// marker traits like `Send`. Think of a linked list:
1129 ///
1130 /// struct List<T> { data: T, next: Option<Box<List<T>>> }
1131 ///
1132 /// `Box<List<T>>` will be `Send` if `T` is `Send` and
1133 /// `Option<Box<List<T>>>` is `Send`, and in turn
1134 /// `Option<Box<List<T>>>` is `Send` if `Box<List<T>>` is
1135 /// `Send`.
1136 ///
1137 /// Note that we do this comparison using the `fresh_trait_pred`
1138 /// fields. Because these have all been freshened using
1139 /// `self.freshener`, we can be sure that (a) this will not
1140 /// affect the inferencer state and (b) that if we see two
1141 /// fresh types with the same index, they refer to the same
1142 /// unbound type variable.
1143 fn check_evaluation_cycle(
1144 &mut self,
1145 stack: &TraitObligationStack<'_, 'tcx>,
1146 ) -> Option<EvaluationResult> {
1147 if let Some(cycle_depth) = stack
1148 .iter()
1149 .skip(1) // Skip top-most frame.
1150 .find(|prev| {
1151 stack.obligation.param_env == prev.obligation.param_env
1152 && stack.fresh_trait_pred == prev.fresh_trait_pred
1153 })
1154 .map(|stack| stack.depth)
1155 {
1156 debug!("evaluate_stack --> recursive at depth {}", cycle_depth);
1157
1158 // If we have a stack like `A B C D E A`, where the top of
1159 // the stack is the final `A`, then this will iterate over
1160 // `A, E, D, C, B` -- i.e., all the participants apart
1161 // from the cycle head. We mark them as participating in a
1162 // cycle. This suppresses caching for those nodes. See
1163 // `reached_depth` field for more details.
1164 stack.update_reached_depth(cycle_depth);
1165
1166 // Subtle: when checking for a coinductive cycle, we do
1167 // not compare using the "freshened trait refs" (which
1168 // have erased regions) but rather the fully explicit
1169 // trait refs. This is important because it's only a cycle
1170 // if the regions match exactly.
1171 let cycle = stack.iter().skip(1).take_while(|s| s.depth >= cycle_depth);
1172 let tcx = self.tcx();
1173 let cycle = cycle.map(|stack| stack.obligation.predicate.upcast(tcx));
1174 if self.coinductive_match(cycle) {
1175 debug!("evaluate_stack --> recursive, coinductive");
1176 Some(EvaluatedToOk)
1177 } else {
1178 debug!("evaluate_stack --> recursive, inductive");
1179 Some(EvaluatedToAmbigStackDependent)
1180 }
1181 } else {
1182 None
1183 }
1184 }
1185
1186 fn evaluate_stack<'o>(
1187 &mut self,
1188 stack: &TraitObligationStack<'o, 'tcx>,
1189 ) -> Result<EvaluationResult, OverflowError> {
1190 debug_assert!(!self.infcx.next_trait_solver());
1191 // In intercrate mode, whenever any of the generics are unbound,
1192 // there can always be an impl. Even if there are no impls in
1193 // this crate, perhaps the type would be unified with
1194 // something from another crate that does provide an impl.
1195 //
1196 // In intra mode, we must still be conservative. The reason is
1197 // that we want to avoid cycles. Imagine an impl like:
1198 //
1199 // impl<T:Eq> Eq for Vec<T>
1200 //
1201 // and a trait reference like `$0 : Eq` where `$0` is an
1202 // unbound variable. When we evaluate this trait-reference, we
1203 // will unify `$0` with `Vec<$1>` (for some fresh variable
1204 // `$1`), on the condition that `$1 : Eq`. We will then wind
1205 // up with many candidates (since there are other `Eq` impls
1206 // that apply) and try to winnow things down. This results in
1207 // a recursive evaluation of `$1 : Eq` -- as you can
1208 // imagine, this is just where we started. To avoid that, we
1209 // check for unbound variables and return an ambiguous (hence possible)
1210 // match if we've seen this trait before.
1211 //
1212 // This suffices to allow chains like `FnMut` implemented in
1213 // terms of `Fn` etc, but we could probably make this more
1214 // precise still.
1215 let unbound_input_types =
1216 stack.fresh_trait_pred.skip_binder().trait_ref.args.types().any(|ty| ty.is_fresh());
1217
1218 if unbound_input_types
1219 && stack.iter().skip(1).any(|prev| {
1220 stack.obligation.param_env == prev.obligation.param_env
1221 && self.match_fresh_trait_refs(stack.fresh_trait_pred, prev.fresh_trait_pred)
1222 })
1223 {
1224 debug!("evaluate_stack --> unbound argument, recursive --> giving up",);
1225 return Ok(EvaluatedToAmbigStackDependent);
1226 }
1227
1228 match self.candidate_from_obligation(stack) {
1229 Ok(Some(c)) => self.evaluate_candidate(stack, &c),
1230 Ok(None) => Ok(EvaluatedToAmbig),
1231 Err(SelectionError::Overflow(OverflowError::Canonical)) => {
1232 Err(OverflowError::Canonical)
1233 }
1234 Err(..) => Ok(EvaluatedToErr),
1235 }
1236 }
1237
1238 /// For defaulted traits, we use a co-inductive strategy to solve, so
1239 /// that recursion is ok. This routine returns `true` if the top of the
1240 /// stack (`cycle[0]`):
1241 ///
1242 /// - is a coinductive trait: an auto-trait or `Sized`,
1243 /// - also appears in the backtrace at some position `X`, and
1244 /// - all the predicates at positions `X..` between `X` and the top are
1245 /// also coinductive traits.
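///
/// For example (illustration only):
///
///     struct Tree<T>(T, Vec<Tree<T>>);
///
/// Proving `Tree<u8>: Send` eventually requires `Tree<u8>: Send` again;
/// since `Send` is an auto trait, every predicate in that cycle is
/// coinductive and the cycle is accepted.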
1246 pub(crate) fn coinductive_match<I>(&mut self, mut cycle: I) -> bool
1247 where
1248 I: Iterator<Item = ty::Predicate<'tcx>>,
1249 {
1250 cycle.all(|p| match p.kind().skip_binder() {
1251 ty::PredicateKind::Clause(ty::ClauseKind::Trait(data)) => {
1252 self.infcx.tcx.trait_is_coinductive(data.def_id())
1253 }
1254 ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(_)) => {
1255 // FIXME(generic_const_exprs): GCE needs well-formedness predicates to be
1256 // coinductive, but GCE is on the way out anyways, so this should eventually
1257 // be replaced with `false`.
1258 self.infcx.tcx.features().generic_const_exprs()
1259 }
1260 _ => false,
1261 })
1262 }
1263
1264 /// Further evaluates `candidate` to decide whether all type parameters match and whether nested
1265 /// obligations are met. Returns whether `candidate` remains viable after this further
1266 /// scrutiny.
1267 #[instrument(
1268 level = "debug",
1269 skip(self, stack),
1270 fields(depth = stack.obligation.recursion_depth),
1271 ret
1272 )]
1273 fn evaluate_candidate<'o>(
1274 &mut self,
1275 stack: &TraitObligationStack<'o, 'tcx>,
1276 candidate: &SelectionCandidate<'tcx>,
1277 ) -> Result<EvaluationResult, OverflowError> {
1278 let mut result = self.evaluation_probe(|this| {
1279 match this.confirm_candidate(stack.obligation, candidate.clone()) {
1280 Ok(selection) => {
1281 debug!(?selection);
1282 this.evaluate_predicates_recursively(
1283 stack.list(),
1284 selection.nested_obligations().into_iter(),
1285 )
1286 }
1287 Err(..) => Ok(EvaluatedToErr),
1288 }
1289 })?;
1290
1291 // If we erased any lifetimes, then we want to use
1292 // `EvaluatedToOkModuloRegions` instead of `EvaluatedToOk`
1293 // as your final result. The result will be cached using
1294 // the freshened trait predicate as a key, so we need
1295 // our result to be correct by *any* choice of original lifetimes,
1296 // not just the lifetime choice for this particular (non-erased)
1297 // predicate.
1298 // See issue #80691
1299 if stack.fresh_trait_pred.has_erased_regions() {
1300 result = result.max(EvaluatedToOkModuloRegions);
1301 }
1302
1303 Ok(result)
1304 }
1305
1306 fn check_evaluation_cache(
1307 &self,
1308 param_env: ty::ParamEnv<'tcx>,
1309 trait_pred: ty::PolyTraitPredicate<'tcx>,
1310 ) -> Option<EvaluationResult> {
1311 let infcx = self.infcx;
1312 let tcx = infcx.tcx;
1313 if self.can_use_global_caches(param_env, trait_pred) {
1314 let key = (infcx.typing_env(param_env), trait_pred);
1315 if let Some(res) = tcx.evaluation_cache.get(&key, tcx) {
1316 Some(res)
1317 } else {
1318 debug_assert_eq!(infcx.evaluation_cache.get(&(param_env, trait_pred), tcx), None);
1319 None
1320 }
1321 } else {
1322 self.infcx.evaluation_cache.get(&(param_env, trait_pred), tcx)
1323 }
1324 }
1325
1326 fn insert_evaluation_cache(
1327 &mut self,
1328 param_env: ty::ParamEnv<'tcx>,
1329 trait_pred: ty::PolyTraitPredicate<'tcx>,
1330 dep_node: DepNodeIndex,
1331 result: EvaluationResult,
1332 ) {
1333 // Avoid caching results that depend on more than just the trait-ref
1334 // - the stack can create recursion.
1335 if result.is_stack_dependent() {
1336 return;
1337 }
1338
1339 let infcx = self.infcx;
1340 let tcx = infcx.tcx;
1341 if self.can_use_global_caches(param_env, trait_pred) {
1342 debug!(?trait_pred, ?result, "insert_evaluation_cache global");
1343 // This may overwrite the cache with the same value
1344 tcx.evaluation_cache.insert(
1345 (infcx.typing_env(param_env), trait_pred),
1346 dep_node,
1347 result,
1348 );
1349 return;
1350 } else {
1351 debug!(?trait_pred, ?result, "insert_evaluation_cache local");
1352 self.infcx.evaluation_cache.insert((param_env, trait_pred), dep_node, result);
1353 }
1354 }
1355
1356 fn check_recursion_depth<T>(
1357 &self,
1358 depth: usize,
1359 error_obligation: &Obligation<'tcx, T>,
1360 ) -> Result<(), OverflowError>
1361 where
1362 T: Upcast<TyCtxt<'tcx>, ty::Predicate<'tcx>> + Clone,
1363 {
1364 if !self.infcx.tcx.recursion_limit().value_within_limit(depth) {
1365 match self.query_mode {
1366 TraitQueryMode::Standard => {
1367 if let Some(e) = self.infcx.tainted_by_errors() {
1368 return Err(OverflowError::Error(e));
1369 }
1370 self.infcx.err_ctxt().report_overflow_obligation(error_obligation, true);
1371 }
1372 TraitQueryMode::Canonical => {
1373 return Err(OverflowError::Canonical);
1374 }
1375 }
1376 }
1377 Ok(())
1378 }
1379
1380 /// Checks that the recursion limit has not been exceeded.
1381 ///
1382 /// The weird return type of this function allows it to be used with the `try` (`?`)
1383 /// operator within certain functions.
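///
/// In `TraitQueryMode::Standard` the overflow is reported to the user (the
/// usual workaround being a larger crate-level `#![recursion_limit = "256"]`,
/// mentioned here purely as an illustration), while in
/// `TraitQueryMode::Canonical` it is propagated as `OverflowError::Canonical`.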
1384 #[inline(always)]
1385 fn check_recursion_limit<T: Display + TypeFoldable<TyCtxt<'tcx>>, V>(
1386 &self,
1387 obligation: &Obligation<'tcx, T>,
1388 error_obligation: &Obligation<'tcx, V>,
1389 ) -> Result<(), OverflowError>
1390 where
1391 V: Upcast<TyCtxt<'tcx>, ty::Predicate<'tcx>> + Clone,
1392 {
1393 self.check_recursion_depth(obligation.recursion_depth, error_obligation)
1394 }
1395
1396 fn in_task<OP, R>(&mut self, op: OP) -> (R, DepNodeIndex)
1397 where
1398 OP: FnOnce(&mut Self) -> R,
1399 {
1400 self.tcx().dep_graph.with_anon_task(self.tcx(), dep_kinds::TraitSelect, || op(self))
1401 }
1402
1403 /// filter_impls filters candidates that have a positive impl for a negative
1404 /// goal and a negative impl for a positive goal.
1405 #[instrument(level = "debug", skip(self, candidates))]
1406 fn filter_impls(
1407 &mut self,
1408 candidates: Vec<SelectionCandidate<'tcx>>,
1409 obligation: &PolyTraitObligation<'tcx>,
1410 ) -> Vec<SelectionCandidate<'tcx>> {
1411 trace!("{candidates:#?}");
1412 let tcx = self.tcx();
1413 let mut result = Vec::with_capacity(candidates.len());
1414
1415 for candidate in candidates {
1416 if let ImplCandidate(def_id) = candidate {
1417 match (tcx.impl_polarity(def_id), obligation.polarity()) {
1418 (ty::ImplPolarity::Reservation, _)
1419 | (ty::ImplPolarity::Positive, ty::PredicatePolarity::Positive)
1420 | (ty::ImplPolarity::Negative, ty::PredicatePolarity::Negative) => {
1421 result.push(candidate);
1422 }
1423 _ => {}
1424 }
1425 } else {
1426 result.push(candidate);
1427 }
1428 }
1429
1430 trace!("{result:#?}");
1431 result
1432 }
1433
1434 /// filter_reservation_impls filters reservation impls, treating them as ambiguous for any goal.
1435 #[instrument(level = "debug", skip(self))]
1436 fn filter_reservation_impls(
1437 &mut self,
1438 candidate: SelectionCandidate<'tcx>,
1439 ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
1440 let tcx = self.tcx();
1441 // Treat reservation impls as ambiguity.
1442 if let ImplCandidate(def_id) = candidate {
1443 if let ty::ImplPolarity::Reservation = tcx.impl_polarity(def_id) {
1444 if let Some(intercrate_ambiguity_clauses) = &mut self.intercrate_ambiguity_causes {
1445 let message = tcx
1446 .get_attr(def_id, sym::rustc_reservation_impl)
1447 .and_then(|a| a.value_str());
1448 if let Some(message) = message {
1449 debug!(
1450 "filter_reservation_impls: \
1451 reservation impl ambiguity on {:?}",
1452 def_id
1453 );
1454 intercrate_ambiguity_clauses
1455 .insert(IntercrateAmbiguityCause::ReservationImpl { message });
1456 }
1457 }
1458 return Ok(None);
1459 }
1460 }
1461 Ok(Some(candidate))
1462 }
1463
1464 fn is_knowable<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> Result<(), Conflict> {
1465 let obligation = &stack.obligation;
1466 match self.infcx.typing_mode() {
1467 TypingMode::Coherence => {}
1468 TypingMode::Analysis { .. }
1469 | TypingMode::Borrowck { .. }
1470 | TypingMode::PostBorrowckAnalysis { .. }
1471 | TypingMode::PostAnalysis => return Ok(()),
1472 }
1473
1474 debug!("is_knowable()");
1475
1476 let predicate = self.infcx.resolve_vars_if_possible(obligation.predicate);
1477
1478 // Okay to skip binder because of the nature of the
1479 // trait-ref-is-knowable check, which does not care about
1480 // bound regions.
1481 let trait_ref = predicate.skip_binder().trait_ref;
1482
1483 coherence::trait_ref_is_knowable(self.infcx, trait_ref, |ty| Ok::<_, !>(ty)).into_ok()
1484 }
1485
1486 /// Returns `true` if the global caches can be used.
1487 fn can_use_global_caches(
1488 &self,
1489 param_env: ty::ParamEnv<'tcx>,
1490 pred: ty::PolyTraitPredicate<'tcx>,
1491 ) -> bool {
1492 // If there are any inference variables in the `ParamEnv`, then we
1493 // always use a cache local to this particular scope. Otherwise, we
1494 // switch to a global cache.
1495 if param_env.has_infer() || pred.has_infer() {
1496 return false;
1497 }
1498
1499 match self.infcx.typing_mode() {
1500 // Avoid using the global cache during coherence and just rely
1501 // on the local cache. It is really just a simplification to
1502 // avoid us having to fear that coherence results "pollute"
1503 // the master cache. Since coherence executes pretty quickly,
1504 // it's not worth going to more trouble to increase the
1505 // hit-rate, I don't think.
1506 TypingMode::Coherence => false,
1507 // Avoid using the global cache when we're defining opaque types
1508 // as their hidden type may impact the result of candidate selection.
1509 //
1510 // HACK: This is still theoretically unsound. Goals can indirectly rely
1511 // on opaques in the defining scope, and it's easier to do so with TAIT.
1512 // However, if we disqualify *all* goals from being cached, perf suffers.
1513 // This is likely fixed by better caching in general in the new solver.
1514 // See: <https://github.com/rust-lang/rust/issues/132064>.
1515 TypingMode::Analysis {
1516 defining_opaque_types_and_generators: defining_opaque_types,
1517 }
1518 | TypingMode::Borrowck { defining_opaque_types } => {
1519 defining_opaque_types.is_empty() || !pred.has_opaque_types()
1520 }
1521 // The hidden types of `defined_opaque_types` are not local to the current
1522 // inference context, so we can freely move this to the global cache.
1523 TypingMode::PostBorrowckAnalysis { .. } => true,
1524 // The global cache is only used if there are no opaque types in
1525 // the defining scope or we're outside of analysis.
1526 //
1527 // FIXME(#132279): This is still incorrect as we treat opaque types
1528 // and default associated items differently between these two modes.
1529 TypingMode::PostAnalysis => true,
1530 }
1531 }
1532
1533 fn check_candidate_cache(
1534 &mut self,
1535 param_env: ty::ParamEnv<'tcx>,
1536 cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
1537 ) -> Option<SelectionResult<'tcx, SelectionCandidate<'tcx>>> {
1538 let infcx = self.infcx;
1539 let tcx = infcx.tcx;
1540 let pred = cache_fresh_trait_pred.skip_binder();
1541
1542 if self.can_use_global_caches(param_env, cache_fresh_trait_pred) {
1543 if let Some(res) = tcx.selection_cache.get(&(infcx.typing_env(param_env), pred), tcx) {
1544 return Some(res);
1545 } else if cfg!(debug_assertions) {
1546 match infcx.selection_cache.get(&(param_env, pred), tcx) {
1547 None | Some(Err(SelectionError::Overflow(OverflowError::Canonical))) => {}
1548 res => bug!("unexpected local cache result: {res:?}"),
1549 }
1550 }
1551 }
1552
1553 // Subtle: we need to check the local cache even if we're able to use the
1554 // global cache, as overflow is not cached globally but is cached locally;
1555 // without this check, rustdoc hangs when compiling diesel.
1556 infcx.selection_cache.get(&(param_env, pred), tcx)
1557 }
1558
1559 /// Determines whether we can safely cache the result
1560 /// of selecting an obligation. This is almost always `true`,
1561 /// except when dealing with certain `ParamCandidate`s.
1562 ///
1563 /// Ordinarily, a `ParamCandidate` will contain no inference variables,
1564 /// since it was usually produced directly from a `DefId`. However,
1565 /// certain cases (currently only librustdoc's blanket impl finder),
1566 /// a `ParamEnv` may be explicitly constructed with inference types.
1567 /// When this is the case, we do *not* want to cache the resulting selection
1568 /// candidate. This is due to the fact that it might not always be possible
1569 /// to equate the obligation's trait ref and the candidate's trait ref,
1570 /// if more constraints end up getting added to an inference variable.
1571 ///
1572 /// Because of this, we always want to re-run the full selection
1573 /// process for our obligation the next time we see it, since
1574 /// we might end up picking a different `SelectionCandidate` (or none at all).
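///
/// A rough sketch of the problematic scenario (illustrative only, not an
/// actual rustdoc API):
///
/// ```ignore (illustrative)
/// // Suppose a synthesized `ParamEnv` contains the clause `?0: Iterator`,
/// // where `?0` is an inference variable rather than a type parameter.
/// // Selecting `?0: Iterator` yields `ParamCandidate(?0: Iterator)`.
/// //
/// // If `?0` is later constrained to, say, `u8`, re-running selection for
/// // the now-different obligation must not reuse that cached candidate,
/// // so we refuse to cache it in the first place.
/// ```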
1575 fn can_cache_candidate(
1576 &self,
1577 result: &SelectionResult<'tcx, SelectionCandidate<'tcx>>,
1578 ) -> bool {
1579 match result {
1580 Ok(Some(SelectionCandidate::ParamCandidate(trait_ref))) => !trait_ref.has_infer(),
1581 _ => true,
1582 }
1583 }
1584
1585 #[instrument(skip(self, param_env, cache_fresh_trait_pred, dep_node), level = "debug")]
1586 fn insert_candidate_cache(
1587 &mut self,
1588 param_env: ty::ParamEnv<'tcx>,
1589 cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
1590 dep_node: DepNodeIndex,
1591 candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>,
1592 ) {
1593 let infcx = self.infcx;
1594 let tcx = infcx.tcx;
1595 let pred = cache_fresh_trait_pred.skip_binder();
1596
1597 if !self.can_cache_candidate(&candidate) {
1598 debug!(?pred, ?candidate, "insert_candidate_cache - candidate is not cacheable");
1599 return;
1600 }
1601
1602 if self.can_use_global_caches(param_env, cache_fresh_trait_pred) {
1603 if let Err(SelectionError::Overflow(OverflowError::Canonical)) = candidate {
1604 // Don't cache overflow globally; we only produce this in certain modes.
1605 } else {
1606 debug!(?pred, ?candidate, "insert_candidate_cache global");
1607 debug_assert!(!candidate.has_infer());
1608
1609 // This may overwrite the cache with the same value.
1610 tcx.selection_cache.insert(
1611 (infcx.typing_env(param_env), pred),
1612 dep_node,
1613 candidate,
1614 );
1615 return;
1616 }
1617 }
1618
1619 debug!(?pred, ?candidate, "insert_candidate_cache local");
1620 self.infcx.selection_cache.insert((param_env, pred), dep_node, candidate);
1621 }
1622
1623 /// Looks at the item bounds of the projection or opaque type.
1624 /// If this is a nested rigid projection, such as
1625 /// `<<T as Tr1>::Assoc as Tr2>::Assoc`, consider the item bounds
1626 /// on both `Tr1::Assoc` and `Tr2::Assoc`, since we may encounter
1627 /// relevant bounds on both via the `associated_type_bounds` feature.
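///
/// A hedged illustration (hypothetical traits) of why both levels matter:
///
/// ```ignore (illustrative)
/// trait Tr2 { type Assoc; }
/// trait Tr1 { type Assoc: Tr2<Assoc: Clone>; }
///
/// // For the self type `<<T as Tr1>::Assoc as Tr2>::Assoc`, the `Clone`
/// // bound comes from the item bounds of `Tr1::Assoc` (via the nested
/// // associated type bound), not from `Tr2::Assoc`, so we must also walk
/// // the parent alias's bounds.
/// ```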
1628 pub(super) fn for_each_item_bound<T>(
1629 &mut self,
1630 mut self_ty: Ty<'tcx>,
1631 mut for_each: impl FnMut(&mut Self, ty::Clause<'tcx>, usize) -> ControlFlow<T, ()>,
1632 on_ambiguity: impl FnOnce(),
1633 ) -> ControlFlow<T, ()> {
1634 let mut idx = 0;
1635 let mut in_parent_alias_type = false;
1636
1637 loop {
1638 let (kind, alias_ty) = match *self_ty.kind() {
1639 ty::Alias(kind @ (ty::Projection | ty::Opaque), alias_ty) => (kind, alias_ty),
1640 ty::Infer(ty::TyVar(_)) => {
1641 on_ambiguity();
1642 return ControlFlow::Continue(());
1643 }
1644 _ => return ControlFlow::Continue(()),
1645 };
1646
1647 // HACK: On subsequent recursions, we only care about bounds that don't
1648 // share the same type as `self_ty`. This is because for truly rigid
1649 // projections, we will never be able to equate, e.g. `<T as Tr>::A`
1650 // with `<<T as Tr>::A as Tr>::A`.
1651 let relevant_bounds = if in_parent_alias_type {
1652 self.tcx().item_non_self_bounds(alias_ty.def_id)
1653 } else {
1654 self.tcx().item_self_bounds(alias_ty.def_id)
1655 };
1656
1657 for bound in relevant_bounds.instantiate(self.tcx(), alias_ty.args) {
1658 for_each(self, bound, idx)?;
1659 idx += 1;
1660 }
1661
1662 if kind == ty::Projection {
1663 self_ty = alias_ty.self_ty();
1664 } else {
1665 return ControlFlow::Continue(());
1666 }
1667
1668 in_parent_alias_type = true;
1669 }
1670 }
1671
1672 /// Equates the trait in `obligation` with the trait bound. If the two traits
1673 /// can be equated and the normalized trait bound doesn't contain inference
1674 /// variables or placeholders, the normalized bound is returned.
1675 fn match_normalize_trait_ref(
1676 &mut self,
1677 obligation: &PolyTraitObligation<'tcx>,
1678 placeholder_trait_ref: ty::TraitRef<'tcx>,
1679 trait_bound: ty::PolyTraitRef<'tcx>,
1680 ) -> Result<Option<ty::TraitRef<'tcx>>, ()> {
1681 debug_assert!(!placeholder_trait_ref.has_escaping_bound_vars());
1682 if placeholder_trait_ref.def_id != trait_bound.def_id() {
1683 // Avoid unnecessary normalization
1684 return Err(());
1685 }
1686
1687 let drcx = DeepRejectCtxt::relate_rigid_rigid(self.infcx.tcx);
1688 let obligation_args = obligation.predicate.skip_binder().trait_ref.args;
1689 if !drcx.args_may_unify(obligation_args, trait_bound.skip_binder().args) {
1690 return Err(());
1691 }
1692
1693 let trait_bound = self.infcx.instantiate_binder_with_fresh_vars(
1694 obligation.cause.span,
1695 HigherRankedType,
1696 trait_bound,
1697 );
1698 let Normalized { value: trait_bound, obligations: _ } = ensure_sufficient_stack(|| {
1699 normalize_with_depth(
1700 self,
1701 obligation.param_env,
1702 obligation.cause.clone(),
1703 obligation.recursion_depth + 1,
1704 trait_bound,
1705 )
1706 });
1707 self.infcx
1708 .at(&obligation.cause, obligation.param_env)
1709 .eq(DefineOpaqueTypes::No, placeholder_trait_ref, trait_bound)
1710 .map(|InferOk { obligations: _, value: () }| {
1711 // This method is called within a probe, so we can't have
1712 // inference variables and placeholders escape.
1713 if !trait_bound.has_infer() && !trait_bound.has_placeholders() {
1714 Some(trait_bound)
1715 } else {
1716 None
1717 }
1718 })
1719 .map_err(|_| ())
1720 }
1721
1722 fn where_clause_may_apply<'o>(
1723 &mut self,
1724 stack: &TraitObligationStack<'o, 'tcx>,
1725 where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
1726 ) -> Result<EvaluationResult, OverflowError> {
1727 self.evaluation_probe(|this| {
1728 match this.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) {
1729 Ok(obligations) => this.evaluate_predicates_recursively(stack.list(), obligations),
1730 Err(()) => Ok(EvaluatedToErr),
1731 }
1732 })
1733 }
1734
1735 /// Return `Yes` if the obligation's predicate type applies to the env_predicate, and
1736 /// `No` if it does not. Return `Ambiguous` in the case that the projection type is a GAT,
1737 /// and applying this env_predicate constrains any of the obligation's GAT parameters.
1738 ///
1739 /// This behavior is somewhat of a hack to prevent over-constraining inference variables
1740 /// in cases like #91762.
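///
/// A rough sketch of the situation this guards against (illustrative only):
///
/// ```ignore (illustrative)
/// trait Family { type Member<T>; }
///
/// // With an env predicate `<F as Family>::Member<u32> = Vec<u32>` and an
/// // obligation `<F as Family>::Member<?0> = ?1`, equating the two would
/// // infer `?0 = u32` purely as a side effect of picking this candidate,
/// // so we return `Ambiguous` instead of `Yes`.
/// ```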
1741 pub(super) fn match_projection_projections(
1742 &mut self,
1743 obligation: &ProjectionTermObligation<'tcx>,
1744 env_predicate: PolyProjectionPredicate<'tcx>,
1745 potentially_unnormalized_candidates: bool,
1746 ) -> ProjectionMatchesProjection {
1747 debug_assert_eq!(obligation.predicate.def_id, env_predicate.item_def_id());
1748
1749 let mut nested_obligations = PredicateObligations::new();
1750 let infer_predicate = self.infcx.instantiate_binder_with_fresh_vars(
1751 obligation.cause.span,
1752 BoundRegionConversionTime::HigherRankedType,
1753 env_predicate,
1754 );
1755 let infer_projection = if potentially_unnormalized_candidates {
1756 ensure_sufficient_stack(|| {
1757 normalize_with_depth_to(
1758 self,
1759 obligation.param_env,
1760 obligation.cause.clone(),
1761 obligation.recursion_depth + 1,
1762 infer_predicate.projection_term,
1763 &mut nested_obligations,
1764 )
1765 })
1766 } else {
1767 infer_predicate.projection_term
1768 };
1769
1770 let is_match = self
1771 .infcx
1772 .at(&obligation.cause, obligation.param_env)
1773 .eq(DefineOpaqueTypes::No, obligation.predicate, infer_projection)
1774 .is_ok_and(|InferOk { obligations, value: () }| {
1775 self.evaluate_predicates_recursively(
1776 TraitObligationStackList::empty(&ProvisionalEvaluationCache::default()),
1777 nested_obligations.into_iter().chain(obligations),
1778 )
1779 .is_ok_and(|res| res.may_apply())
1780 });
1781
1782 if is_match {
1783 let generics = self.tcx().generics_of(obligation.predicate.def_id);
1784 // FIXME(generic_associated_types): Addresses aggressive inference in #92917.
1785 // If this type is a GAT, and any of the GAT args resolve to something new,
1786 // that means that we must have newly inferred something about the GAT.
1787 // We should give up in that case.
1788 //
1789 // This only detects one layer of inference, which is probably not what we actually
1790 // want, but fixing it causes some ambiguity:
1791 // <https://github.com/rust-lang/rust/issues/125196>.
1792 if !generics.is_own_empty()
1793 && obligation.predicate.args[generics.parent_count..].iter().any(|&p| {
1794 p.has_non_region_infer()
1795 && match p.kind() {
1796 ty::GenericArgKind::Const(ct) => {
1797 self.infcx.shallow_resolve_const(ct) != ct
1798 }
1799 ty::GenericArgKind::Type(ty) => self.infcx.shallow_resolve(ty) != ty,
1800 ty::GenericArgKind::Lifetime(_) => false,
1801 }
1802 })
1803 {
1804 ProjectionMatchesProjection::Ambiguous
1805 } else {
1806 ProjectionMatchesProjection::Yes
1807 }
1808 } else {
1809 ProjectionMatchesProjection::No
1810 }
1811 }
1812}
1813
1814/// ## Winnowing
1815///
1816/// Winnowing is the process of attempting to resolve ambiguity by
1817/// probing further. During the winnowing process, we unify all
1818/// type variables and then we also attempt to evaluate recursive
1819/// bounds to see if they are satisfied.
1820impl<'tcx> SelectionContext<'_, 'tcx> {
1821 /// If there are multiple ways to prove a trait goal, we make some
1822 /// *fairly arbitrary* choices about which candidate is actually used.
1823 ///
1824 /// For more details, look at the implementation of this method :)
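///
/// As a small illustration of one such choice (names made up):
///
/// ```ignore (illustrative)
/// trait MyTrait {}
/// impl<T> MyTrait for T {}
///
/// fn foo<U: MyTrait>() {
///     // Proving `U: MyTrait` has two candidates: the blanket impl and the
///     // `U: MyTrait` where-bound. Winnowing prefers the where-bound
///     // (a `ParamCandidate`) over the impl.
/// }
/// ```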
1825 #[instrument(level = "debug", skip(self), ret)]
1826 fn winnow_candidates(
1827 &mut self,
1828 has_non_region_infer: bool,
1829 mut candidates: Vec<EvaluatedCandidate<'tcx>>,
1830 ) -> Option<SelectionCandidate<'tcx>> {
1831 if candidates.len() == 1 {
1832 return Some(candidates.pop().unwrap().candidate);
1833 }
1834
1835 // We prefer `Sized` candidates over everything.
1836 let mut sized_candidates =
1837 candidates.iter().filter(|c| matches!(c.candidate, SizedCandidate { has_nested: _ }));
1838 if let Some(sized_candidate) = sized_candidates.next() {
1839 // There should only ever be a single sized candidate
1840 // as they would otherwise overlap.
1841 debug_assert_eq!(sized_candidates.next(), None);
1842 // Only prefer the built-in `Sized` candidate if its nested goals are certain.
1843 // Otherwise, we may encounter failure later on if inference causes this candidate
1844 // to not hold, but a where clause would've applied instead.
1845 if sized_candidate.evaluation.must_apply_modulo_regions() {
1846 return Some(sized_candidate.candidate.clone());
1847 } else {
1848 return None;
1849 }
1850 }
1851
1852 // Before we consider where-bounds, we have to deduplicate them here and also
1853 // drop where-bounds in case the same where-bound exists without bound vars.
1854 // This is necessary as elaborating super-trait bounds may result in duplicates.
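//
// For instance (illustrative):
//
// ```rust
// trait Super {}
// trait Sub: Super {}
//
// fn f<T: Sub + Super>() {
//     // Proving `T: Super` sees both the explicit `T: Super` bound and the
//     // `T: Super` bound elaborated from `T: Sub`; the loop below
//     // deduplicates them.
// }
// ```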
1855 'search_victim: loop {
1856 for (i, this) in candidates.iter().enumerate() {
1857 let ParamCandidate(this) = this.candidate else { continue };
1858 for (j, other) in candidates.iter().enumerate() {
1859 if i == j {
1860 continue;
1861 }
1862
1863 let ParamCandidate(other) = other.candidate else { continue };
1864 if this == other {
1865 candidates.remove(j);
1866 continue 'search_victim;
1867 }
1868
1869 if this.skip_binder().trait_ref == other.skip_binder().trait_ref
1870 && this.skip_binder().polarity == other.skip_binder().polarity
1871 && !this.skip_binder().trait_ref.has_escaping_bound_vars()
1872 {
1873 candidates.remove(j);
1874 continue 'search_victim;
1875 }
1876 }
1877 }
1878
1879 break;
1880 }
1881
1882 // The next highest priority is for non-global where-bounds. However, while we don't
1883 // prefer global where-clauses here, we do bail with ambiguity when encountering both
1884 // a global and a non-global where-clause.
1885 //
1886 // Our handling of where-bounds is generally fairly messy but necessary for backwards
1887 // compatibility, see #50825 for why we need to handle global where-bounds like this.
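//
// Illustrative sketch of the distinction (names made up):
//
// ```rust
// trait MyTrait {}
// impl MyTrait for u32 {}
//
// fn foo<T>() where u32: MyTrait {
//     // `u32: MyTrait` is a *global* where-bound (it names no generic
//     // params and has no bound vars); it must not shadow the impl, so it
//     // only wins below if no other candidate applies. A bound like
//     // `T: MyTrait` would be a non-global where-bound and take priority.
// }
// ```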
1888 let is_global = |c: ty::PolyTraitPredicate<'tcx>| c.is_global() && !c.has_bound_vars();
1889 let param_candidates = candidates
1890 .iter()
1891 .filter_map(|c| if let ParamCandidate(p) = c.candidate { Some(p) } else { None });
1892 let mut has_global_bounds = false;
1893 let mut param_candidate = None;
1894 for c in param_candidates {
1895 if is_global(c) {
1896 has_global_bounds = true;
1897 } else if param_candidate.replace(c).is_some() {
1898 // Ambiguity, two potentially different where-clauses
1899 return None;
1900 }
1901 }
1902 if let Some(predicate) = param_candidate {
1903 // Ambiguity, a global and a non-global where-bound.
1904 if has_global_bounds {
1905 return None;
1906 } else {
1907 return Some(ParamCandidate(predicate));
1908 }
1909 }
1910
1911 // Prefer alias-bounds over blanket impls for rigid associated types. This is
1912 // fairly arbitrary but once again necessary for backwards compatibility.
1913 // If there are multiple applicable candidates which don't affect type inference,
1914 // choose the one with the lowest index.
1915 let alias_bound = candidates
1916 .iter()
1917 .filter_map(|c| if let ProjectionCandidate(i) = c.candidate { Some(i) } else { None })
1918 .try_reduce(|c1, c2| if has_non_region_infer { None } else { Some(c1.min(c2)) });
1919 match alias_bound {
1920 Some(Some(index)) => return Some(ProjectionCandidate(index)),
1921 Some(None) => {}
1922 None => return None,
1923 }
1924
1925 // Need to prioritize builtin trait object impls as `<dyn Any as Any>::type_id`
1926 // should use the vtable method and not the method provided by the user-defined
1927 // impl `impl<T: ?Sized> Any for T { .. }`. This really shouldn't exist but is
1928 // necessary due to #57893. We again arbitrarily prefer the applicable candidate
1929 // with the lowest index.
1930 //
1931 // We do not want to use these impls to guide inference in case a user-written impl
1932 // may also apply.
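//
// Concretely (illustrative sketch of the `Any` case mentioned above):
//
// ```rust
// use std::any::{Any, TypeId};
//
// fn erased_type_id(x: &dyn Any) -> TypeId {
//     // `<dyn Any as Any>::type_id` has both the built-in object candidate
//     // (the vtable method) and the blanket
//     // `impl<T: ?Sized + 'static> Any for T` as applicable candidates;
//     // preferring the object candidate returns the erased type's id rather
//     // than `TypeId::of::<dyn Any>()`.
//     x.type_id()
// }
// ```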
1933 let object_bound = candidates
1934 .iter()
1935 .filter_map(|c| if let ObjectCandidate(i) = c.candidate { Some(i) } else { None })
1936 .try_reduce(|c1, c2| if has_non_region_infer { None } else { Some(c1.min(c2)) });
1937 match object_bound {
1938 Some(Some(index)) => {
1939 return if has_non_region_infer
1940 && candidates.iter().any(|c| matches!(c.candidate, ImplCandidate(_)))
1941 {
1942 None
1943 } else {
1944 Some(ObjectCandidate(index))
1945 };
1946 }
1947 Some(None) => {}
1948 None => return None,
1949 }
1950 // Same for upcasting.
1951 let upcast_bound = candidates
1952 .iter()
1953 .filter_map(|c| {
1954 if let TraitUpcastingUnsizeCandidate(i) = c.candidate { Some(i) } else { None }
1955 })
1956 .try_reduce(|c1, c2| if has_non_region_infer { None } else { Some(c1.min(c2)) });
1957 match upcast_bound {
1958 Some(Some(index)) => return Some(TraitUpcastingUnsizeCandidate(index)),
1959 Some(None) => {}
1960 None => return None,
1961 }
1962
1963 // Finally, handle overlapping user-written impls.
1964 let impls = candidates.iter().filter_map(|c| {
1965 if let ImplCandidate(def_id) = c.candidate {
1966 Some((def_id, c.evaluation))
1967 } else {
1968 None
1969 }
1970 });
1971 let mut impl_candidate = None;
1972 for c in impls {
1973 if let Some(prev) = impl_candidate.replace(c) {
1974 if self.prefer_lhs_over_victim(has_non_region_infer, c, prev.0) {
1975 // Ok, prefer `c` over the previous entry
1976 } else if self.prefer_lhs_over_victim(has_non_region_infer, prev, c.0) {
1977 // Ok, keep `prev` instead of the new entry
1978 impl_candidate = Some(prev);
1979 } else {
1980 // Ambiguity, two potentially different impls
1981 return None;
1982 }
1983 }
1984 }
1985 if let Some((def_id, _evaluation)) = impl_candidate {
1986 // Don't use impl candidates which overlap with other candidates.
1987 // This should pretty much only ever happen with malformed impls.
1988 if candidates.iter().all(|c| match c.candidate {
1989 SizedCandidate { has_nested: _ }
1990 | BuiltinCandidate { has_nested: _ }
1991 | TransmutabilityCandidate
1992 | AutoImplCandidate
1993 | ClosureCandidate { .. }
1994 | AsyncClosureCandidate
1995 | AsyncFnKindHelperCandidate
1996 | CoroutineCandidate
1997 | FutureCandidate
1998 | IteratorCandidate
1999 | AsyncIteratorCandidate
2000 | FnPointerCandidate
2001 | TraitAliasCandidate
2002 | TraitUpcastingUnsizeCandidate(_)
2003 | BuiltinObjectCandidate
2004 | BuiltinUnsizeCandidate
2005 | BikeshedGuaranteedNoDropCandidate => false,
2006 // Non-global param candidates have already been handled, global
2007 // where-bounds get ignored.
2008 ParamCandidate(_) | ImplCandidate(_) => true,
2009 ProjectionCandidate(_) | ObjectCandidate(_) => unreachable!(),
2010 }) {
2011 return Some(ImplCandidate(def_id));
2012 } else {
2013 return None;
2014 }
2015 }
2016
2017 if candidates.len() == 1 {
2018 Some(candidates.pop().unwrap().candidate)
2019 } else {
2020 // Also try ignoring all global where-bounds and check whether we end
2021 // with a unique candidate in this case.
2022 let mut not_a_global_where_bound = candidates
2023 .into_iter()
2024 .filter(|c| !matches!(c.candidate, ParamCandidate(p) if is_global(p)));
2025 not_a_global_where_bound
2026 .next()
2027 .map(|c| c.candidate)
2028 .filter(|_| not_a_global_where_bound.next().is_none())
2029 }
2030 }
2031
2032 fn prefer_lhs_over_victim(
2033 &self,
2034 has_non_region_infer: bool,
2035 (lhs, lhs_evaluation): (DefId, EvaluationResult),
2036 victim: DefId,
2037 ) -> bool {
2038 let tcx = self.tcx();
2039 // See if we can toss out `victim` based on specialization.
2040 //
2041 // While this requires us to know *for sure* that the `lhs` impl applies,
2042 // we still use modulo regions here. This is fine as specialization currently
2043 // assumes that specializing impls have to be always applicable, meaning that
2044 // the only allowed region constraints may be constraints also present on the default impl.
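//
// Illustrative sketch (hypothetical impls):
//
// ```rust
// #![feature(specialization)]
// trait MyTrait { fn f(&self) {} }
// impl<T> MyTrait for T { default fn f(&self) {} }
// impl MyTrait for u8 { fn f(&self) {} }
//
// // For `u8: MyTrait`, both impls are candidates; the `u8` impl specializes
// // the blanket impl, so the blanket impl is the `victim` and gets
// // discarded here.
// ```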
2045 if lhs_evaluation.must_apply_modulo_regions() {
2046 if tcx.specializes((lhs, victim)) {
2047 return true;
2048 }
2049 }
2050
2051 match tcx.impls_are_allowed_to_overlap(lhs, victim) {
2052 // For candidates which already reference errors it doesn't really
2053 // matter what we do 🤷
2054 Some(ty::ImplOverlapKind::Permitted { marker: false }) => {
2055 lhs_evaluation.must_apply_considering_regions()
2056 }
2057 Some(ty::ImplOverlapKind::Permitted { marker: true }) => {
2058 // Subtle: If the predicate we are evaluating has inference
2059 // variables, do *not* allow discarding candidates due to
2060 // marker trait impls.
2061 //
2062 // Without this restriction, we could end up accidentally
2063 // constraining inference variables based on an arbitrarily
2064 // chosen trait impl.
2065 //
2066 // Imagine we have the following code:
2067 //
2068 // ```rust
2069 // #[marker] trait MyTrait {}
2070 // impl MyTrait for u8 {}
2071 // impl MyTrait for bool {}
2072 // ```
2073 //
2074 // And we are evaluating the predicate `<_#0t as MyTrait>`.
2075 //
2076 // During selection, we will end up with one candidate for each
2077 // impl of `MyTrait`. If we were to discard one impl in favor
2078 // of the other, we would be left with one candidate, causing
2079 // us to "successfully" select the predicate, unifying
2080 // _#0t with (for example) `u8`.
2081 //
2082 // However, we have no reason to believe that this unification
2083 // is correct - we've essentially just picked an arbitrary
2084 // *possibility* for _#0t, and required that this be the *only*
2085 // possibility.
2086 //
2087 // Eventually, we will either:
2088 // 1) Unify all inference variables in the predicate through
2089 // some other means (e.g. type-checking of a function). We will
2090 // then be in a position to drop marker trait candidates
2091 // without constraining inference variables (since there are
2092 // none left to constrain)
2093 // 2) Be left with some unconstrained inference variables. We
2094 // will then correctly report an inference error, since the
2095 // existence of multiple marker trait impls tells us nothing
2096 // about which one should actually apply.
2097 !has_non_region_infer && lhs_evaluation.must_apply_considering_regions()
2098 }
2099 None => false,
2100 }
2101 }
2102}
2103
2104impl<'tcx> SelectionContext<'_, 'tcx> {
2105 fn sizedness_conditions(
2106 &mut self,
2107 obligation: &PolyTraitObligation<'tcx>,
2108 sizedness: SizedTraitKind,
2109 ) -> BuiltinImplConditions<'tcx> {
2110 use self::BuiltinImplConditions::{Ambiguous, None, Where};
2111
2112 // NOTE: binder moved to (*)
2113 let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
2114
2115 match self_ty.kind() {
2116 ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
2117 | ty::Uint(_)
2118 | ty::Int(_)
2119 | ty::Bool
2120 | ty::Float(_)
2121 | ty::FnDef(..)
2122 | ty::FnPtr(..)
2123 | ty::RawPtr(..)
2124 | ty::Char
2125 | ty::Ref(..)
2126 | ty::Coroutine(..)
2127 | ty::CoroutineWitness(..)
2128 | ty::Array(..)
2129 | ty::Closure(..)
2130 | ty::CoroutineClosure(..)
2131 | ty::Never
2132 | ty::Error(_) => {
2133 // safe for everything
2134 Where(ty::Binder::dummy(Vec::new()))
2135 }
2136
2137 ty::Str | ty::Slice(_) | ty::Dynamic(..) => match sizedness {
2138 SizedTraitKind::Sized => None,
2139 SizedTraitKind::MetaSized => Where(ty::Binder::dummy(Vec::new())),
2140 },
2141
2142 ty::Foreign(..) => None,
2143
2144 ty::Tuple(tys) => Where(
2145 obligation.predicate.rebind(tys.last().map_or_else(Vec::new, |&last| vec![last])),
2146 ),
2147
2148 ty::Pat(ty, _) => Where(obligation.predicate.rebind(vec![*ty])),
2149
2150 ty::Adt(def, args) => {
2151 if let Some(crit) = def.sizedness_constraint(self.tcx(), sizedness) {
2152 // (*) binder moved here
2153 Where(obligation.predicate.rebind(vec![crit.instantiate(self.tcx(), args)]))
2154 } else {
2155 Where(ty::Binder::dummy(Vec::new()))
2156 }
2157 }
2158
2159 // FIXME(unsafe_binders): This binder needs to be squashed
2160 ty::UnsafeBinder(binder_ty) => Where(binder_ty.map_bound(|ty| vec![ty])),
2161
2162 ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) => None,
2163 ty::Infer(ty::TyVar(_)) => Ambiguous,
2164
2165 // We can make this an ICE if/once we actually instantiate the trait obligation eagerly.
2166 ty::Bound(..) => None,
2167
2168 ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
2169 bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
2170 }
2171 }
2172 }
2173
2174 fn copy_clone_conditions(
2175 &mut self,
2176 obligation: &PolyTraitObligation<'tcx>,
2177 ) -> BuiltinImplConditions<'tcx> {
2178 // NOTE: binder moved to (*)
2179 let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
2180
2181 use self::BuiltinImplConditions::{Ambiguous, None, Where};
2182
2183 match *self_ty.kind() {
2184 ty::FnDef(..) | ty::FnPtr(..) | ty::Error(_) => Where(ty::Binder::dummy(Vec::new())),
2185
2186 ty::Uint(_)
2187 | ty::Int(_)
2188 | ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
2189 | ty::Bool
2190 | ty::Float(_)
2191 | ty::Char
2192 | ty::RawPtr(..)
2193 | ty::Never
2194 | ty::Ref(_, _, hir::Mutability::Not)
2195 | ty::Array(..) => {
2196 // Implementations provided in libcore
2197 None
2198 }
2199
2200 // FIXME(unsafe_binder): Should we unconditionally
2201 // (i.e. universally) implement copy/clone?
2202 ty::UnsafeBinder(_) => None,
2203
2204 ty::Dynamic(..)
2205 | ty::Str
2206 | ty::Slice(..)
2207 | ty::Foreign(..)
2208 | ty::Ref(_, _, hir::Mutability::Mut) => None,
2209
2210 ty::Tuple(tys) => {
2211 // (*) binder moved here
2212 Where(obligation.predicate.rebind(tys.iter().collect()))
2213 }
2214
2215 ty::Pat(ty, _) => {
2216 // (*) binder moved here
2217 Where(obligation.predicate.rebind(vec![ty]))
2218 }
2219
2220 ty::Coroutine(coroutine_def_id, args) => {
2221 match self.tcx().coroutine_movability(coroutine_def_id) {
2222 hir::Movability::Static => None,
2223 hir::Movability::Movable => {
2224 if self.tcx().features().coroutine_clone() {
2225 let resolved_upvars =
2226 self.infcx.shallow_resolve(args.as_coroutine().tupled_upvars_ty());
2227 let resolved_witness =
2228 self.infcx.shallow_resolve(args.as_coroutine().witness());
2229 if resolved_upvars.is_ty_var() || resolved_witness.is_ty_var() {
2230 // Not yet resolved.
2231 Ambiguous
2232 } else {
2233 let all = args
2234 .as_coroutine()
2235 .upvar_tys()
2236 .iter()
2237 .chain([args.as_coroutine().witness()])
2238 .collect::<Vec<_>>();
2239 Where(obligation.predicate.rebind(all))
2240 }
2241 } else {
2242 None
2243 }
2244 }
2245 }
2246 }
2247
2248 ty::CoroutineWitness(def_id, args) => {
2249 let hidden_types = rebind_coroutine_witness_types(
2250 self.infcx.tcx,
2251 def_id,
2252 args,
2253 obligation.predicate.bound_vars(),
2254 );
2255 Where(hidden_types)
2256 }
2257
2258 ty::Closure(_, args) => {
2259 // (*) binder moved here
2260 let ty = self.infcx.shallow_resolve(args.as_closure().tupled_upvars_ty());
2261 if let ty::Infer(ty::TyVar(_)) = ty.kind() {
2262 // Not yet resolved.
2263 Ambiguous
2264 } else {
2265 Where(obligation.predicate.rebind(args.as_closure().upvar_tys().to_vec()))
2266 }
2267 }
2268
2269 ty::CoroutineClosure(_, args) => {
2270 // (*) binder moved here
2271 let ty = self.infcx.shallow_resolve(args.as_coroutine_closure().tupled_upvars_ty());
2272 if let ty::Infer(ty::TyVar(_)) = ty.kind() {
2273 // Not yet resolved.
2274 Ambiguous
2275 } else {
2276 Where(
2277 obligation
2278 .predicate
2279 .rebind(args.as_coroutine_closure().upvar_tys().to_vec()),
2280 )
2281 }
2282 }
2283
2284 ty::Adt(..) | ty::Alias(..) | ty::Param(..) | ty::Placeholder(..) => {
2285 // Fallback to whatever user-defined impls exist in this case.
2286 None
2287 }
2288
2289 ty::Infer(ty::TyVar(_)) => {
2290 // Unbound type variable. Might or might not have
2291 // applicable impls and so forth, depending on what
2292 // those type variables wind up being bound to.
2293 Ambiguous
2294 }
2295
2296 // We can make this an ICE if/once we actually instantiate the trait obligation eagerly.
2297 ty::Bound(..) => None,
2298
2299 ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
2300 bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
2301 }
2302 }
2303 }
2304
2305 fn fused_iterator_conditions(
2306 &mut self,
2307 obligation: &PolyTraitObligation<'tcx>,
2308 ) -> BuiltinImplConditions<'tcx> {
2309 let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder());
2310 if let ty::Coroutine(did, ..) = *self_ty.kind()
2311 && self.tcx().coroutine_is_gen(did)
2312 {
2313 BuiltinImplConditions::Where(ty::Binder::dummy(Vec::new()))
2314 } else {
2315 BuiltinImplConditions::None
2316 }
2317 }
2318
2319 /// For default impls, we need to break apart a type into its
2320 /// "constituent types" -- meaning, the types that it contains.
2321 ///
2322 /// Here are some (simple) examples:
2323 ///
2324 /// ```ignore (illustrative)
2325 /// (i32, u32) -> [i32, u32]
2326 /// Foo where struct Foo { x: i32, y: u32 } -> [i32, u32]
2327 /// Bar<i32> where struct Bar<T> { x: T, y: u32 } -> [i32, u32]
2328 /// Zed<i32> where enum Zed<T> { A(T), B(u32) } -> [i32, u32]
2329 /// ```
2330 #[instrument(level = "debug", skip(self), ret)]
2331 fn constituent_types_for_ty(
2332 &self,
2333 t: ty::Binder<'tcx, Ty<'tcx>>,
2334 ) -> Result<ty::Binder<'tcx, Vec<Ty<'tcx>>>, SelectionError<'tcx>> {
2335 Ok(match *t.skip_binder().kind() {
2336 ty::Uint(_)
2337 | ty::Int(_)
2338 | ty::Bool
2339 | ty::Float(_)
2340 | ty::FnDef(..)
2341 | ty::FnPtr(..)
2342 | ty::Error(_)
2343 | ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
2344 | ty::Never
2345 | ty::Char => ty::Binder::dummy(Vec::new()),
2346
2347 // This branch is only for `experimental_default_bounds`.
2348 // Other foreign types were rejected earlier in
2349 // `assemble_candidates_from_auto_impls`.
2350 ty::Foreign(..) => ty::Binder::dummy(Vec::new()),
2351
2352 // FIXME(unsafe_binders): Squash the double binder for now, I guess.
2353 ty::UnsafeBinder(_) => return Err(SelectionError::Unimplemented),
2354
2355 // Treat this like `struct str([u8]);`
2356 ty::Str => ty::Binder::dummy(vec![Ty::new_slice(self.tcx(), self.tcx().types.u8)]),
2357
2358 ty::Placeholder(..)
2359 | ty::Dynamic(..)
2360 | ty::Param(..)
2361 | ty::Alias(ty::Projection | ty::Inherent | ty::Free, ..)
2362 | ty::Bound(..)
2363 | ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
2364 bug!("asked to assemble constituent types of unexpected type: {:?}", t);
2365 }
2366
2367 ty::RawPtr(element_ty, _) | ty::Ref(_, element_ty, _) => t.rebind(vec![element_ty]),
2368
2369 ty::Pat(ty, _) | ty::Array(ty, _) | ty::Slice(ty) => t.rebind(vec![ty]),
2370
2371 ty::Tuple(tys) => {
2372 // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
2373 t.rebind(tys.iter().collect())
2374 }
2375
2376 ty::Closure(_, args) => {
2377 let ty = self.infcx.shallow_resolve(args.as_closure().tupled_upvars_ty());
2378 t.rebind(vec![ty])
2379 }
2380
2381 ty::CoroutineClosure(_, args) => {
2382 let ty = self.infcx.shallow_resolve(args.as_coroutine_closure().tupled_upvars_ty());
2383 t.rebind(vec![ty])
2384 }
2385
2386 ty::Coroutine(_, args) => {
2387 let ty = self.infcx.shallow_resolve(args.as_coroutine().tupled_upvars_ty());
2388 let witness = args.as_coroutine().witness();
2389 t.rebind([ty].into_iter().chain(iter::once(witness)).collect())
2390 }
2391
2392 ty::CoroutineWitness(def_id, args) => {
2393 rebind_coroutine_witness_types(self.infcx.tcx, def_id, args, t.bound_vars())
2394 }
2395
2396 // For `PhantomData<T>`, we pass `T`.
2397 ty::Adt(def, args) if def.is_phantom_data() => t.rebind(args.types().collect()),
2398
2399 ty::Adt(def, args) => {
2400 t.rebind(def.all_fields().map(|f| f.ty(self.tcx(), args)).collect())
2401 }
2402
2403 ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
2404 if self.infcx.can_define_opaque_ty(def_id) {
2405 unreachable!()
2406 } else {
2407 // We can resolve the `impl Trait` to its concrete type,
2408 // which enforces a DAG between the functions requiring
2409 // the auto trait bounds in question.
2410 match self.tcx().type_of_opaque(def_id) {
2411 Ok(ty) => t.rebind(vec![ty.instantiate(self.tcx(), args)]),
2412 Err(_) => {
2413 return Err(SelectionError::OpaqueTypeAutoTraitLeakageUnknown(def_id));
2414 }
2415 }
2416 }
2417 }
2418 })
2419 }
2420
2421 fn collect_predicates_for_types(
2422 &mut self,
2423 param_env: ty::ParamEnv<'tcx>,
2424 cause: ObligationCause<'tcx>,
2425 recursion_depth: usize,
2426 trait_def_id: DefId,
2427 types: Vec<Ty<'tcx>>,
2428 ) -> PredicateObligations<'tcx> {
2429 // Because the types were potentially derived from
2430 // higher-ranked obligations they may reference late-bound
2431 // regions. For example, `for<'a> Foo<&'a i32> : Copy` would
2432 // yield a type like `for<'a> &'a i32`. In general, we
2433 // maintain the invariant that we never manipulate bound
2434 // regions, so we have to process these bound regions somehow.
2435 //
2436 // The strategy is to:
2437 //
2438 // 1. Instantiate those regions to placeholder regions (e.g.,
2439 // `for<'a> &'a i32` becomes `&'0 i32`).
2440 // 2. Produce something like `&'0 i32 : Copy`
2441 // 3. Re-bind the regions back to `for<'a> &'a i32 : Copy`
2442
2443 types
2444 .into_iter()
2445 .flat_map(|placeholder_ty| {
2446 let Normalized { value: normalized_ty, mut obligations } =
2447 ensure_sufficient_stack(|| {
2448 normalize_with_depth(
2449 self,
2450 param_env,
2451 cause.clone(),
2452 recursion_depth,
2453 placeholder_ty,
2454 )
2455 });
2456
2457 let tcx = self.tcx();
2458 let trait_ref = if tcx.generics_of(trait_def_id).own_params.len() == 1 {
2459 ty::TraitRef::new(tcx, trait_def_id, [normalized_ty])
2460 } else {
2461 // If this is an ill-formed auto/built-in trait, then synthesize
2462 // new error args for the missing generics.
2463 let err_args = ty::GenericArgs::extend_with_error(
2464 tcx,
2465 trait_def_id,
2466 &[normalized_ty.into()],
2467 );
2468 ty::TraitRef::new_from_args(tcx, trait_def_id, err_args)
2469 };
2470
2471 let obligation = Obligation::new(self.tcx(), cause.clone(), param_env, trait_ref);
2472 obligations.push(obligation);
2473 obligations
2474 })
2475 .collect()
2476 }
2477
2478 ///////////////////////////////////////////////////////////////////////////
2479 // Matching
2480 //
2481 // Matching is a common path used for both evaluation and
2482 // confirmation. It basically unifies types that appear in impls
2483 // and traits. This does affect the surrounding environment;
2484 // therefore, when used during evaluation, match routines must be
2485 // run inside of a `probe()` so that their side-effects are
2486 // contained.
2487
2488 fn rematch_impl(
2489 &mut self,
2490 impl_def_id: DefId,
2491 obligation: &PolyTraitObligation<'tcx>,
2492 ) -> Normalized<'tcx, GenericArgsRef<'tcx>> {
2493 let impl_trait_header = self.tcx().impl_trait_header(impl_def_id).unwrap();
2494 match self.match_impl(impl_def_id, impl_trait_header, obligation) {
2495 Ok(args) => args,
2496 Err(()) => {
2497 let predicate = self.infcx.resolve_vars_if_possible(obligation.predicate);
2498 bug!("impl {impl_def_id:?} was matchable against {predicate:?} but now is not")
2499 }
2500 }
2501 }
2502
2503 #[instrument(level = "debug", skip(self), ret)]
2504 fn match_impl(
2505 &mut self,
2506 impl_def_id: DefId,
2507 impl_trait_header: ty::ImplTraitHeader<'tcx>,
2508 obligation: &PolyTraitObligation<'tcx>,
2509 ) -> Result<Normalized<'tcx, GenericArgsRef<'tcx>>, ()> {
2510 let placeholder_obligation =
2511 self.infcx.enter_forall_and_leak_universe(obligation.predicate);
2512 let placeholder_obligation_trait_ref = placeholder_obligation.trait_ref;
2513
2514 let impl_args = self.infcx.fresh_args_for_item(obligation.cause.span, impl_def_id);
2515
2516 let trait_ref = impl_trait_header.trait_ref.instantiate(self.tcx(), impl_args);
2517 debug!(?impl_trait_header);
2518
2519 let Normalized { value: impl_trait_ref, obligations: mut nested_obligations } =
2520 ensure_sufficient_stack(|| {
2521 normalize_with_depth(
2522 self,
2523 obligation.param_env,
2524 obligation.cause.clone(),
2525 obligation.recursion_depth + 1,
2526 trait_ref,
2527 )
2528 });
2529
2530 debug!(?impl_trait_ref, ?placeholder_obligation_trait_ref);
2531
2532 let cause = ObligationCause::new(
2533 obligation.cause.span,
2534 obligation.cause.body_id,
2535 ObligationCauseCode::MatchImpl(obligation.cause.clone(), impl_def_id),
2536 );
2537
2538 let InferOk { obligations, .. } = self
2539 .infcx
2540 .at(&cause, obligation.param_env)
2541 .eq(DefineOpaqueTypes::No, placeholder_obligation_trait_ref, impl_trait_ref)
2542 .map_err(|e| {
2543 debug!("match_impl: failed eq_trait_refs due to `{}`", e.to_string(self.tcx()))
2544 })?;
2545 nested_obligations.extend(obligations);
2546
2547 if impl_trait_header.polarity == ty::ImplPolarity::Reservation
2548 && !matches!(self.infcx.typing_mode(), TypingMode::Coherence)
2549 {
2550 debug!("reservation impls only apply in intercrate mode");
2551 return Err(());
2552 }
2553
2554 Ok(Normalized { value: impl_args, obligations: nested_obligations })
2555 }
2556
2557 fn match_upcast_principal(
2558 &mut self,
2559 obligation: &PolyTraitObligation<'tcx>,
2560 unnormalized_upcast_principal: ty::PolyTraitRef<'tcx>,
2561 a_data: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>,
2562 b_data: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>,
2563 a_region: ty::Region<'tcx>,
2564 b_region: ty::Region<'tcx>,
2565 ) -> SelectionResult<'tcx, PredicateObligations<'tcx>> {
2566 let tcx = self.tcx();
2567 let mut nested = PredicateObligations::new();
2568
2569 // We may upcast to auto traits that are either explicitly listed in
2570 // the object type's bounds, or implied by the principal trait ref's
2571 // supertraits.
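//
// For instance (illustrative):
//
// ```rust
// trait Super {}
// trait Sub: Super + Send {}
//
// fn upcast(x: &dyn Sub) -> &(dyn Super + Send) {
//     // `Send` is not listed in the source object type's bounds, but it is
//     // implied by `Sub`'s supertraits, so the upcast is accepted.
//     x
// }
// ```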
2572 let a_auto_traits: FxIndexSet<DefId> = a_data
2573 .auto_traits()
2574 .chain(a_data.principal_def_id().into_iter().flat_map(|principal_def_id| {
2575 elaborate::supertrait_def_ids(tcx, principal_def_id)
2576 .filter(|def_id| tcx.trait_is_auto(*def_id))
2577 }))
2578 .collect();
2579
2580 let upcast_principal = normalize_with_depth_to(
2581 self,
2582 obligation.param_env,
2583 obligation.cause.clone(),
2584 obligation.recursion_depth + 1,
2585 unnormalized_upcast_principal,
2586 &mut nested,
2587 );
2588
2589 for bound in b_data {
2590 match bound.skip_binder() {
2591 // Check that a_ty's supertrait (upcast_principal) is compatible
2592 // with the target (b_ty).
2593 ty::ExistentialPredicate::Trait(target_principal) => {
2594 let hr_source_principal = upcast_principal.map_bound(|trait_ref| {
2595 ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref)
2596 });
2597 let hr_target_principal = bound.rebind(target_principal);
2598
2599 nested.extend(
2600 self.infcx
2601 .enter_forall(hr_target_principal, |target_principal| {
2602 let source_principal =
2603 self.infcx.instantiate_binder_with_fresh_vars(
2604 obligation.cause.span,
2605 HigherRankedType,
2606 hr_source_principal,
2607 );
2608 self.infcx.at(&obligation.cause, obligation.param_env).eq_trace(
2609 DefineOpaqueTypes::Yes,
2610 ToTrace::to_trace(
2611 &obligation.cause,
2612 hr_target_principal,
2613 hr_source_principal,
2614 ),
2615 target_principal,
2616 source_principal,
2617 )
2618 })
2619 .map_err(|_| SelectionError::Unimplemented)?
2620 .into_obligations(),
2621 );
2622 }
2623 // Check that b_ty's projection is satisfied by exactly one of
2624 // a_ty's projections. First, we look through the list to see if
2625 // any match. If not, error. Then, if *more* than one matches, we
2626 // return ambiguity. Otherwise, if exactly one matches, equate
2627 // it with b_ty's projection.
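//
// For instance (illustrative):
//
// ```rust
// trait Super { type Assoc; }
// trait Sub: Super {}
//
// fn upcast(x: &dyn Sub<Assoc = u32>) -> &dyn Super<Assoc = u32> {
//     // The target's `Assoc = u32` projection must be matched against
//     // exactly one projection bound of the source object type.
//     x
// }
// ```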
2628 ty::ExistentialPredicate::Projection(target_projection) => {
2629 let hr_target_projection = bound.rebind(target_projection);
2630
2631 let mut matching_projections =
2632 a_data.projection_bounds().filter(|&hr_source_projection| {
2633 // Eager normalization means that we can just use can_eq
2634 // here instead of equating and processing obligations.
2635 hr_source_projection.item_def_id() == hr_target_projection.item_def_id()
2636 && self.infcx.probe(|_| {
2637 self.infcx
2638 .enter_forall(hr_target_projection, |target_projection| {
2639 let source_projection =
2640 self.infcx.instantiate_binder_with_fresh_vars(
2641 obligation.cause.span,
2642 HigherRankedType,
2643 hr_source_projection,
2644 );
2645 self.infcx
2646 .at(&obligation.cause, obligation.param_env)
2647 .eq_trace(
2648 DefineOpaqueTypes::Yes,
2649 ToTrace::to_trace(
2650 &obligation.cause,
2651 hr_target_projection,
2652 hr_source_projection,
2653 ),
2654 target_projection,
2655 source_projection,
2656 )
2657 })
2658 .is_ok()
2659 })
2660 });
2661
2662 let Some(hr_source_projection) = matching_projections.next() else {
2663 return Err(SelectionError::Unimplemented);
2664 };
2665 if matching_projections.next().is_some() {
2666 return Ok(None);
2667 }
2668 nested.extend(
2669 self.infcx
2670 .enter_forall(hr_target_projection, |target_projection| {
2671 let source_projection =
2672 self.infcx.instantiate_binder_with_fresh_vars(
2673 obligation.cause.span,
2674 HigherRankedType,
2675 hr_source_projection,
2676 );
2677 self.infcx.at(&obligation.cause, obligation.param_env).eq_trace(
2678 DefineOpaqueTypes::Yes,
2679 ToTrace::to_trace(
2680 &obligation.cause,
2681 hr_target_projection,
2682 hr_source_projection,
2683 ),
2684 target_projection,
2685 source_projection,
2686 )
2687 })
2688 .map_err(|_| SelectionError::Unimplemented)?
2689 .into_obligations(),
2690 );
2691 }
2692 // Check that b_ty's auto traits are present in a_ty's bounds.
2693 ty::ExistentialPredicate::AutoTrait(def_id) => {
2694 if !a_auto_traits.contains(&def_id) {
2695 return Err(SelectionError::Unimplemented);
2696 }
2697 }
2698 }
2699 }
2700
2701 nested.push(Obligation::with_depth(
2702 tcx,
2703 obligation.cause.clone(),
2704 obligation.recursion_depth + 1,
2705 obligation.param_env,
2706 ty::Binder::dummy(ty::OutlivesPredicate(a_region, b_region)),
2707 ));
2708
2709 Ok(Some(nested))
2710 }
2711
2712 /// Normalize `where_clause_trait_ref` and try to match it against
2713 /// `obligation`. If successful, return any predicates that
2714 /// result from the normalization.
2715 fn match_where_clause_trait_ref(
2716 &mut self,
2717 obligation: &PolyTraitObligation<'tcx>,
2718 where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
2719 ) -> Result<PredicateObligations<'tcx>, ()> {
2720 self.match_poly_trait_ref(obligation, where_clause_trait_ref)
2721 }
2722
2723 /// Returns `Ok` if `poly_trait_ref` being true implies that the
2724 /// obligation is satisfied.
2725 #[instrument(skip(self), level = "debug")]
2726 fn match_poly_trait_ref(
2727 &mut self,
2728 obligation: &PolyTraitObligation<'tcx>,
2729 poly_trait_ref: ty::PolyTraitRef<'tcx>,
2730 ) -> Result<PredicateObligations<'tcx>, ()> {
2731 let predicate = self.infcx.enter_forall_and_leak_universe(obligation.predicate);
2732 let trait_ref = self.infcx.instantiate_binder_with_fresh_vars(
2733 obligation.cause.span,
2734 HigherRankedType,
2735 poly_trait_ref,
2736 );
2737 self.infcx
2738 .at(&obligation.cause, obligation.param_env)
2739 .eq(DefineOpaqueTypes::No, predicate.trait_ref, trait_ref)
2740 .map(|InferOk { obligations, .. }| obligations)
2741 .map_err(|_| ())
2742 }
2743
2744 ///////////////////////////////////////////////////////////////////////////
2745 // Miscellany
2746
2747 fn match_fresh_trait_refs(
2748 &self,
2749 previous: ty::PolyTraitPredicate<'tcx>,
2750 current: ty::PolyTraitPredicate<'tcx>,
2751 ) -> bool {
2752 let mut matcher = _match::MatchAgainstFreshVars::new(self.tcx());
2753 matcher.relate(previous, current).is_ok()
2754 }
2755
2756 fn push_stack<'o>(
2757 &mut self,
2758 previous_stack: TraitObligationStackList<'o, 'tcx>,
2759 obligation: &'o PolyTraitObligation<'tcx>,
2760 ) -> TraitObligationStack<'o, 'tcx> {
2761 let fresh_trait_pred = obligation.predicate.fold_with(&mut self.freshener);
2762
2763 let dfn = previous_stack.cache.next_dfn();
2764 let depth = previous_stack.depth() + 1;
2765 TraitObligationStack {
2766 obligation,
2767 fresh_trait_pred,
2768 reached_depth: Cell::new(depth),
2769 previous: previous_stack,
2770 dfn,
2771 depth,
2772 }
2773 }
2774
2775 #[instrument(skip(self), level = "debug")]
2776 fn closure_trait_ref_unnormalized(
2777 &mut self,
2778 self_ty: Ty<'tcx>,
2779 fn_trait_def_id: DefId,
2780 ) -> ty::PolyTraitRef<'tcx> {
2781 let ty::Closure(_, args) = *self_ty.kind() else {
2782 bug!("expected closure, found {self_ty}");
2783 };
2784 let closure_sig = args.as_closure().sig();
2785
2786 closure_trait_ref_and_return_type(
2787 self.tcx(),
2788 fn_trait_def_id,
2789 self_ty,
2790 closure_sig,
2791 util::TupleArgumentsFlag::No,
2792 )
2793 .map_bound(|(trait_ref, _)| trait_ref)
2794 }
2795
2796 /// Returns the obligations that are implied by instantiating an
2797 /// impl or trait. The obligations are instantiated and fully
2798 /// normalized. This is used when confirming an impl or default
2799 /// impl.
2800 #[instrument(level = "debug", skip(self, cause, param_env))]
2801 fn impl_or_trait_obligations(
2802 &mut self,
2803 cause: &ObligationCause<'tcx>,
2804 recursion_depth: usize,
2805 param_env: ty::ParamEnv<'tcx>,
2806 def_id: DefId, // of impl or trait
2807 args: GenericArgsRef<'tcx>, // for impl or trait
2808 parent_trait_pred: ty::Binder<'tcx, ty::TraitPredicate<'tcx>>,
2809 ) -> PredicateObligations<'tcx> {
2810 let tcx = self.tcx();
2811
2812 // To allow for one-pass evaluation of the nested obligation,
2813 // each predicate must be preceded by the obligations required
2814 // to normalize it.
2815 // For example, if we have:
2816 // impl<U: Iterator<Item: Copy>, V: Iterator<Item = U>> Foo for V
2817 // the impl will have the following predicates:
2818 // <V as Iterator>::Item = U,
2819 // U: Iterator, U: Sized,
2820 // V: Iterator, V: Sized,
2821 // <U as Iterator>::Item: Copy
2822 // When we instantiate, say, `V => IntoIter<u32>, U => $0`, the last
2823 // obligation will normalize to `<$0 as Iterator>::Item = $1` and
2824 // `$1: Copy`, so we must ensure the obligations are emitted in
2825 // that order.
2826 let predicates = tcx.predicates_of(def_id);
2827 assert_eq!(predicates.parent, None);
2828 let predicates = predicates.instantiate_own(tcx, args);
2829 let mut obligations = PredicateObligations::with_capacity(predicates.len());
2830 for (index, (predicate, span)) in predicates.into_iter().enumerate() {
2831 let cause = if tcx.is_lang_item(parent_trait_pred.def_id(), LangItem::CoerceUnsized) {
2832 cause.clone()
2833 } else {
2834 cause.clone().derived_cause(parent_trait_pred, |derived| {
2835 ObligationCauseCode::ImplDerived(Box::new(ImplDerivedCause {
2836 derived,
2837 impl_or_alias_def_id: def_id,
2838 impl_def_predicate_index: Some(index),
2839 span,
2840 }))
2841 })
2842 };
2843 let clause = normalize_with_depth_to(
2844 self,
2845 param_env,
2846 cause.clone(),
2847 recursion_depth,
2848 predicate,
2849 &mut obligations,
2850 );
2851 obligations.push(Obligation {
2852 cause,
2853 recursion_depth,
2854 param_env,
2855 predicate: clause.as_predicate(),
2856 });
2857 }
2858
2859 // Register any outlives obligations from the trait here, cc #124336.
2860 if matches!(tcx.def_kind(def_id), DefKind::Impl { of_trait: true }) {
2861 for clause in tcx.impl_super_outlives(def_id).iter_instantiated(tcx, args) {
2862 let clause = normalize_with_depth_to(
2863 self,
2864 param_env,
2865 cause.clone(),
2866 recursion_depth,
2867 clause,
2868 &mut obligations,
2869 );
2870 obligations.push(Obligation {
2871 cause: cause.clone(),
2872 recursion_depth,
2873 param_env,
2874 predicate: clause.as_predicate(),
2875 });
2876 }
2877 }
2878
2879 obligations
2880 }
2881}
2882
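/// Returns the coroutine's hidden (witness) types, instantiated with `args`
/// and wrapped in a binder whose variables are the caller's `bound_vars`
/// followed by the hidden types' own bound vars (their de Bruijn indices are
/// shifted past `bound_vars` accordingly).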
2883fn rebind_coroutine_witness_types<'tcx>(
2884 tcx: TyCtxt<'tcx>,
2885 def_id: DefId,
2886 args: ty::GenericArgsRef<'tcx>,
2887 bound_vars: &'tcx ty::List<ty::BoundVariableKind>,
2888) -> ty::Binder<'tcx, Vec<Ty<'tcx>>> {
2889 let bound_coroutine_types = tcx.coroutine_hidden_types(def_id).skip_binder();
2890 let shifted_coroutine_types =
2891 tcx.shift_bound_var_indices(bound_vars.len(), bound_coroutine_types.skip_binder());
2892 ty::Binder::bind_with_vars(
2893 ty::EarlyBinder::bind(shifted_coroutine_types.types.to_vec()).instantiate(tcx, args),
2894 tcx.mk_bound_variable_kinds_from_iter(
2895 bound_vars.iter().chain(bound_coroutine_types.bound_vars()),
2896 ),
2897 )
2898}
2899
2900impl<'o, 'tcx> TraitObligationStack<'o, 'tcx> {
2901 fn list(&'o self) -> TraitObligationStackList<'o, 'tcx> {
2902 TraitObligationStackList::with(self)
2903 }
2904
2905 fn cache(&self) -> &'o ProvisionalEvaluationCache<'tcx> {
2906 self.previous.cache
2907 }
2908
2909 fn iter(&'o self) -> TraitObligationStackList<'o, 'tcx> {
2910 self.list()
2911 }
2912
2913 /// Indicates that attempting to evaluate this stack entry
2914 /// required accessing something from the stack at depth `reached_depth`.
2915 fn update_reached_depth(&self, reached_depth: usize) {
2916 assert!(
2917 self.depth >= reached_depth,
2918 "invoked `update_reached_depth` with something under this stack: \
2919 self.depth={} reached_depth={}",
2920 self.depth,
2921 reached_depth,
2922 );
2923 debug!(reached_depth, "update_reached_depth");
2924 let mut p = self;
2925 while reached_depth < p.depth {
2926 debug!(?p.fresh_trait_pred, "update_reached_depth: marking as cycle participant");
2927 p.reached_depth.set(p.reached_depth.get().min(reached_depth));
2928 p = p.previous.head.unwrap();
2929 }
2930 }
2931}
2932
2933/// The "provisional evaluation cache" is used to store intermediate cache results
2934/// when solving auto traits. Auto traits are unusual in that they can support
2935/// cycles. So, for example, a "proof tree" like this would be ok:
2936///
2937/// - `Foo<T>: Send` :-
2938/// - `Bar<T>: Send` :-
2939/// - `Foo<T>: Send` -- cycle, but ok
2940/// - `Baz<T>: Send`
2941///
2942/// Here, to prove `Foo<T>: Send`, we have to prove `Bar<T>: Send` and
2943/// `Baz<T>: Send`. Proving `Bar<T>: Send` in turn required `Foo<T>: Send`.
2944/// For non-auto traits, this cycle would be an error, but for auto traits (because
2945/// they are coinductive) it is considered ok.
2946///
2947/// However, there is a complication: at the point where we have
2948/// "proven" `Bar<T>: Send`, we have in fact only proven it
2949/// *provisionally*. In particular, we proved that `Bar<T>: Send`
2950/// *under the assumption* that `Foo<T>: Send`. But what if we later
2951/// find out this assumption is wrong? Specifically, we could
2952/// encounter some kind of error proving `Baz<T>: Send`. In that case,
2953/// `Bar<T>: Send` didn't turn out to be true.
2954///
2955/// In Issue #60010, we found a bug in rustc where it would cache
2956/// these intermediate results. This was fixed in #60444 by disabling
2957/// *all* caching for things involved in a cycle -- in our example,
2958/// that would mean we don't cache that `Bar<T>: Send`. But this led
2959/// to large slowdowns.
2960///
2961/// Specifically, imagine this scenario, where proving `Baz<T>: Send`
2962/// first requires proving `Bar<T>: Send` (which is true:
2963///
2964/// - `Foo<T>: Send` :-
2965/// - `Bar<T>: Send` :-
2966/// - `Foo<T>: Send` -- cycle, but ok
2967/// - `Baz<T>: Send`
2968/// - `Bar<T>: Send` -- would be nice for this to be a cache hit!
2969/// - `*const T: Send` -- but what if we later encounter an error?
2970///
2971/// The *provisional evaluation cache* resolves this issue. It stores
2972/// cache results that we've proven but which were involved in a cycle
2973/// in some way. We track the minimal stack depth (i.e., the
2974/// farthest from the top of the stack) that we are dependent on.
2975/// The idea is that the cache results within are all valid -- so long as
2976/// none of the nodes in between the current node and the node at that minimum
2977/// depth result in an error (in which case the cached results are just thrown away).
2978///
2979/// During evaluation, we consult this provisional cache and rely on
2980/// it. Accessing a cached value is considered equivalent to accessing
2981/// a result at `reached_depth`, so it marks the *current* solution as
2982/// provisional as well. If an error is encountered, we toss out any
2983/// provisional results added from the subtree that encountered the
2984/// error. When we pop the node at `reached_depth` from the stack, we
2985/// can commit all the things that remain in the provisional cache.
2986struct ProvisionalEvaluationCache<'tcx> {
2987 /// next "depth first number" to issue -- just a counter
2988 dfn: Cell<usize>,
2989
2990 /// Map from cache key to the provisionally evaluated thing.
2991 /// The cache entries contain the result but also the DFN in which they
2992 /// were added. The DFN is used to clear out values on failure.
2993 ///
2994 /// Imagine we have a stack like:
2995 ///
2996 /// - `A B C` and we add a cache for the result of C (DFN 2)
2997 /// - Then we have a stack `A B D` where `D` has DFN 3
2998 /// - We try to solve D by evaluating E: `A B D E` (DFN 4)
2999 /// - `E` generates various cache entries which have cyclic dependencies on `B`
3000 /// - `A B D E F` and so forth
3001 /// - the DFN of `F` for example would be 5
3002 /// - then we determine that `E` is in error -- we will then clear
3003 /// all cache values whose DFN is >= 4 -- in this case, that
3004 /// means the cached value for `F`.
3005 map: RefCell<FxIndexMap<ty::PolyTraitPredicate<'tcx>, ProvisionalEvaluation>>,
3006
3007 /// The stack of terms that we assume to be well-formed because a `WF(term)` predicate
3008 /// is on the stack above (and because wellformedness is coinductive).
3009 /// In an "ideal" world, this would share a stack with trait predicates in
3010 /// `TraitObligationStack`. However, trait predicates are *much* hotter than
3011 /// `WellFormed` predicates, and it's very likely that the additional matches
3012 /// will have a perf effect. The value here is the well-formed `Term`
3013 /// and the depth of the trait predicate *above* that well-formed predicate.
3014 wf_args: RefCell<Vec<(ty::Term<'tcx>, usize)>>,
3015}
3016
3017/// A cache value for the provisional cache: contains the depth-first
3018/// number (DFN) and result.
3019#[derive(Copy, Clone, Debug)]
3020struct ProvisionalEvaluation {
3021 from_dfn: usize,
3022 reached_depth: usize,
3023 result: EvaluationResult,
3024}
3025
3026impl<'tcx> Default for ProvisionalEvaluationCache<'tcx> {
3027 fn default() -> Self {
3028 Self { dfn: Cell::new(0), map: Default::default(), wf_args: Default::default() }
3029 }
3030}
3031
3032impl<'tcx> ProvisionalEvaluationCache<'tcx> {
3033 /// Get the next DFN in sequence (basically a counter).
3034 fn next_dfn(&self) -> usize {
3035 let result = self.dfn.get();
3036 self.dfn.set(result + 1);
3037 result
3038 }
3039
3040 /// Check the provisional cache for any result for
3041 /// `fresh_trait_pred`. If there is a hit, then you must consider
3042 /// it an access to the stack slots at depth
3043 /// `reached_depth` (from the returned value).
3044 fn get_provisional(
3045 &self,
3046 fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
3047 ) -> Option<ProvisionalEvaluation> {
3048 debug!(
3049 ?fresh_trait_pred,
3050 "get_provisional = {:#?}",
3051 self.map.borrow().get(&fresh_trait_pred),
3052 );
3053 Some(*self.map.borrow().get(&fresh_trait_pred)?)
3054 }
3055
3056 /// Insert a provisional result into the cache. The result came
3057 /// from the node with the given DFN. It accessed a minimum depth
3058 /// of `reached_depth` to compute. It evaluated `fresh_trait_pred`
3059 /// and resulted in `result`.
3060 fn insert_provisional(
3061 &self,
3062 from_dfn: usize,
3063 reached_depth: usize,
3064 fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
3065 result: EvaluationResult,
3066 ) {
3067 debug!(?from_dfn, ?fresh_trait_pred, ?result, "insert_provisional");
3068
3069 let mut map = self.map.borrow_mut();
3070
3071 // Subtle: when we complete working on the DFN `from_dfn`, anything
3072 // that remains in the provisional cache must be dependent on some older
3073 // stack entry than `from_dfn`. We have to update their depth with our transitive
3074 // depth in that case or else it would be referring to some popped node.
3075 //
3076 // Example:
3077 // A (reached depth 0)
3078 // ...
3079 // B // depth 1 -- reached depth = 0
3080 // C // depth 2 -- reached depth = 1 (should be 0)
3081 // B
3082 // A // depth 0
3083 // D (reached depth 1)
3084 // C (cache -- reached depth = 2)
3085 for (_k, v) in &mut *map {
3086 if v.from_dfn >= from_dfn {
3087 v.reached_depth = reached_depth.min(v.reached_depth);
3088 }
3089 }
3090
3091 map.insert(fresh_trait_pred, ProvisionalEvaluation { from_dfn, reached_depth, result });
3092 }
3093
3094 /// Invoked when the node with dfn `dfn` does not get a successful
3095 /// result. This will clear out any provisional cache entries
3096 /// that were added since `dfn` was created. This is because the
3097 /// provisional entries are things which must assume that the
3098 /// things on the stack at the time of their creation succeeded --
3099 /// since the failing node is presently at the top of the stack,
3100 /// these provisional entries must either depend on it or some
3101 /// ancestor of it.
3102 fn on_failure(&self, dfn: usize) {
3103 debug!(?dfn, "on_failure");
3104 self.map.borrow_mut().retain(|key, eval| {
3105 if eval.from_dfn >= dfn {
3106 debug!("on_failure: removing {:?}", key);
3107 false
3108 } else {
3109 true
3110 }
3111 });
3112 }
3113
3114 /// Invoked when the node at depth `depth` completed without
3115 /// depending on anything higher in the stack (if that completion
3116 /// was a failure, then `on_failure` should have been invoked
3117 /// already).
3118 ///
3119 /// Note that we may still have provisional cache items remaining
3120 /// in the cache when this is done. For example, if there is a
3121 /// cycle:
3122 ///
3123 /// * A depends on...
3124 /// * B depends on A
3125 /// * C depends on...
3126 /// * D depends on C
3127 /// * ...
3128 ///
3129 /// Then as we complete the C node we will have a provisional cache
3130 /// with results for A, B, C, and D. This method would clear out
3131 /// the C and D results, but leave A and B provisional.
3132 ///
3133 /// This is determined based on the DFN: we remove any provisional
3134 /// results created since `dfn` started (e.g., in our example, dfn
3135 /// would be 2, representing the C node, and hence we would
3136 /// remove the result for D, which has DFN 3, but not the results for
3137 /// A and B, which have DFNs 0 and 1 respectively).
3138 ///
3139 /// Note that we *do not* attempt to cache these cycle participants
3140 /// in the evaluation cache. Doing so would require carefully computing
3141 /// the correct `DepNode` to store in the cache entry:
3142 /// cycle participants may implicitly depend on query results
3143 /// related to other participants in the cycle, due to our logic
3144 /// which examines the evaluation stack.
3145 ///
3146 /// We used to try to perform this caching,
3147 /// but it led to multiple incremental compilation ICEs
3148 /// (see #92987 and #96319), and was very hard to understand.
3149 /// Fortunately, removing the caching didn't seem to
3150 /// have a performance impact in practice.
3151 fn on_completion(&self, dfn: usize) {
3152 debug!(?dfn, "on_completion");
3153 self.map.borrow_mut().retain(|fresh_trait_pred, eval| {
3154 if eval.from_dfn >= dfn {
3155 debug!(?fresh_trait_pred, ?eval, "on_completion");
3156 return false;
3157 }
3158 true
3159 });
3160 }
3161}
3162
3163#[derive(Copy, Clone)]
3164struct TraitObligationStackList<'o, 'tcx> {
3165 cache: &'o ProvisionalEvaluationCache<'tcx>,
3166 head: Option<&'o TraitObligationStack<'o, 'tcx>>,
3167}
3168
3169impl<'o, 'tcx> TraitObligationStackList<'o, 'tcx> {
3170 fn empty(cache: &'o ProvisionalEvaluationCache<'tcx>) -> TraitObligationStackList<'o, 'tcx> {
3171 TraitObligationStackList { cache, head: None }
3172 }
3173
3174 fn with(r: &'o TraitObligationStack<'o, 'tcx>) -> TraitObligationStackList<'o, 'tcx> {
3175 TraitObligationStackList { cache: r.cache(), head: Some(r) }
3176 }
3177
3178 fn head(&self) -> Option<&'o TraitObligationStack<'o, 'tcx>> {
3179 self.head
3180 }
3181
3182 fn depth(&self) -> usize {
3183 if let Some(head) = self.head { head.depth } else { 0 }
3184 }
3185}
3186
3187impl<'o, 'tcx> Iterator for TraitObligationStackList<'o, 'tcx> {
3188 type Item = &'o TraitObligationStack<'o, 'tcx>;
3189
3190 fn next(&mut self) -> Option<&'o TraitObligationStack<'o, 'tcx>> {
3191 let o = self.head?;
3192 *self = o.previous;
3193 Some(o)
3194 }
3195}
3196
3197impl<'o, 'tcx> fmt::Debug for TraitObligationStack<'o, 'tcx> {
3198 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3199 write!(f, "TraitObligationStack({:?})", self.obligation)
3200 }
3201}
3202
3203pub(crate) enum ProjectionMatchesProjection {
3204 Yes,
3205 Ambiguous,
3206 No,
3207}