rustc_trait_selection/traits/select/mod.rs
1//! Candidate selection. See the [rustc dev guide] for more information on how this works.
2//!
3//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/resolution.html#selection
4
5use std::assert_matches::assert_matches;
6use std::cell::{Cell, RefCell};
7use std::fmt::{self, Display};
8use std::ops::ControlFlow;
9use std::{cmp, iter};
10
11use hir::def::DefKind;
12use rustc_data_structures::fx::{FxIndexMap, FxIndexSet};
13use rustc_data_structures::stack::ensure_sufficient_stack;
14use rustc_errors::{Diag, EmissionGuarantee};
15use rustc_hir as hir;
16use rustc_hir::LangItem;
17use rustc_hir::def_id::DefId;
18use rustc_infer::infer::BoundRegionConversionTime::{self, HigherRankedType};
19use rustc_infer::infer::DefineOpaqueTypes;
20use rustc_infer::infer::at::ToTrace;
21use rustc_infer::infer::relate::TypeRelation;
22use rustc_infer::traits::{PredicateObligations, TraitObligation};
23use rustc_middle::bug;
24use rustc_middle::dep_graph::{DepNodeIndex, dep_kinds};
25pub use rustc_middle::traits::select::*;
26use rustc_middle::ty::abstract_const::NotConstEvaluatable;
27use rustc_middle::ty::error::TypeErrorToStringExt;
28use rustc_middle::ty::print::{PrintTraitRefExt as _, with_no_trimmed_paths};
29use rustc_middle::ty::{
30 self, DeepRejectCtxt, GenericArgsRef, PolyProjectionPredicate, Ty, TyCtxt, TypeFoldable,
31 TypeVisitableExt, TypingMode, Upcast, elaborate,
32};
33use rustc_span::{Symbol, sym};
34use tracing::{debug, instrument, trace};
35
36use self::EvaluationResult::*;
37use self::SelectionCandidate::*;
38use super::coherence::{self, Conflict};
39use super::project::ProjectionTermObligation;
40use super::util::closure_trait_ref_and_return_type;
41use super::{
42 ImplDerivedCause, Normalized, Obligation, ObligationCause, ObligationCauseCode, Overflow,
43 PolyTraitObligation, PredicateObligation, Selection, SelectionError, SelectionResult,
44 TraitQueryMode, const_evaluatable, project, util, wf,
45};
46use crate::error_reporting::InferCtxtErrorExt;
47use crate::infer::{InferCtxt, InferOk, TypeFreshener};
48use crate::solve::InferCtxtSelectExt as _;
49use crate::traits::normalize::{normalize_with_depth, normalize_with_depth_to};
50use crate::traits::project::{ProjectAndUnifyResult, ProjectionCacheKeyExt};
51use crate::traits::{
52 EvaluateConstErr, ProjectionCacheKey, Unimplemented, effects, sizedness_fast_path,
53};
54
55mod _match;
56mod candidate_assembly;
57mod confirmation;
58
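/// An illustrative sketch (hypothetical code, not taken from the compiler) of the
/// kind of overlap check that produces a `DownstreamCrate` cause below:
///
/// ```ignore (illustrative)
/// trait LocalTrait {}
/// impl<T: std::fmt::Display> LocalTrait for T {}
/// impl<T> LocalTrait for Vec<T> {}
/// // A downstream crate could still add `impl Display for Vec<TheirType>`, so
/// // coherence reports the overlap as ambiguous and notes that downstream crates
/// // may implement `Display` for `Vec<_>`.
/// ```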
59#[derive(Clone, Debug, Eq, PartialEq, Hash)]
60pub enum IntercrateAmbiguityCause<'tcx> {
61 DownstreamCrate { trait_ref: ty::TraitRef<'tcx>, self_ty: Option<Ty<'tcx>> },
62 UpstreamCrateUpdate { trait_ref: ty::TraitRef<'tcx>, self_ty: Option<Ty<'tcx>> },
63 ReservationImpl { message: Symbol },
64}
65
66impl<'tcx> IntercrateAmbiguityCause<'tcx> {
67 /// Emits notes when the overlap is caused by complex intercrate ambiguities.
68 /// See #23980 for details.
69 pub fn add_intercrate_ambiguity_hint<G: EmissionGuarantee>(&self, err: &mut Diag<'_, G>) {
70 err.note(self.intercrate_ambiguity_hint());
71 }
72
73 pub fn intercrate_ambiguity_hint(&self) -> String {
74 with_no_trimmed_paths!(match self {
75 IntercrateAmbiguityCause::DownstreamCrate { trait_ref, self_ty } => {
76 format!(
77 "downstream crates may implement trait `{trait_desc}`{self_desc}",
78 trait_desc = trait_ref.print_trait_sugared(),
79 self_desc = if let Some(self_ty) = self_ty {
80 format!(" for type `{self_ty}`")
81 } else {
82 String::new()
83 }
84 )
85 }
86 IntercrateAmbiguityCause::UpstreamCrateUpdate { trait_ref, self_ty } => {
87 format!(
88 "upstream crates may add a new impl of trait `{trait_desc}`{self_desc} \
89 in future versions",
90 trait_desc = trait_ref.print_trait_sugared(),
91 self_desc = if let Some(self_ty) = self_ty {
92 format!(" for type `{self_ty}`")
93 } else {
94 String::new()
95 }
96 )
97 }
98 IntercrateAmbiguityCause::ReservationImpl { message } => message.to_string(),
99 })
100 }
101}
102
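/// A rough sketch of how callers typically drive selection (illustrative only; it
/// assumes an `InferCtxt` named `infcx` and a trait `obligation` are in scope):
///
/// ```ignore (illustrative)
/// let mut selcx = SelectionContext::new(&infcx);
/// match selcx.select(&obligation) {
///     Ok(Some(source)) => { /* the obligation holds via `source` */ }
///     Ok(None) => { /* ambiguous: not enough type information yet */ }
///     Err(e) => { /* selection error, e.g. `Unimplemented` */ }
/// }
/// ```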
103pub struct SelectionContext<'cx, 'tcx> {
104 pub infcx: &'cx InferCtxt<'tcx>,
105
106 /// Freshener used specifically for entries on the obligation
107 /// stack. This ensures that all entries on the stack at one time
108 /// will have the same set of placeholder entries, which is
109 /// important for checking for trait bounds that recursively
110 /// require themselves.
111 freshener: TypeFreshener<'cx, 'tcx>,
112
113 /// If `intercrate` is set, we remember predicates which were
114 /// considered ambiguous because of impls potentially added in other crates.
115 /// This is used in coherence to give improved diagnostics.
116 /// We don't do this until we detect a coherence error because it can
117 /// lead to false overflow results (#47139) and because always
118 /// computing it may negatively impact performance.
119 intercrate_ambiguity_causes: Option<FxIndexSet<IntercrateAmbiguityCause<'tcx>>>,
120
121 /// The mode that trait queries run in, which informs our error handling
122 /// policy. In essence, canonicalized queries need their errors propagated
123 /// rather than immediately reported because we do not have accurate spans.
124 query_mode: TraitQueryMode,
125}
126
127// A stack that walks back up the stack frame.
128struct TraitObligationStack<'prev, 'tcx> {
129 obligation: &'prev PolyTraitObligation<'tcx>,
130
131 /// The trait predicate from `obligation` but "freshened" with the
132 /// selection-context's freshener. Used to check for recursion.
133 fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
134
135 /// Starts out equal to `depth` -- if, during evaluation, we
136 /// encounter a cycle, then we will set this flag to the minimum
137 /// depth of that cycle for all participants in the cycle. These
138 /// participants will then forego caching their results. This is
139 /// not the most efficient solution, but it addresses #60010. The
140 /// problem we are trying to prevent:
141 ///
142 /// - If you have `A: AutoTrait` requires `B: AutoTrait` and `C: NonAutoTrait`
143 /// - `B: AutoTrait` requires `A: AutoTrait` (coinductive cycle, ok)
144 /// - `C: NonAutoTrait` requires `A: AutoTrait` (non-coinductive cycle, not ok)
145 ///
146 /// you don't want to cache that `B: AutoTrait` or `A: AutoTrait`
147 /// is `EvaluatedToOk`; this is because they were only considered
148 /// ok on the premise that `A: AutoTrait` held, but we did in fact
149 /// encounter a problem (later on) with `A: AutoTrait`. So we
150 /// currently set a flag on the stack node for `B: AutoTrait` (as
151 /// well as the second instance of `A: AutoTrait`) to suppress
152 /// caching.
153 ///
154 /// This is a simple, targeted fix. A more-performant fix requires
155 /// deeper changes, but would permit more caching: we could
156 /// basically defer caching until we have fully evaluated the
157 /// tree, and then cache the entire tree at once. In any case, the
158 /// performance impact here shouldn't be so horrible: every time
159 /// this is hit, we do cache at least one trait, so we only
160 /// evaluate each member of a cycle up to N times, where N is the
161 /// length of the cycle. This means the performance impact is
162 /// bounded and we shouldn't have any terrible worst-cases.
163 reached_depth: Cell<usize>,
164
165 previous: TraitObligationStackList<'prev, 'tcx>,
166
167 /// The number of parent frames plus one (thus, the topmost frame has depth 1).
168 depth: usize,
169
170 /// The depth-first number of this node in the search graph -- a
171 /// pre-order index. Basically, a freshly incremented counter.
172 dfn: usize,
173}
174
175struct SelectionCandidateSet<'tcx> {
176 /// A list of candidates that definitely apply to the current
177 /// obligation (meaning: types unify).
178 vec: Vec<SelectionCandidate<'tcx>>,
179
180 /// If `true`, then there were candidates that might or might
181 /// not have applied, but we couldn't tell. This occurs when some
182 /// of the input types are type variables, in which case there are
183 /// various "builtin" rules that might or might not trigger.
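    /// For example (illustrative), `$0: Copy` for an unresolved type variable `$0`
    /// sets this flag, since the builtin `Copy` conditions cannot be determined yet.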
184 ambiguous: bool,
185}
186
187#[derive(PartialEq, Eq, Debug, Clone)]
188struct EvaluatedCandidate<'tcx> {
189 candidate: SelectionCandidate<'tcx>,
190 evaluation: EvaluationResult,
191}
192
193/// When does the builtin impl for `T: Trait` apply?
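///
/// For example (an illustrative sketch of the builtin `Copy` conditions):
/// `(A, B): Copy` yields `Where([A, B])`, `u32: Copy` yields `Where([])`, a type
/// with no builtin impl yields `None`, and a still-unresolved inference variable
/// yields `Ambiguous`.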
194#[derive(Debug)]
195enum BuiltinImplConditions<'tcx> {
196 /// The impl is conditional on `T1, T2, ...: Trait`.
197 Where(ty::Binder<'tcx, Vec<Ty<'tcx>>>),
198 /// There is no built-in impl. There may be some other
199 /// candidate (a where-clause or user-defined impl).
200 None,
201 /// It is unknown whether there is an impl.
202 Ambiguous,
203}
204
205impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
206 pub fn new(infcx: &'cx InferCtxt<'tcx>) -> SelectionContext<'cx, 'tcx> {
207 SelectionContext {
208 infcx,
209 freshener: infcx.freshener(),
210 intercrate_ambiguity_causes: None,
211 query_mode: TraitQueryMode::Standard,
212 }
213 }
214
215 pub fn with_query_mode(
216 infcx: &'cx InferCtxt<'tcx>,
217 query_mode: TraitQueryMode,
218 ) -> SelectionContext<'cx, 'tcx> {
219 debug!(?query_mode, "with_query_mode");
220 SelectionContext { query_mode, ..SelectionContext::new(infcx) }
221 }
222
223 /// Enables tracking of intercrate ambiguity causes. See
224 /// the documentation of [`Self::intercrate_ambiguity_causes`] for more.
225 pub fn enable_tracking_intercrate_ambiguity_causes(&mut self) {
226 assert_matches!(self.infcx.typing_mode(), TypingMode::Coherence);
227 assert!(self.intercrate_ambiguity_causes.is_none());
228 self.intercrate_ambiguity_causes = Some(FxIndexSet::default());
229 debug!("selcx: enable_tracking_intercrate_ambiguity_causes");
230 }
231
232 /// Gets the intercrate ambiguity causes collected since tracking
233 /// was enabled and disables tracking at the same time. If
234 /// tracking is not enabled, just returns an empty set.
235 pub fn take_intercrate_ambiguity_causes(
236 &mut self,
237 ) -> FxIndexSet<IntercrateAmbiguityCause<'tcx>> {
238 assert_matches!(self.infcx.typing_mode(), TypingMode::Coherence);
239 self.intercrate_ambiguity_causes.take().unwrap_or_default()
240 }
241
242 pub fn tcx(&self) -> TyCtxt<'tcx> {
243 self.infcx.tcx
244 }
245
246 ///////////////////////////////////////////////////////////////////////////
247 // Selection
248 //
249 // The selection phase tries to identify *how* an obligation will
250 // be resolved. For example, it will identify which impl or
251 // parameter bound is to be used. The process can be inconclusive
252 // if the self type in the obligation is not fully inferred. Selection
253 // can result in an error in one of two ways:
254 //
255 // 1. If no applicable impl or parameter bound can be found.
256 // 2. If the output type parameters in the obligation do not match
257 // those specified by the impl/bound. For example, if the obligation
258 // is `Vec<Foo>: Iterable<Bar>`, but the impl specifies
259 // `impl<T> Iterable<T> for Vec<T>`, then an error would result.
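    //
    // An illustrative sketch of case 2 (hypothetical trait and types):
    //
    //     trait Iterable<A> {}
    //     impl<T> Iterable<T> for Vec<T> {}
    //
    //     // For the obligation `Vec<Foo>: Iterable<Bar>`, matching the impl forces
    //     // the output parameter `Bar` to unify with `Foo`, which fails.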
260
261 /// Attempts to satisfy the obligation. If successful, this will affect the surrounding
262 /// type environment by performing unification.
263 #[instrument(level = "debug", skip(self), ret)]
264 pub fn poly_select(
265 &mut self,
266 obligation: &PolyTraitObligation<'tcx>,
267 ) -> SelectionResult<'tcx, Selection<'tcx>> {
268 assert!(!self.infcx.next_trait_solver());
269
270 let candidate = match self.select_from_obligation(obligation) {
271 Err(SelectionError::Overflow(OverflowError::Canonical)) => {
272 // In standard mode, overflow must have been caught and reported
273 // earlier.
274 assert!(self.query_mode == TraitQueryMode::Canonical);
275 return Err(SelectionError::Overflow(OverflowError::Canonical));
276 }
277 Err(e) => {
278 return Err(e);
279 }
280 Ok(None) => {
281 return Ok(None);
282 }
283 Ok(Some(candidate)) => candidate,
284 };
285
286 match self.confirm_candidate(obligation, candidate) {
287 Err(SelectionError::Overflow(OverflowError::Canonical)) => {
288 assert!(self.query_mode == TraitQueryMode::Canonical);
289 Err(SelectionError::Overflow(OverflowError::Canonical))
290 }
291 Err(e) => Err(e),
292 Ok(candidate) => Ok(Some(candidate)),
293 }
294 }
295
296 pub fn select(
297 &mut self,
298 obligation: &TraitObligation<'tcx>,
299 ) -> SelectionResult<'tcx, Selection<'tcx>> {
300 if self.infcx.next_trait_solver() {
301 return self.infcx.select_in_new_trait_solver(obligation);
302 }
303
304 self.poly_select(&Obligation {
305 cause: obligation.cause.clone(),
306 param_env: obligation.param_env,
307 predicate: ty::Binder::dummy(obligation.predicate),
308 recursion_depth: obligation.recursion_depth,
309 })
310 }
311
312 fn select_from_obligation(
313 &mut self,
314 obligation: &PolyTraitObligation<'tcx>,
315 ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
316 debug_assert!(!obligation.predicate.has_escaping_bound_vars());
317
318 let pec = &ProvisionalEvaluationCache::default();
319 let stack = self.push_stack(TraitObligationStackList::empty(pec), obligation);
320
321 self.candidate_from_obligation(&stack)
322 }
323
324 #[instrument(level = "debug", skip(self), ret)]
325 fn candidate_from_obligation<'o>(
326 &mut self,
327 stack: &TraitObligationStack<'o, 'tcx>,
328 ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
329 debug_assert!(!self.infcx.next_trait_solver());
330 // Watch out for overflow. This intentionally bypasses (and does
331 // not update) the cache.
332 self.check_recursion_limit(stack.obligation, stack.obligation)?;
333
334 // Check the cache. Note that we freshen the trait-ref
335 // separately rather than using `stack.fresh_trait_ref` --
336 // this is because we want the unbound variables to be
337 // replaced with fresh types starting from index 0.
338 let cache_fresh_trait_pred = self.infcx.freshen(stack.obligation.predicate);
339 debug!(?cache_fresh_trait_pred);
340 debug_assert!(!stack.obligation.predicate.has_escaping_bound_vars());
341
342 if let Some(c) =
343 self.check_candidate_cache(stack.obligation.param_env, cache_fresh_trait_pred)
344 {
345 debug!("CACHE HIT");
346 return c;
347 }
348
349 // If no match, compute result and insert into cache.
350 //
351 // FIXME(nikomatsakis) -- this cache is not taking into
352 // account cycles that may have occurred in forming the
353 // candidate. I don't know of any specific problems that
354 // result but it seems awfully suspicious.
355 let (candidate, dep_node) =
356 self.in_task(|this| this.candidate_from_obligation_no_cache(stack));
357
358 debug!("CACHE MISS");
359 self.insert_candidate_cache(
360 stack.obligation.param_env,
361 cache_fresh_trait_pred,
362 dep_node,
363 candidate.clone(),
364 );
365 candidate
366 }
367
368 fn candidate_from_obligation_no_cache<'o>(
369 &mut self,
370 stack: &TraitObligationStack<'o, 'tcx>,
371 ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
372 if let Err(conflict) = self.is_knowable(stack) {
373 debug!("coherence stage: not knowable");
374 if self.intercrate_ambiguity_causes.is_some() {
375 debug!("evaluate_stack: intercrate_ambiguity_causes is some");
376 // Heuristic: show the diagnostics when there are no candidates in the crate.
377 if let Ok(candidate_set) = self.assemble_candidates(stack) {
378 let mut no_candidates_apply = true;
379
380 for c in candidate_set.vec.iter() {
381 if self.evaluate_candidate(stack, c)?.may_apply() {
382 no_candidates_apply = false;
383 break;
384 }
385 }
386
387 if !candidate_set.ambiguous && no_candidates_apply {
388 let trait_ref = self.infcx.resolve_vars_if_possible(
389 stack.obligation.predicate.skip_binder().trait_ref,
390 );
391 if !trait_ref.references_error() {
392 let self_ty = trait_ref.self_ty();
393 let self_ty = self_ty.has_concrete_skeleton().then(|| self_ty);
394 let cause = if let Conflict::Upstream = conflict {
395 IntercrateAmbiguityCause::UpstreamCrateUpdate { trait_ref, self_ty }
396 } else {
397 IntercrateAmbiguityCause::DownstreamCrate { trait_ref, self_ty }
398 };
399 debug!(?cause, "evaluate_stack: pushing cause");
400 self.intercrate_ambiguity_causes.as_mut().unwrap().insert(cause);
401 }
402 }
403 }
404 }
405 return Ok(None);
406 }
407
408 let candidate_set = self.assemble_candidates(stack)?;
409
410 if candidate_set.ambiguous {
411 debug!("candidate set contains ambig");
412 return Ok(None);
413 }
414
415 let candidates = candidate_set.vec;
416
417 debug!(?stack, ?candidates, "assembled {} candidates", candidates.len());
418
419 // At this point, we know that each of the entries in the
420 // candidate set is *individually* applicable. Now we have to
421 // figure out if they contain mutual incompatibilities. This
422 // frequently arises if we have an unconstrained input type --
423 // for example, we are looking for `$0: Eq` where `$0` is some
424 // unconstrained type variable. In that case, we'll get a
425 // candidate which assumes $0 == int, one that assumes `$0 ==
426 // usize`, etc. This spells an ambiguity.
427
428 let mut candidates = self.filter_impls(candidates, stack.obligation);
429
430 // If there is more than one candidate, first winnow them down
431 // by considering extra conditions (nested obligations and so
432 // forth). We don't winnow if there is exactly one
433 // candidate. This is a relatively minor distinction but it
434 // can lead to better inference and error-reporting. An
435 // example would be if there was an impl:
436 //
437 // impl<T:Clone> Vec<T> { fn push_clone(...) { ... } }
438 //
439 // and we were to see some code `foo.push_clone()` where `foo`
440 // is a `Vec<Bar>` and `Bar` does not implement `Clone`. If
441 // we were to winnow, we'd wind up with zero candidates.
442 // Instead, we select the right impl now but report "`Bar` does
443 // not implement `Clone`".
444 if candidates.len() == 1 {
445 return self.filter_reservation_impls(candidates.pop().unwrap());
446 }
447
448 // Winnow, but record the exact outcome of evaluation, which
449 // is needed for specialization. Propagate overflow if it occurs.
450 let candidates = candidates
451 .into_iter()
452 .map(|c| match self.evaluate_candidate(stack, &c) {
453 Ok(eval) if eval.may_apply() => {
454 Ok(Some(EvaluatedCandidate { candidate: c, evaluation: eval }))
455 }
456 Ok(_) => Ok(None),
457 Err(OverflowError::Canonical) => Err(Overflow(OverflowError::Canonical)),
458 Err(OverflowError::Error(e)) => Err(Overflow(OverflowError::Error(e))),
459 })
460 .flat_map(Result::transpose)
461 .collect::<Result<Vec<_>, _>>()?;
462
463 debug!(?stack, ?candidates, "{} potentially applicable candidates", candidates.len());
464 // If there are *NO* candidates, then there are no impls --
465 // that we know of, anyway. Note that in the case where there
466 // are unbound type variables within the obligation, it might
467 // be the case that you could still satisfy the obligation
468 // from another crate by instantiating the type variables with
469 // a type from another crate that does have an impl. This case
470 // is checked for in `evaluate_stack` (and hence users
471 // who might care about this case, like coherence, should use
472 // that function).
473 if candidates.is_empty() {
474 // If there's an error type, 'downgrade' our result from
475 // `Err(Unimplemented)` to `Ok(None)`. This helps us avoid
476 // emitting additional spurious errors, since we're guaranteed
477 // to have emitted at least one.
478 if stack.obligation.predicate.references_error() {
479 debug!(?stack.obligation.predicate, "found error type in predicate, treating as ambiguous");
480 Ok(None)
481 } else {
482 Err(Unimplemented)
483 }
484 } else {
485 let has_non_region_infer = stack.obligation.predicate.has_non_region_infer();
486 if let Some(candidate) = self.winnow_candidates(has_non_region_infer, candidates) {
487 self.filter_reservation_impls(candidate)
488 } else {
489 Ok(None)
490 }
491 }
492 }
493
494 ///////////////////////////////////////////////////////////////////////////
495 // EVALUATION
496 //
497 // Tests whether an obligation can be selected or whether an impl
498 // can be applied to particular types. It skips the "confirmation"
499 // step and hence completely ignores output type parameters.
500 //
501 // The result is "true" if the obligation *may* hold and "false" if
502 // we can be sure it does not.
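    //
    // For example (illustrative): `u32: Clone` evaluates to `EvaluatedToOk`,
    // `$0: Clone` for an unresolved type variable `$0` evaluates to
    // `EvaluatedToAmbig` ("may hold"), and `String: Copy` evaluates to
    // `EvaluatedToErr`.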
503
504 /// Evaluates whether the obligation `obligation` can be satisfied
505 /// and returns an `EvaluationResult`. This is meant for the
506 /// *initial* call.
507 ///
508 /// Do not use this directly, use `infcx.evaluate_obligation` instead.
509 pub fn evaluate_root_obligation(
510 &mut self,
511 obligation: &PredicateObligation<'tcx>,
512 ) -> Result<EvaluationResult, OverflowError> {
513 debug_assert!(!self.infcx.next_trait_solver());
514 self.evaluation_probe(|this| {
515 let goal =
516 this.infcx.resolve_vars_if_possible((obligation.predicate, obligation.param_env));
517 let mut result = this.evaluate_predicate_recursively(
518 TraitObligationStackList::empty(&ProvisionalEvaluationCache::default()),
519 obligation.clone(),
520 )?;
521 // If the predicate has done any inference, then downgrade the
522 // result to ambiguous.
523 if this.infcx.resolve_vars_if_possible(goal) != goal {
524 result = result.max(EvaluatedToAmbig);
525 }
526 Ok(result)
527 })
528 }
529
530 /// Computes the evaluation result of `op`, discarding any constraints.
531 ///
532 /// This also runs for leak check to allow higher ranked region errors to impact
533 /// selection. By default it checks for leaks from all universes created inside of
534 /// `op`, but this can be overwritten if necessary.
535 fn evaluation_probe(
536 &mut self,
537 op: impl FnOnce(&mut Self) -> Result<EvaluationResult, OverflowError>,
538 ) -> Result<EvaluationResult, OverflowError> {
539 self.infcx.probe(|snapshot| -> Result<EvaluationResult, OverflowError> {
540 let outer_universe = self.infcx.universe();
541 let result = op(self)?;
542
543 match self.infcx.leak_check(outer_universe, Some(snapshot)) {
544 Ok(()) => {}
545 Err(_) => return Ok(EvaluatedToErr),
546 }
547
548 if self.infcx.opaque_types_added_in_snapshot(snapshot) {
549 return Ok(result.max(EvaluatedToOkModuloOpaqueTypes));
550 }
551
552 if self.infcx.region_constraints_added_in_snapshot(snapshot) {
553 Ok(result.max(EvaluatedToOkModuloRegions))
554 } else {
555 Ok(result)
556 }
557 })
558 }
559
560 /// Evaluates the predicates in `predicates` recursively. This may
561 /// guide inference. If this is not desired, run it inside of an
562 /// inference `probe`.
564 #[instrument(skip(self, stack), level = "debug")]
565 fn evaluate_predicates_recursively<'o, I>(
566 &mut self,
567 stack: TraitObligationStackList<'o, 'tcx>,
568 predicates: I,
569 ) -> Result<EvaluationResult, OverflowError>
570 where
571 I: IntoIterator<Item = PredicateObligation<'tcx>> + std::fmt::Debug,
572 {
573 let mut result = EvaluatedToOk;
574 for mut obligation in predicates {
575 obligation.set_depth_from_parent(stack.depth());
576 let eval = self.evaluate_predicate_recursively(stack, obligation.clone())?;
577 if let EvaluatedToErr = eval {
578 // fast-path - EvaluatedToErr is the top of the lattice,
579 // so we don't need to look on the other predicates.
580 return Ok(EvaluatedToErr);
581 } else {
582 result = cmp::max(result, eval);
583 }
584 }
585 Ok(result)
586 }
587
588 #[instrument(
589 level = "debug",
590 skip(self, previous_stack),
591 fields(previous_stack = ?previous_stack.head()),
592 ret,
593 )]
594 fn evaluate_predicate_recursively<'o>(
595 &mut self,
596 previous_stack: TraitObligationStackList<'o, 'tcx>,
597 obligation: PredicateObligation<'tcx>,
598 ) -> Result<EvaluationResult, OverflowError> {
599 debug_assert!(!self.infcx.next_trait_solver());
600 // `previous_stack` stores a `PolyTraitObligation`, while `obligation` is
601 // a `PredicateObligation`. These are distinct types, so we can't
602 // use any `Option` combinator method that would force them to be
603 // the same.
604 match previous_stack.head() {
605 Some(h) => self.check_recursion_limit(&obligation, h.obligation)?,
606 None => self.check_recursion_limit(&obligation, &obligation)?,
607 }
608
609 if sizedness_fast_path(self.tcx(), obligation.predicate) {
610 return Ok(EvaluatedToOk);
611 }
612
613 ensure_sufficient_stack(|| {
614 let bound_predicate = obligation.predicate.kind();
615 match bound_predicate.skip_binder() {
616 ty::PredicateKind::Clause(ty::ClauseKind::Trait(t)) => {
617 let t = bound_predicate.rebind(t);
618 debug_assert!(!t.has_escaping_bound_vars());
619 let obligation = obligation.with(self.tcx(), t);
620 self.evaluate_trait_predicate_recursively(previous_stack, obligation)
621 }
622
623 ty::PredicateKind::Clause(ty::ClauseKind::HostEffect(data)) => {
624 self.infcx.enter_forall(bound_predicate.rebind(data), |data| {
625 match effects::evaluate_host_effect_obligation(
626 self,
627 &obligation.with(self.tcx(), data),
628 ) {
629 Ok(nested) => {
630 self.evaluate_predicates_recursively(previous_stack, nested)
631 }
632 Err(effects::EvaluationFailure::Ambiguous) => Ok(EvaluatedToAmbig),
633 Err(effects::EvaluationFailure::NoSolution) => Ok(EvaluatedToErr),
634 }
635 })
636 }
637
638 ty::PredicateKind::Subtype(p) => {
639 let p = bound_predicate.rebind(p);
640 // Does this code ever run?
641 match self.infcx.subtype_predicate(&obligation.cause, obligation.param_env, p) {
642 Ok(Ok(InferOk { obligations, .. })) => {
643 self.evaluate_predicates_recursively(previous_stack, obligations)
644 }
645 Ok(Err(_)) => Ok(EvaluatedToErr),
646 Err(..) => Ok(EvaluatedToAmbig),
647 }
648 }
649
650 ty::PredicateKind::Coerce(p) => {
651 let p = bound_predicate.rebind(p);
652 // Does this code ever run?
653 match self.infcx.coerce_predicate(&obligation.cause, obligation.param_env, p) {
654 Ok(Ok(InferOk { obligations, .. })) => {
655 self.evaluate_predicates_recursively(previous_stack, obligations)
656 }
657 Ok(Err(_)) => Ok(EvaluatedToErr),
658 Err(..) => Ok(EvaluatedToAmbig),
659 }
660 }
661
662 ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(term)) => {
663 // So, there is a bit going on here. First, `WellFormed` predicates
664 // are coinductive, like trait predicates with auto traits.
665 // This means that we need to detect if we have recursively
666 // evaluated `WellFormed(X)`. Otherwise, we would run into
667 // a "natural" overflow error.
668 //
669 // Now, the next question is whether we need to do anything
670 // special with caching. Considering the following tree:
671 // - `WF(Foo<T>)`
672 // - `Bar<T>: Send`
673 // - `WF(Foo<T>)`
674 // - `Foo<T>: Trait`
675 // In this case, the innermost `WF(Foo<T>)` should return
676 // `EvaluatedToOk`, since it's coinductive. Then if
677 // `Bar<T>: Send` is resolved to `EvaluatedToOk`, it can be
678 // inserted into a cache (because without thinking about `WF`
679 // goals, it isn't in a cycle). If `Foo<T>: Trait` later doesn't
680 // hold, then `Bar<T>: Send` shouldn't hold. Therefore, we
681 // *do* need to keep track of coinductive cycles.
682
683 let cache = previous_stack.cache;
684 let dfn = cache.next_dfn();
685
686 for stack_term in previous_stack.cache.wf_args.borrow().iter().rev() {
687 if stack_term.0 != term {
688 continue;
689 }
690 debug!("WellFormed({:?}) on stack", term);
691 if let Some(stack) = previous_stack.head {
692 // Okay, let's imagine we have two different stacks:
693 // `T: NonAutoTrait -> WF(T) -> T: NonAutoTrait`
694 // `WF(T) -> T: NonAutoTrait -> WF(T)`
695 // Because of this, we need to check that all
696 // predicates between the WF goals are coinductive.
697 // Otherwise, we can say that `T: NonAutoTrait` is
698 // true.
699 // Let's imagine we have a predicate stack like
700 // `Foo: Bar -> WF(T) -> T: NonAutoTrait -> T: Auto`
701 // depth ^1 ^2 ^3
702 // and the current predicate is `WF(T)`. `wf_args`
703 // would contain `(T, 1)`. We want to check all
704 // trait predicates greater than `1`. The previous
705 // stack would be `T: Auto`.
706 let cycle = stack.iter().take_while(|s| s.depth > stack_term.1);
707 let tcx = self.tcx();
708 let cycle = cycle.map(|stack| stack.obligation.predicate.upcast(tcx));
709 if self.coinductive_match(cycle) {
710 stack.update_reached_depth(stack_term.1);
711 return Ok(EvaluatedToOk);
712 } else {
713 return Ok(EvaluatedToAmbigStackDependent);
714 }
715 }
716 return Ok(EvaluatedToOk);
717 }
718
719 match wf::obligations(
720 self.infcx,
721 obligation.param_env,
722 obligation.cause.body_id,
723 obligation.recursion_depth + 1,
724 term,
725 obligation.cause.span,
726 ) {
727 Some(obligations) => {
728 cache.wf_args.borrow_mut().push((term, previous_stack.depth()));
729 let result =
730 self.evaluate_predicates_recursively(previous_stack, obligations);
731 cache.wf_args.borrow_mut().pop();
732
733 let result = result?;
734
735 if !result.must_apply_modulo_regions() {
736 cache.on_failure(dfn);
737 }
738
739 cache.on_completion(dfn);
740
741 Ok(result)
742 }
743 None => Ok(EvaluatedToAmbig),
744 }
745 }
746
747 ty::PredicateKind::Clause(ty::ClauseKind::TypeOutlives(pred)) => {
748 // A global type with no free lifetimes or generic parameters
749 // outlives anything.
750 if pred.0.has_free_regions()
751 || pred.0.has_bound_regions()
752 || pred.0.has_non_region_infer()
754 {
755 Ok(EvaluatedToOkModuloRegions)
756 } else {
757 Ok(EvaluatedToOk)
758 }
759 }
760
761 ty::PredicateKind::Clause(ty::ClauseKind::RegionOutlives(..)) => {
762 // We do not consider region relationships when evaluating trait matches.
763 Ok(EvaluatedToOkModuloRegions)
764 }
765
766 ty::PredicateKind::DynCompatible(trait_def_id) => {
767 if self.tcx().is_dyn_compatible(trait_def_id) {
768 Ok(EvaluatedToOk)
769 } else {
770 Ok(EvaluatedToErr)
771 }
772 }
773
774 ty::PredicateKind::Clause(ty::ClauseKind::Projection(data)) => {
775 let data = bound_predicate.rebind(data);
776 let project_obligation = obligation.with(self.tcx(), data);
777 match project::poly_project_and_unify_term(self, &project_obligation) {
778 ProjectAndUnifyResult::Holds(mut subobligations) => {
779 'compute_res: {
780 // If we've previously marked this projection as 'complete', then
781 // use the final cached result (either `EvaluatedToOk` or
782 // `EvaluatedToOkModuloRegions`), and skip re-evaluating the
783 // sub-obligations.
784 if let Some(key) =
785 ProjectionCacheKey::from_poly_projection_obligation(
786 self,
787 &project_obligation,
788 )
789 {
790 if let Some(cached_res) = self
791 .infcx
792 .inner
793 .borrow_mut()
794 .projection_cache()
795 .is_complete(key)
796 {
797 break 'compute_res Ok(cached_res);
798 }
799 }
800
801 // Need to explicitly set the depth of nested goals here as
802 // projection obligations can cycle by themselves and in
803 // `evaluate_predicates_recursively` we only add the depth
804 // for parent trait goals because only these get added to the
805 // `TraitObligationStackList`.
806 for subobligation in subobligations.iter_mut() {
807 subobligation.set_depth_from_parent(obligation.recursion_depth);
808 }
809 let res = self.evaluate_predicates_recursively(
810 previous_stack,
811 subobligations,
812 );
813 if let Ok(eval_rslt) = res
814 && (eval_rslt == EvaluatedToOk
815 || eval_rslt == EvaluatedToOkModuloRegions)
816 && let Some(key) =
817 ProjectionCacheKey::from_poly_projection_obligation(
818 self,
819 &project_obligation,
820 )
821 {
822 // If the result is something that we can cache, then mark this
823 // entry as 'complete'. This will allow us to skip evaluating the
824 // subobligations at all the next time we evaluate the projection
825 // predicate.
826 self.infcx
827 .inner
828 .borrow_mut()
829 .projection_cache()
830 .complete(key, eval_rslt);
831 }
832 res
833 }
834 }
835 ProjectAndUnifyResult::FailedNormalization => Ok(EvaluatedToAmbig),
836 ProjectAndUnifyResult::Recursive => Ok(EvaluatedToAmbigStackDependent),
837 ProjectAndUnifyResult::MismatchedProjectionTypes(_) => Ok(EvaluatedToErr),
838 }
839 }
840
841 ty::PredicateKind::Clause(ty::ClauseKind::ConstEvaluatable(uv)) => {
842 match const_evaluatable::is_const_evaluatable(
843 self.infcx,
844 uv,
845 obligation.param_env,
846 obligation.cause.span,
847 ) {
848 Ok(()) => Ok(EvaluatedToOk),
849 Err(NotConstEvaluatable::MentionsInfer) => Ok(EvaluatedToAmbig),
850 Err(NotConstEvaluatable::MentionsParam) => Ok(EvaluatedToErr),
851 Err(_) => Ok(EvaluatedToErr),
852 }
853 }
854
855 ty::PredicateKind::ConstEquate(c1, c2) => {
856 let tcx = self.tcx();
857 assert!(
858 tcx.features().generic_const_exprs(),
859 "`ConstEquate` without a feature gate: {c1:?} {c2:?}",
860 );
861
862 {
863 let c1 = tcx.expand_abstract_consts(c1);
864 let c2 = tcx.expand_abstract_consts(c2);
865 debug!(
866 "evaluate_predicate_recursively: equating consts:\nc1= {:?}\nc2= {:?}",
867 c1, c2
868 );
869
870 use rustc_hir::def::DefKind;
871 match (c1.kind(), c2.kind()) {
872 (ty::ConstKind::Unevaluated(a), ty::ConstKind::Unevaluated(b))
873 if a.def == b.def && tcx.def_kind(a.def) == DefKind::AssocConst =>
874 {
875 if let Ok(InferOk { obligations, value: () }) = self
876 .infcx
877 .at(&obligation.cause, obligation.param_env)
878 // Can define opaque types as this is only reachable with
879 // `generic_const_exprs`
880 .eq(
881 DefineOpaqueTypes::Yes,
882 ty::AliasTerm::from(a),
883 ty::AliasTerm::from(b),
884 )
885 {
886 return self.evaluate_predicates_recursively(
887 previous_stack,
888 obligations,
889 );
890 }
891 }
892 (_, ty::ConstKind::Unevaluated(_))
893 | (ty::ConstKind::Unevaluated(_), _) => (),
894 (_, _) => {
895 if let Ok(InferOk { obligations, value: () }) = self
896 .infcx
897 .at(&obligation.cause, obligation.param_env)
898 // Can define opaque types as this is only reachable with
899 // `generic_const_exprs`
900 .eq(DefineOpaqueTypes::Yes, c1, c2)
901 {
902 return self.evaluate_predicates_recursively(
903 previous_stack,
904 obligations,
905 );
906 }
907 }
908 }
909 }
910
911 let evaluate = |c: ty::Const<'tcx>| {
912 if let ty::ConstKind::Unevaluated(_) = c.kind() {
913 match crate::traits::try_evaluate_const(
914 self.infcx,
915 c,
916 obligation.param_env,
917 ) {
918 Ok(val) => Ok(val),
919 Err(e) => Err(e),
920 }
921 } else {
922 Ok(c)
923 }
924 };
925
926 match (evaluate(c1), evaluate(c2)) {
927 (Ok(c1), Ok(c2)) => {
928 match self.infcx.at(&obligation.cause, obligation.param_env).eq(
929 // Can define opaque types as this is only reachable with
930 // `generic_const_exprs`
931 DefineOpaqueTypes::Yes,
932 c1,
933 c2,
934 ) {
935 Ok(inf_ok) => self.evaluate_predicates_recursively(
936 previous_stack,
937 inf_ok.into_obligations(),
938 ),
939 Err(_) => Ok(EvaluatedToErr),
940 }
941 }
942 (Err(EvaluateConstErr::InvalidConstParamTy(..)), _)
943 | (_, Err(EvaluateConstErr::InvalidConstParamTy(..))) => Ok(EvaluatedToErr),
944 (Err(EvaluateConstErr::EvaluationFailure(..)), _)
945 | (_, Err(EvaluateConstErr::EvaluationFailure(..))) => Ok(EvaluatedToErr),
946 (Err(EvaluateConstErr::HasGenericsOrInfers), _)
947 | (_, Err(EvaluateConstErr::HasGenericsOrInfers)) => {
948 if c1.has_non_region_infer() || c2.has_non_region_infer() {
949 Ok(EvaluatedToAmbig)
950 } else {
951 // Two different constants using generic parameters ~> error.
952 Ok(EvaluatedToErr)
953 }
954 }
955 }
956 }
957 ty::PredicateKind::NormalizesTo(..) => {
958 bug!("NormalizesTo is only used by the new solver")
959 }
960 ty::PredicateKind::AliasRelate(..) => {
961 bug!("AliasRelate is only used by the new solver")
962 }
963 ty::PredicateKind::Ambiguous => Ok(EvaluatedToAmbig),
964 ty::PredicateKind::Clause(ty::ClauseKind::ConstArgHasType(ct, ty)) => {
965 let ct = self.infcx.shallow_resolve_const(ct);
966 let ct_ty = match ct.kind() {
967 ty::ConstKind::Infer(_) => {
968 return Ok(EvaluatedToAmbig);
969 }
970 ty::ConstKind::Error(_) => return Ok(EvaluatedToOk),
971 ty::ConstKind::Value(cv) => cv.ty,
972 ty::ConstKind::Unevaluated(uv) => {
973 self.tcx().type_of(uv.def).instantiate(self.tcx(), uv.args)
974 }
975 // FIXME(generic_const_exprs): See comment in `fulfill.rs`
976 ty::ConstKind::Expr(_) => return Ok(EvaluatedToOk),
977 ty::ConstKind::Placeholder(_) => {
978 bug!("placeholder const {:?} in old solver", ct)
979 }
980 ty::ConstKind::Bound(_, _) => bug!("escaping bound vars in {:?}", ct),
981 ty::ConstKind::Param(param_ct) => {
982 param_ct.find_ty_from_env(obligation.param_env)
983 }
984 };
985
986 match self.infcx.at(&obligation.cause, obligation.param_env).eq(
987 // Only really exercised by generic_const_exprs
988 DefineOpaqueTypes::Yes,
989 ct_ty,
990 ty,
991 ) {
992 Ok(inf_ok) => self.evaluate_predicates_recursively(
993 previous_stack,
994 inf_ok.into_obligations(),
995 ),
996 Err(_) => Ok(EvaluatedToErr),
997 }
998 }
999 }
1000 })
1001 }
1002
1003 #[instrument(skip(self, previous_stack), level = "debug", ret)]
1004 fn evaluate_trait_predicate_recursively<'o>(
1005 &mut self,
1006 previous_stack: TraitObligationStackList<'o, 'tcx>,
1007 mut obligation: PolyTraitObligation<'tcx>,
1008 ) -> Result<EvaluationResult, OverflowError> {
1009 if !matches!(self.infcx.typing_mode(), TypingMode::Coherence)
1010 && obligation.is_global()
1011 && obligation.param_env.caller_bounds().iter().all(|bound| bound.has_param())
1012 {
1013 // If a param env has no global bounds, global obligations do not
1014 // depend on its particular value in order to work, so we can clear
1015 // out the param env and get better caching.
1016 debug!("in global");
1017 obligation.param_env = ty::ParamEnv::empty();
1018 }
1019
1020 let stack = self.push_stack(previous_stack, &obligation);
1021 let fresh_trait_pred = stack.fresh_trait_pred;
1022 let param_env = obligation.param_env;
1023
1024 debug!(?fresh_trait_pred);
1025
1026 // If a trait predicate is in the (local or global) evaluation cache,
1027 // then we know it holds without cycles.
1028 if let Some(result) = self.check_evaluation_cache(param_env, fresh_trait_pred) {
1029 debug!("CACHE HIT");
1030 return Ok(result);
1031 }
1032
1033 if let Some(result) = stack.cache().get_provisional(fresh_trait_pred) {
1034 debug!("PROVISIONAL CACHE HIT");
1035 stack.update_reached_depth(result.reached_depth);
1036 return Ok(result.result);
1037 }
1038
1039 // Check if this is a match for something already on the
1040 // stack. If so, we don't want to insert the result into the
1041 // main cache (it is cycle dependent) nor the provisional
1042 // cache (which is meant for things that have completed but
1043 // for a "backedge" -- this result *is* the backedge).
1044 if let Some(cycle_result) = self.check_evaluation_cycle(&stack) {
1045 return Ok(cycle_result);
1046 }
1047
1048 let (result, dep_node) = self.in_task(|this| {
1049 let mut result = this.evaluate_stack(&stack)?;
1050
1051 // Fix for issue #103563: we don't normalize
1052 // nested obligations produced by a `TraitDef` candidate
1053 // (i.e. using bounds on assoc items as assumptions),
1054 // because we don't have enough information to
1055 // normalize these obligations before evaluating them.
1056 // So we try to normalize the obligation and evaluate again.
1057 // The new solver will replace this workaround in the future.
1058 if EvaluationResult::EvaluatedToErr == result
1059 && fresh_trait_pred.has_aliases()
1060 && fresh_trait_pred.is_global()
1061 {
1062 let mut nested_obligations = PredicateObligations::new();
1063 let predicate = normalize_with_depth_to(
1064 this,
1065 param_env,
1066 obligation.cause.clone(),
1067 obligation.recursion_depth + 1,
1068 obligation.predicate,
1069 &mut nested_obligations,
1070 );
1071 if predicate != obligation.predicate {
1072 let mut nested_result = EvaluationResult::EvaluatedToOk;
1073 for obligation in nested_obligations {
1074 nested_result = cmp::max(
1075 this.evaluate_predicate_recursively(previous_stack, obligation)?,
1076 nested_result,
1077 );
1078 }
1079
1080 if nested_result.must_apply_modulo_regions() {
1081 let obligation = obligation.with(this.tcx(), predicate);
1082 result = cmp::max(
1083 nested_result,
1084 this.evaluate_trait_predicate_recursively(previous_stack, obligation)?,
1085 );
1086 }
1087 }
1088 }
1089
1090 Ok::<_, OverflowError>(result)
1091 });
1092
1093 let result = result?;
1094
1095 if !result.must_apply_modulo_regions() {
1096 stack.cache().on_failure(stack.dfn);
1097 }
1098
1099 let reached_depth = stack.reached_depth.get();
1100 if reached_depth >= stack.depth {
1101 debug!("CACHE MISS");
1102 self.insert_evaluation_cache(param_env, fresh_trait_pred, dep_node, result);
1103 stack.cache().on_completion(stack.dfn);
1104 } else {
1105 debug!("PROVISIONAL");
1106 debug!(
1107 "caching provisionally because {:?} \
1108 is a cycle participant (at depth {}, reached depth {})",
1109 fresh_trait_pred, stack.depth, reached_depth,
1110 );
1111
1112 stack.cache().insert_provisional(stack.dfn, reached_depth, fresh_trait_pred, result);
1113 }
1114
1115 Ok(result)
1116 }
1117
1118 /// If there is any previous entry on the stack that precisely
1119 /// matches this obligation, then we can assume that the
1120 /// obligation is satisfied for now (all other conditions
1121 /// must still be met, of course). One obvious case this comes up is
1122 /// marker traits like `Send`. Think of a linked list:
1123 ///
1124 /// struct List<T> { data: T, next: Option<Box<List<T>>> }
1125 ///
1126 /// `Box<List<T>>` will be `Send` if `T` is `Send` and
1127 /// `Option<Box<List<T>>>` is `Send`, and in turn
1128 /// `Option<Box<List<T>>>` is `Send` if `Box<List<T>>` is
1129 /// `Send`.
1130 ///
1131 /// Note that we do this comparison using the `fresh_trait_ref`
1132 /// fields. Because these have all been freshened using
1133 /// `self.freshener`, we can be sure that (a) this will not
1134 /// affect the inferencer state and (b) that if we see two
1135 /// fresh regions with the same index, they refer to the same
1136 /// unbound type variable.
1137 fn check_evaluation_cycle(
1138 &mut self,
1139 stack: &TraitObligationStack<'_, 'tcx>,
1140 ) -> Option<EvaluationResult> {
1141 if let Some(cycle_depth) = stack
1142 .iter()
1143 .skip(1) // Skip top-most frame.
1144 .find(|prev| {
1145 stack.obligation.param_env == prev.obligation.param_env
1146 && stack.fresh_trait_pred == prev.fresh_trait_pred
1147 })
1148 .map(|stack| stack.depth)
1149 {
1150 debug!("evaluate_stack --> recursive at depth {}", cycle_depth);
1151
1152 // If we have a stack like `A B C D E A`, where the top of
1153 // the stack is the final `A`, then this will iterate over
1154 // `A, E, D, C, B` -- i.e., all the participants apart
1155 // from the cycle head. We mark them as participating in a
1156 // cycle. This suppresses caching for those nodes. See
1157 // `in_cycle` field for more details.
1158 stack.update_reached_depth(cycle_depth);
1159
1160 // Subtle: when checking for a coinductive cycle, we do
1161 // not compare using the "freshened trait refs" (which
1162 // have erased regions) but rather the fully explicit
1163 // trait refs. This is important because it's only a cycle
1164 // if the regions match exactly.
1165 let cycle = stack.iter().skip(1).take_while(|s| s.depth >= cycle_depth);
1166 let tcx = self.tcx();
1167 let cycle = cycle.map(|stack| stack.obligation.predicate.upcast(tcx));
1168 if self.coinductive_match(cycle) {
1169 debug!("evaluate_stack --> recursive, coinductive");
1170 Some(EvaluatedToOk)
1171 } else {
1172 debug!("evaluate_stack --> recursive, inductive");
1173 Some(EvaluatedToAmbigStackDependent)
1174 }
1175 } else {
1176 None
1177 }
1178 }
1179
1180 fn evaluate_stack<'o>(
1181 &mut self,
1182 stack: &TraitObligationStack<'o, 'tcx>,
1183 ) -> Result<EvaluationResult, OverflowError> {
1184 debug_assert!(!self.infcx.next_trait_solver());
1185 // In intercrate mode, whenever any of the generics are unbound,
1186 // there can always be an impl. Even if there are no impls in
1187 // this crate, perhaps the type would be unified with
1188 // something from another crate that does provide an impl.
1189 //
1190 // In intra mode, we must still be conservative. The reason is
1191 // that we want to avoid cycles. Imagine an impl like:
1192 //
1193 // impl<T:Eq> Eq for Vec<T>
1194 //
1195 // and a trait reference like `$0 : Eq` where `$0` is an
1196 // unbound variable. When we evaluate this trait-reference, we
1197 // will unify `$0` with `Vec<$1>` (for some fresh variable
1198 // `$1`), on the condition that `$1 : Eq`. We will then wind
1199 // up with many candidates (since that are other `Eq` impls
1200 // that apply) and try to winnow things down. This results in
1201 // a recursive evaluation that `$1 : Eq` -- as you can
1202 // imagine, this is just where we started. To avoid that, we
1203 // check for unbound variables and return an ambiguous (hence possible)
1204 // match if we've seen this trait before.
1205 //
1206 // This suffices to allow chains like `FnMut` implemented in
1207 // terms of `Fn` etc, but we could probably make this more
1208 // precise still.
1209 let unbound_input_types =
1210 stack.fresh_trait_pred.skip_binder().trait_ref.args.types().any(|ty| ty.is_fresh());
1211
1212 if unbound_input_types
1213 && stack.iter().skip(1).any(|prev| {
1214 stack.obligation.param_env == prev.obligation.param_env
1215 && self.match_fresh_trait_refs(stack.fresh_trait_pred, prev.fresh_trait_pred)
1216 })
1217 {
1218 debug!("evaluate_stack --> unbound argument, recursive --> giving up",);
1219 return Ok(EvaluatedToAmbigStackDependent);
1220 }
1221
1222 match self.candidate_from_obligation(stack) {
1223 Ok(Some(c)) => self.evaluate_candidate(stack, &c),
1224 Ok(None) => Ok(EvaluatedToAmbig),
1225 Err(Overflow(OverflowError::Canonical)) => Err(OverflowError::Canonical),
1226 Err(..) => Ok(EvaluatedToErr),
1227 }
1228 }
1229
1230 /// For defaulted traits, we use a co-inductive strategy to solve, so
1231 /// that recursion is ok. This routine returns `true` if the top of the
1232 /// stack (`cycle[0]`):
1233 ///
1234 /// - is a coinductive trait: an auto-trait or `Sized`,
1235 /// - it also appears in the backtrace at some position `X`,
1236 /// - all the predicates at positions `X..` between `X` and the top are
1237 /// also coinductive traits.
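    /// An illustrative sketch (hypothetical type) of an acceptable coinductive cycle:
    ///
    /// ```ignore (illustrative)
    /// struct List<T>(T, Option<Box<List<T>>>);
    /// // `List<T>: Send` requires `Option<Box<List<T>>>: Send`, which leads back to
    /// // `List<T>: Send`; because `Send` is an auto trait, the cycle is coinductive
    /// // and is treated as holding instead of overflowing.
    /// ```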
1238 pub(crate) fn coinductive_match<I>(&mut self, mut cycle: I) -> bool
1239 where
1240 I: Iterator<Item = ty::Predicate<'tcx>>,
1241 {
1242 cycle.all(|p| match p.kind().skip_binder() {
1243 ty::PredicateKind::Clause(ty::ClauseKind::Trait(data)) => {
1244 self.infcx.tcx.trait_is_coinductive(data.def_id())
1245 }
1246 ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(_)) => {
1247 // FIXME(generic_const_exprs): GCE needs well-formedness predicates to be
1248 // coinductive, but GCE is on the way out anyways, so this should eventually
1249 // be replaced with `false`.
1250 self.infcx.tcx.features().generic_const_exprs()
1251 }
1252 _ => false,
1253 })
1254 }
1255
1256 /// Further evaluates `candidate` to decide whether all type parameters match and whether nested
1257 /// obligations are met. Returns whether `candidate` remains viable after this further
1258 /// scrutiny.
1259 #[instrument(
1260 level = "debug",
1261 skip(self, stack),
1262 fields(depth = stack.obligation.recursion_depth),
1263 ret
1264 )]
1265 fn evaluate_candidate<'o>(
1266 &mut self,
1267 stack: &TraitObligationStack<'o, 'tcx>,
1268 candidate: &SelectionCandidate<'tcx>,
1269 ) -> Result<EvaluationResult, OverflowError> {
1270 let mut result = self.evaluation_probe(|this| {
1271 match this.confirm_candidate(stack.obligation, candidate.clone()) {
1272 Ok(selection) => {
1273 debug!(?selection);
1274 this.evaluate_predicates_recursively(
1275 stack.list(),
1276 selection.nested_obligations().into_iter(),
1277 )
1278 }
1279 Err(..) => Ok(EvaluatedToErr),
1280 }
1281 })?;
1282
1283 // If we erased any lifetimes, then we want to use
1284 // `EvaluatedToOkModuloRegions` instead of `EvaluatedToOk`
1285 // as your final result. The result will be cached using
1286 // the freshened trait predicate as a key, so we need
1287 // our result to be correct by *any* choice of original lifetimes,
1288 // not just the lifetime choice for this particular (non-erased)
1289 // predicate.
1290 // See issue #80691
1291 if stack.fresh_trait_pred.has_erased_regions() {
1292 result = result.max(EvaluatedToOkModuloRegions);
1293 }
1294
1295 Ok(result)
1296 }
1297
1298 fn check_evaluation_cache(
1299 &self,
1300 param_env: ty::ParamEnv<'tcx>,
1301 trait_pred: ty::PolyTraitPredicate<'tcx>,
1302 ) -> Option<EvaluationResult> {
1303 let infcx = self.infcx;
1304 let tcx = infcx.tcx;
1305 if self.can_use_global_caches(param_env, trait_pred) {
1306 let key = (infcx.typing_env(param_env), trait_pred);
1307 if let Some(res) = tcx.evaluation_cache.get(&key, tcx) {
1308 Some(res)
1309 } else {
1310 debug_assert_eq!(infcx.evaluation_cache.get(&(param_env, trait_pred), tcx), None);
1311 None
1312 }
1313 } else {
1314 self.infcx.evaluation_cache.get(&(param_env, trait_pred), tcx)
1315 }
1316 }
1317
1318 fn insert_evaluation_cache(
1319 &mut self,
1320 param_env: ty::ParamEnv<'tcx>,
1321 trait_pred: ty::PolyTraitPredicate<'tcx>,
1322 dep_node: DepNodeIndex,
1323 result: EvaluationResult,
1324 ) {
1325 // Avoid caching results that depend on more than just the trait-ref
1326 // - the stack can create recursion.
1327 if result.is_stack_dependent() {
1328 return;
1329 }
1330
1331 let infcx = self.infcx;
1332 let tcx = infcx.tcx;
1333 if self.can_use_global_caches(param_env, trait_pred) {
1334 debug!(?trait_pred, ?result, "insert_evaluation_cache global");
1335 // This may overwrite the cache with the same value
1336 tcx.evaluation_cache.insert(
1337 (infcx.typing_env(param_env), trait_pred),
1338 dep_node,
1339 result,
1340 );
1341 return;
1342 } else {
1343 debug!(?trait_pred, ?result, "insert_evaluation_cache local");
1344 self.infcx.evaluation_cache.insert((param_env, trait_pred), dep_node, result);
1345 }
1346 }
1347
1348 fn check_recursion_depth<T>(
1349 &self,
1350 depth: usize,
1351 error_obligation: &Obligation<'tcx, T>,
1352 ) -> Result<(), OverflowError>
1353 where
1354 T: Upcast<TyCtxt<'tcx>, ty::Predicate<'tcx>> + Clone,
1355 {
1356 if !self.infcx.tcx.recursion_limit().value_within_limit(depth) {
1357 match self.query_mode {
1358 TraitQueryMode::Standard => {
1359 if let Some(e) = self.infcx.tainted_by_errors() {
1360 return Err(OverflowError::Error(e));
1361 }
1362 self.infcx.err_ctxt().report_overflow_obligation(error_obligation, true);
1363 }
1364 TraitQueryMode::Canonical => {
1365 return Err(OverflowError::Canonical);
1366 }
1367 }
1368 }
1369 Ok(())
1370 }
1371
1372 /// Checks that the recursion limit has not been exceeded.
1373 ///
1374 /// The weird return type of this function allows it to be used with the `try` (`?`)
1375 /// operator within certain functions.
1376 #[inline(always)]
1377 fn check_recursion_limit<T: Display + TypeFoldable<TyCtxt<'tcx>>, V>(
1378 &self,
1379 obligation: &Obligation<'tcx, T>,
1380 error_obligation: &Obligation<'tcx, V>,
1381 ) -> Result<(), OverflowError>
1382 where
1383 V: Upcast<TyCtxt<'tcx>, ty::Predicate<'tcx>> + Clone,
1384 {
1385 self.check_recursion_depth(obligation.recursion_depth, error_obligation)
1386 }
1387
1388 fn in_task<OP, R>(&mut self, op: OP) -> (R, DepNodeIndex)
1389 where
1390 OP: FnOnce(&mut Self) -> R,
1391 {
1392 self.tcx().dep_graph.with_anon_task(self.tcx(), dep_kinds::TraitSelect, || op(self))
1393 }
1394
1395 /// `filter_impls` filters out candidates whose polarity does not match the goal:
1396 /// a positive impl for a negative goal, or a negative impl for a positive goal.
1397 #[instrument(level = "debug", skip(self, candidates))]
1398 fn filter_impls(
1399 &mut self,
1400 candidates: Vec<SelectionCandidate<'tcx>>,
1401 obligation: &PolyTraitObligation<'tcx>,
1402 ) -> Vec<SelectionCandidate<'tcx>> {
1403 trace!("{candidates:#?}");
1404 let tcx = self.tcx();
1405 let mut result = Vec::with_capacity(candidates.len());
1406
1407 for candidate in candidates {
1408 if let ImplCandidate(def_id) = candidate {
1409 match (tcx.impl_polarity(def_id), obligation.polarity()) {
1410 (ty::ImplPolarity::Reservation, _)
1411 | (ty::ImplPolarity::Positive, ty::PredicatePolarity::Positive)
1412 | (ty::ImplPolarity::Negative, ty::PredicatePolarity::Negative) => {
1413 result.push(candidate);
1414 }
1415 _ => {}
1416 }
1417 } else {
1418 result.push(candidate);
1419 }
1420 }
1421
1422 trace!("{result:#?}");
1423 result
1424 }
1425
1426 /// `filter_reservation_impls` treats a reservation impl as ambiguous for any goal.
1427 #[instrument(level = "debug", skip(self))]
1428 fn filter_reservation_impls(
1429 &mut self,
1430 candidate: SelectionCandidate<'tcx>,
1431 ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
1432 let tcx = self.tcx();
1433 // Treat reservation impls as ambiguity.
1434 if let ImplCandidate(def_id) = candidate {
1435 if let ty::ImplPolarity::Reservation = tcx.impl_polarity(def_id) {
1436 if let Some(intercrate_ambiguity_clauses) = &mut self.intercrate_ambiguity_causes {
1437 let message = tcx
1438 .get_attr(def_id, sym::rustc_reservation_impl)
1439 .and_then(|a| a.value_str());
1440 if let Some(message) = message {
1441 debug!(
1442 "filter_reservation_impls: \
1443 reservation impl ambiguity on {:?}",
1444 def_id
1445 );
1446 intercrate_ambiguity_clauses
1447 .insert(IntercrateAmbiguityCause::ReservationImpl { message });
1448 }
1449 }
1450 return Ok(None);
1451 }
1452 }
1453 Ok(Some(candidate))
1454 }
1455
1456 fn is_knowable<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> Result<(), Conflict> {
1457 let obligation = &stack.obligation;
1458 match self.infcx.typing_mode() {
1459 TypingMode::Coherence => {}
1460 TypingMode::Analysis { .. }
1461 | TypingMode::Borrowck { .. }
1462 | TypingMode::PostBorrowckAnalysis { .. }
1463 | TypingMode::PostAnalysis => return Ok(()),
1464 }
1465
1466 debug!("is_knowable()");
1467
1468 let predicate = self.infcx.resolve_vars_if_possible(obligation.predicate);
1469
1470 // Okay to skip binder because of the nature of the
1471 // trait-ref-is-knowable check, which does not care about
1472 // bound regions.
1473 let trait_ref = predicate.skip_binder().trait_ref;
1474
1475 coherence::trait_ref_is_knowable(self.infcx, trait_ref, |ty| Ok::<_, !>(ty)).into_ok()
1476 }
1477
1478 /// Returns `true` if the global caches can be used.
1479 fn can_use_global_caches(
1480 &self,
1481 param_env: ty::ParamEnv<'tcx>,
1482 pred: ty::PolyTraitPredicate<'tcx>,
1483 ) -> bool {
1484 // If there are any inference variables in the `ParamEnv`, then we
1485 // always use a cache local to this particular scope. Otherwise, we
1486 // switch to a global cache.
1487 if param_env.has_infer() || pred.has_infer() {
1488 return false;
1489 }
1490
1491 match self.infcx.typing_mode() {
1492 // Avoid using the global cache during coherence and just rely
1493 // on the local cache. It is really just a simplification to
1494 // avoid us having to fear that coherence results "pollute"
1495 // the master cache. Since coherence executes pretty quickly,
1496 // it's not worth going to more trouble to increase the
1497 // hit-rate, I don't think.
1498 TypingMode::Coherence => false,
1499 // Avoid using the global cache when we're defining opaque types
1500 // as their hidden type may impact the result of candidate selection.
1501 //
1502 // HACK: This is still theoretically unsound. Goals can indirectly rely
1503 // on opaques in the defining scope, and it's easier to do so with TAIT.
1504 // However, if we disqualify *all* goals from being cached, perf suffers.
1505 // This is likely fixed by better caching in general in the new solver.
1506 // See: <https://github.com/rust-lang/rust/issues/132064>.
1507 TypingMode::Analysis {
1508 defining_opaque_types_and_generators: defining_opaque_types,
1509 }
1510 | TypingMode::Borrowck { defining_opaque_types } => {
1511 defining_opaque_types.is_empty() || !pred.has_opaque_types()
1512 }
1513 // The hidden types of `defined_opaque_types` are not local to the current
1514 // inference context, so we can freely move this to the global cache.
1515 TypingMode::PostBorrowckAnalysis { .. } => true,
1516 // The global cache is only used if there are no opaque types in
1517 // the defining scope or we're outside of analysis.
1518 //
1519 // FIXME(#132279): This is still incorrect as we treat opaque types
1520 // and default associated items differently between these two modes.
1521 TypingMode::PostAnalysis => true,
1522 }
1523 }
1524
1525 fn check_candidate_cache(
1526 &mut self,
1527 param_env: ty::ParamEnv<'tcx>,
1528 cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
1529 ) -> Option<SelectionResult<'tcx, SelectionCandidate<'tcx>>> {
1530 let infcx = self.infcx;
1531 let tcx = infcx.tcx;
1532 let pred = cache_fresh_trait_pred.skip_binder();
1533
1534 if self.can_use_global_caches(param_env, cache_fresh_trait_pred) {
1535 if let Some(res) = tcx.selection_cache.get(&(infcx.typing_env(param_env), pred), tcx) {
1536 return Some(res);
1537 } else if cfg!(debug_assertions) {
1538 match infcx.selection_cache.get(&(param_env, pred), tcx) {
1539 None | Some(Err(Overflow(OverflowError::Canonical))) => {}
1540 res => bug!("unexpected local cache result: {res:?}"),
1541 }
1542 }
1543 }
1544
1545 // Subtle: we need to check the local cache even if we're able to use the
1546 // global cache, as we don't cache overflow in the global cache but do need
1547 // to cache it locally; otherwise rustdoc hangs when compiling diesel.
1548 infcx.selection_cache.get(&(param_env, pred), tcx)
1549 }
1550
1551 /// Determines whether we can safely cache the result
1552 /// of selecting an obligation. This is almost always `true`,
1553 /// except when dealing with certain `ParamCandidate`s.
1554 ///
1555 /// Ordinarily, a `ParamCandidate` will contain no inference variables,
1556 /// since it was usually produced directly from a `DefId`. However, in
1557 /// certain cases (currently only librustdoc's blanket impl finder),
1558 /// a `ParamEnv` may be explicitly constructed with inference types.
1559 /// When this is the case, we do *not* want to cache the resulting selection
1560 /// candidate. This is due to the fact that it might not always be possible
1561 /// to equate the obligation's trait ref and the candidate's trait ref,
1562 /// if more constraints end up getting added to an inference variable.
1563 ///
1564 /// Because of this, we always want to re-run the full selection
1565 /// process for our obligation the next time we see it, since
1566 /// we might end up picking a different `SelectionCandidate` (or none at all).
1567 fn can_cache_candidate(
1568 &self,
1569 result: &SelectionResult<'tcx, SelectionCandidate<'tcx>>,
1570 ) -> bool {
1571 match result {
1572 Ok(Some(SelectionCandidate::ParamCandidate(trait_ref))) => !trait_ref.has_infer(),
1573 _ => true,
1574 }
1575 }
1576
1577 #[instrument(skip(self, param_env, cache_fresh_trait_pred, dep_node), level = "debug")]
1578 fn insert_candidate_cache(
1579 &mut self,
1580 param_env: ty::ParamEnv<'tcx>,
1581 cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
1582 dep_node: DepNodeIndex,
1583 candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>,
1584 ) {
1585 let infcx = self.infcx;
1586 let tcx = infcx.tcx;
1587 let pred = cache_fresh_trait_pred.skip_binder();
1588
1589 if !self.can_cache_candidate(&candidate) {
1590 debug!(?pred, ?candidate, "insert_candidate_cache - candidate is not cacheable");
1591 return;
1592 }
1593
1594 if self.can_use_global_caches(param_env, cache_fresh_trait_pred) {
1595 if let Err(Overflow(OverflowError::Canonical)) = candidate {
1596 // Don't cache overflow globally; we only produce this in certain modes.
1597 } else {
1598 debug!(?pred, ?candidate, "insert_candidate_cache global");
1599 debug_assert!(!candidate.has_infer());
1600
1601 // This may overwrite the cache with the same value.
1602 tcx.selection_cache.insert(
1603 (infcx.typing_env(param_env), pred),
1604 dep_node,
1605 candidate,
1606 );
1607 return;
1608 }
1609 }
1610
1611 debug!(?pred, ?candidate, "insert_candidate_cache local");
1612 self.infcx.selection_cache.insert((param_env, pred), dep_node, candidate);
1613 }
1614
1615 /// Looks at the item bounds of the projection or opaque type.
1616 /// If this is a nested rigid projection, such as
1617 /// `<<T as Tr1>::Assoc as Tr2>::Assoc`, consider the item bounds
1618 /// on both `Tr1::Assoc` and `Tr2::Assoc`, since we may encounter
1619 /// relative bounds on both via the `associated_type_bounds` feature.
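/// A hedged illustration, using hypothetical traits `Tr1` and `Tr2` (not part
/// of this module), of which bounds get visited:
///
/// ```ignore (illustrative)
/// trait Tr2 { type Assoc: Clone; }
/// trait Tr1 { type Assoc: Tr2<Assoc: std::fmt::Debug>; }
///
/// // For the self type `<<T as Tr1>::Assoc as Tr2>::Assoc` we first visit
/// // the item bounds of `Tr2::Assoc` (here `Clone`), then walk up to the
/// // parent alias `<T as Tr1>::Assoc` and visit its non-self bounds, which
/// // include the `Debug` bound written via `Tr2<Assoc: Debug>`.
/// ```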
1620 pub(super) fn for_each_item_bound<T>(
1621 &mut self,
1622 mut self_ty: Ty<'tcx>,
1623 mut for_each: impl FnMut(&mut Self, ty::Clause<'tcx>, usize) -> ControlFlow<T, ()>,
1624 on_ambiguity: impl FnOnce(),
1625 ) -> ControlFlow<T, ()> {
1626 let mut idx = 0;
1627 let mut in_parent_alias_type = false;
1628
1629 loop {
1630 let (kind, alias_ty) = match *self_ty.kind() {
1631 ty::Alias(kind @ (ty::Projection | ty::Opaque), alias_ty) => (kind, alias_ty),
1632 ty::Infer(ty::TyVar(_)) => {
1633 on_ambiguity();
1634 return ControlFlow::Continue(());
1635 }
1636 _ => return ControlFlow::Continue(()),
1637 };
1638
1639 // HACK: On subsequent recursions, we only care about bounds that don't
1640 // share the same type as `self_ty`. This is because for truly rigid
1641 // projections, we will never be able to equate, e.g. `<T as Tr>::A`
1642 // with `<<T as Tr>::A as Tr>::A`.
1643 let relevant_bounds = if in_parent_alias_type {
1644 self.tcx().item_non_self_bounds(alias_ty.def_id)
1645 } else {
1646 self.tcx().item_self_bounds(alias_ty.def_id)
1647 };
1648
1649 for bound in relevant_bounds.instantiate(self.tcx(), alias_ty.args) {
1650 for_each(self, bound, idx)?;
1651 idx += 1;
1652 }
1653
1654 if kind == ty::Projection {
1655 self_ty = alias_ty.self_ty();
1656 } else {
1657 return ControlFlow::Continue(());
1658 }
1659
1660 in_parent_alias_type = true;
1661 }
1662 }
1663
1664 /// Equates the trait in `obligation` with the trait bound. If the two traits
1665 /// can be equated and the normalized trait bound doesn't contain inference
1666 /// variables or placeholders, the normalized bound is returned.
1667 fn match_normalize_trait_ref(
1668 &mut self,
1669 obligation: &PolyTraitObligation<'tcx>,
1670 placeholder_trait_ref: ty::TraitRef<'tcx>,
1671 trait_bound: ty::PolyTraitRef<'tcx>,
1672 ) -> Result<Option<ty::TraitRef<'tcx>>, ()> {
1673 debug_assert!(!placeholder_trait_ref.has_escaping_bound_vars());
1674 if placeholder_trait_ref.def_id != trait_bound.def_id() {
1675 // Avoid unnecessary normalization
1676 return Err(());
1677 }
1678
1679 let drcx = DeepRejectCtxt::relate_rigid_rigid(self.infcx.tcx);
1680 let obligation_args = obligation.predicate.skip_binder().trait_ref.args;
1681 if !drcx.args_may_unify(obligation_args, trait_bound.skip_binder().args) {
1682 return Err(());
1683 }
1684
1685 let trait_bound = self.infcx.instantiate_binder_with_fresh_vars(
1686 obligation.cause.span,
1687 HigherRankedType,
1688 trait_bound,
1689 );
1690 let Normalized { value: trait_bound, obligations: _ } = ensure_sufficient_stack(|| {
1691 normalize_with_depth(
1692 self,
1693 obligation.param_env,
1694 obligation.cause.clone(),
1695 obligation.recursion_depth + 1,
1696 trait_bound,
1697 )
1698 });
1699 self.infcx
1700 .at(&obligation.cause, obligation.param_env)
1701 .eq(DefineOpaqueTypes::No, placeholder_trait_ref, trait_bound)
1702 .map(|InferOk { obligations: _, value: () }| {
1703 // This method is called within a probe, so we can't have
1704 // inference variables and placeholders escape.
1705 if !trait_bound.has_infer() && !trait_bound.has_placeholders() {
1706 Some(trait_bound)
1707 } else {
1708 None
1709 }
1710 })
1711 .map_err(|_| ())
1712 }
1713
1714 fn where_clause_may_apply<'o>(
1715 &mut self,
1716 stack: &TraitObligationStack<'o, 'tcx>,
1717 where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
1718 ) -> Result<EvaluationResult, OverflowError> {
1719 self.evaluation_probe(|this| {
1720 match this.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) {
1721 Ok(obligations) => this.evaluate_predicates_recursively(stack.list(), obligations),
1722 Err(()) => Ok(EvaluatedToErr),
1723 }
1724 })
1725 }
1726
1727 /// Return `Yes` if the obligation's predicate type applies to the env_predicate, and
1728 /// `No` if it does not. Return `Ambiguous` in the case that the projection type is a GAT,
1729 /// and applying this env_predicate constrains any of the obligation's GAT parameters.
1730 ///
1731 /// This behavior is somewhat of a hack to prevent over-constraining inference variables
1732 /// in cases like #91762.
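/// A hedged illustration of the GAT case, using a hypothetical trait
/// `Provider` (not part of this module):
///
/// ```ignore (illustrative)
/// trait Provider {
///     type A<T>;
/// }
///
/// fn f<P>()
/// where
///     P: Provider<A<u32> = ()>,
/// {
///     // When proving a goal like `<P as Provider>::A<?x> == ?y`, applying
///     // this where-clause would newly infer `?x = u32`. Because that
///     // constrains a GAT argument of the obligation, we return `Ambiguous`
///     // instead of committing to this candidate.
/// }
/// ```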
1733 pub(super) fn match_projection_projections(
1734 &mut self,
1735 obligation: &ProjectionTermObligation<'tcx>,
1736 env_predicate: PolyProjectionPredicate<'tcx>,
1737 potentially_unnormalized_candidates: bool,
1738 ) -> ProjectionMatchesProjection {
1739 debug_assert_eq!(obligation.predicate.def_id, env_predicate.item_def_id());
1740
1741 let mut nested_obligations = PredicateObligations::new();
1742 let infer_predicate = self.infcx.instantiate_binder_with_fresh_vars(
1743 obligation.cause.span,
1744 BoundRegionConversionTime::HigherRankedType,
1745 env_predicate,
1746 );
1747 let infer_projection = if potentially_unnormalized_candidates {
1748 ensure_sufficient_stack(|| {
1749 normalize_with_depth_to(
1750 self,
1751 obligation.param_env,
1752 obligation.cause.clone(),
1753 obligation.recursion_depth + 1,
1754 infer_predicate.projection_term,
1755 &mut nested_obligations,
1756 )
1757 })
1758 } else {
1759 infer_predicate.projection_term
1760 };
1761
1762 let is_match = self
1763 .infcx
1764 .at(&obligation.cause, obligation.param_env)
1765 .eq(DefineOpaqueTypes::No, obligation.predicate, infer_projection)
1766 .is_ok_and(|InferOk { obligations, value: () }| {
1767 self.evaluate_predicates_recursively(
1768 TraitObligationStackList::empty(&ProvisionalEvaluationCache::default()),
1769 nested_obligations.into_iter().chain(obligations),
1770 )
1771 .is_ok_and(|res| res.may_apply())
1772 });
1773
1774 if is_match {
1775 let generics = self.tcx().generics_of(obligation.predicate.def_id);
1776 // FIXME(generic_associated_types): Addresses aggressive inference in #92917.
1777 // If this type is a GAT, and any of the GAT args resolve to something new,
1778 // that means that we must have newly inferred something about the GAT.
1779 // We should give up in that case.
1780 //
1781 // This only detects one layer of inference, which is probably not what we actually
1782 // want, but fixing it causes some ambiguity:
1783 // <https://github.com/rust-lang/rust/issues/125196>.
1784 if !generics.is_own_empty()
1785 && obligation.predicate.args[generics.parent_count..].iter().any(|&p| {
1786 p.has_non_region_infer()
1787 && match p.unpack() {
1788 ty::GenericArgKind::Const(ct) => {
1789 self.infcx.shallow_resolve_const(ct) != ct
1790 }
1791 ty::GenericArgKind::Type(ty) => self.infcx.shallow_resolve(ty) != ty,
1792 ty::GenericArgKind::Lifetime(_) => false,
1793 }
1794 })
1795 {
1796 ProjectionMatchesProjection::Ambiguous
1797 } else {
1798 ProjectionMatchesProjection::Yes
1799 }
1800 } else {
1801 ProjectionMatchesProjection::No
1802 }
1803 }
1804}
1805
1806/// ## Winnowing
1807///
1808/// Winnowing is the process of attempting to resolve ambiguity by
1809/// probing further. During the winnowing process, we unify all
1810/// type variables and then we also attempt to evaluate recursive
1811/// bounds to see if they are satisfied.
1812impl<'tcx> SelectionContext<'_, 'tcx> {
1813 /// If there are multiple ways to prove a trait goal, we make some
1814 /// *fairly arbitrary* choices about which candidate is actually used.
1815 ///
1816 /// For more details, look at the implementation of this method :)
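/// A hedged example of one such choice, using a hypothetical trait `Trait`
/// (not part of this module): where-bounds are preferred over impls.
///
/// ```ignore (illustrative)
/// trait Trait {}
/// impl<T> Trait for Vec<T> {}
///
/// fn requires<T: Trait>(_: T) {}
///
/// fn f<T>(x: Vec<T>)
/// where
///     Vec<T>: Trait,
/// {
///     // Both the where-bound `Vec<T>: Trait` and the impl above apply to
///     // the obligation `Vec<T>: Trait` from this call; winnowing prefers
///     // the (non-global) where-bound.
///     requires(x);
/// }
/// ```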
1817 #[instrument(level = "debug", skip(self), ret)]
1818 fn winnow_candidates(
1819 &mut self,
1820 has_non_region_infer: bool,
1821 mut candidates: Vec<EvaluatedCandidate<'tcx>>,
1822 ) -> Option<SelectionCandidate<'tcx>> {
1823 if candidates.len() == 1 {
1824 return Some(candidates.pop().unwrap().candidate);
1825 }
1826
1827 // We prefer `Sized` candidates over everything.
1828 let mut sized_candidates =
1829 candidates.iter().filter(|c| matches!(c.candidate, SizedCandidate { has_nested: _ }));
1830 if let Some(sized_candidate) = sized_candidates.next() {
1831 // There should only ever be a single sized candidate
1832 // as they would otherwise overlap.
1833 debug_assert_eq!(sized_candidates.next(), None);
1834 // Only prefer the built-in `Sized` candidate if its nested goals are certain.
1835 // Otherwise, we may encounter failure later on if inference causes this candidate
1836 // to not hold, but a where clause would've applied instead.
1837 if sized_candidate.evaluation.must_apply_modulo_regions() {
1838 return Some(sized_candidate.candidate.clone());
1839 } else {
1840 return None;
1841 }
1842 }
1843
1844 // Before we consider where-bounds, we have to deduplicate them here and also
1845 // drop where-bounds in case the same where-bound exists without bound vars.
1846 // This is necessary as elaborating super-trait bounds may result in duplicates.
1847 'search_victim: loop {
1848 for (i, this) in candidates.iter().enumerate() {
1849 let ParamCandidate(this) = this.candidate else { continue };
1850 for (j, other) in candidates.iter().enumerate() {
1851 if i == j {
1852 continue;
1853 }
1854
1855 let ParamCandidate(other) = other.candidate else { continue };
1856 if this == other {
1857 candidates.remove(j);
1858 continue 'search_victim;
1859 }
1860
1861 if this.skip_binder().trait_ref == other.skip_binder().trait_ref
1862 && this.skip_binder().polarity == other.skip_binder().polarity
1863 && !this.skip_binder().trait_ref.has_escaping_bound_vars()
1864 {
1865 candidates.remove(j);
1866 continue 'search_victim;
1867 }
1868 }
1869 }
1870
1871 break;
1872 }
1873
1874 // The next highest priority is for non-global where-bounds. However, while we don't
1875 // prefer global where-clauses here, we do bail with ambiguity when encountering both
1876 // a global and a non-global where-clause.
1877 //
1878 // Our handling of where-bounds is generally fairly messy but necessary for backwards
1879 // compatibility, see #50825 for why we need to handle global where-bounds like this.
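//
// A hedged illustration of that concern, using a hypothetical trait `Trait`:
//
// ```rust
// trait Trait {}
// impl Trait for u32 {}
//
// fn f<T>() where u32: Trait {}
// ```
//
// Inside `f`, both the global where-bound `u32: Trait` and the impl apply to
// `u32: Trait` obligations. We must not report ambiguity here; instead the
// global where-bound ends up being ignored in favor of the impl.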
1880 let is_global = |c: ty::PolyTraitPredicate<'tcx>| c.is_global() && !c.has_bound_vars();
1881 let param_candidates = candidates
1882 .iter()
1883 .filter_map(|c| if let ParamCandidate(p) = c.candidate { Some(p) } else { None });
1884 let mut has_global_bounds = false;
1885 let mut param_candidate = None;
1886 for c in param_candidates {
1887 if is_global(c) {
1888 has_global_bounds = true;
1889 } else if param_candidate.replace(c).is_some() {
1890 // Ambiguity, two potentially different where-clauses
1891 return None;
1892 }
1893 }
1894 if let Some(predicate) = param_candidate {
1895 // Ambiguity, a global and a non-global where-bound.
1896 if has_global_bounds {
1897 return None;
1898 } else {
1899 return Some(ParamCandidate(predicate));
1900 }
1901 }
1902
1903 // Prefer alias-bounds over blanket impls for rigid associated types. This is
1904 // fairly arbitrary but once again necessary for backwards compatibility.
1905 // If there are multiple applicable candidates which don't affect type inference,
1906 // choose the one with the lowest index.
1907 let alias_bound = candidates
1908 .iter()
1909 .filter_map(|c| if let ProjectionCandidate(i) = c.candidate { Some(i) } else { None })
1910 .try_reduce(|c1, c2| if has_non_region_infer { None } else { Some(c1.min(c2)) });
1911 match alias_bound {
1912 Some(Some(index)) => return Some(ProjectionCandidate(index)),
1913 Some(None) => {}
1914 None => return None,
1915 }
1916
1917 // Need to prioritize builtin trait object impls as `<dyn Any as Any>::type_id`
1918 // should use the vtable method and not the method provided by the user-defined
1919 // impl `impl<T: ?Sized> Any for T { .. }`. This really shouldn't exist but is
1920 // necessary due to #57893. We again arbitrarily prefer the applicable candidate
1921 // with the lowest index.
1922 let object_bound = candidates
1923 .iter()
1924 .filter_map(|c| if let ObjectCandidate(i) = c.candidate { Some(i) } else { None })
1925 .try_reduce(|c1, c2| if has_non_region_infer { None } else { Some(c1.min(c2)) });
1926 match object_bound {
1927 Some(Some(index)) => return Some(ObjectCandidate(index)),
1928 Some(None) => {}
1929 None => return None,
1930 }
1931 // Same for upcasting.
1932 let upcast_bound = candidates
1933 .iter()
1934 .filter_map(|c| {
1935 if let TraitUpcastingUnsizeCandidate(i) = c.candidate { Some(i) } else { None }
1936 })
1937 .try_reduce(|c1, c2| if has_non_region_infer { None } else { Some(c1.min(c2)) });
1938 match upcast_bound {
1939 Some(Some(index)) => return Some(TraitUpcastingUnsizeCandidate(index)),
1940 Some(None) => {}
1941 None => return None,
1942 }
1943
1944 // Finally, handle overlapping user-written impls.
1945 let impls = candidates.iter().filter_map(|c| {
1946 if let ImplCandidate(def_id) = c.candidate {
1947 Some((def_id, c.evaluation))
1948 } else {
1949 None
1950 }
1951 });
1952 let mut impl_candidate = None;
1953 for c in impls {
1954 if let Some(prev) = impl_candidate.replace(c) {
1955 if self.prefer_lhs_over_victim(has_non_region_infer, c, prev.0) {
1956 // Ok, prefer `c` over the previous entry
1957 } else if self.prefer_lhs_over_victim(has_non_region_infer, prev, c.0) {
1958 // Ok, keep `prev` instead of the new entry
1959 impl_candidate = Some(prev);
1960 } else {
1961 // Ambiguity, two incomparable impl candidates
1962 return None;
1963 }
1964 }
1965 }
1966 if let Some((def_id, _evaluation)) = impl_candidate {
1967 // Don't use impl candidates which overlap with other candidates.
1968 // This should pretty much only ever happen with malformed impls.
1969 if candidates.iter().all(|c| match c.candidate {
1970 SizedCandidate { has_nested: _ }
1971 | BuiltinCandidate { has_nested: _ }
1972 | TransmutabilityCandidate
1973 | AutoImplCandidate
1974 | ClosureCandidate { .. }
1975 | AsyncClosureCandidate
1976 | AsyncFnKindHelperCandidate
1977 | CoroutineCandidate
1978 | FutureCandidate
1979 | IteratorCandidate
1980 | AsyncIteratorCandidate
1981 | FnPointerCandidate
1982 | TraitAliasCandidate
1983 | TraitUpcastingUnsizeCandidate(_)
1984 | BuiltinObjectCandidate
1985 | BuiltinUnsizeCandidate
1986 | BikeshedGuaranteedNoDropCandidate => false,
1987 // Non-global param candidates have already been handled, global
1988 // where-bounds get ignored.
1989 ParamCandidate(_) | ImplCandidate(_) => true,
1990 ProjectionCandidate(_) | ObjectCandidate(_) => unreachable!(),
1991 }) {
1992 return Some(ImplCandidate(def_id));
1993 } else {
1994 return None;
1995 }
1996 }
1997
1998 if candidates.len() == 1 {
1999 Some(candidates.pop().unwrap().candidate)
2000 } else {
2001 // Also try ignoring all global where-bounds and check whether we end
2002 // up with a unique candidate in this case.
2003 let mut not_a_global_where_bound = candidates
2004 .into_iter()
2005 .filter(|c| !matches!(c.candidate, ParamCandidate(p) if is_global(p)));
2006 not_a_global_where_bound
2007 .next()
2008 .map(|c| c.candidate)
2009 .filter(|_| not_a_global_where_bound.next().is_none())
2010 }
2011 }
2012
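/// Returns whether the `lhs` impl should be preferred over the `victim` impl,
/// e.g. because `lhs` specializes `victim` or because both are marker trait
/// impls that are allowed to overlap.
///
/// A hedged sketch of the specialization case (nightly-only, hypothetical
/// trait `Trait`):
///
/// ```ignore (illustrative)
/// #![feature(specialization)]
/// trait Trait { fn m(&self); }
/// impl<T> Trait for T { default fn m(&self) {} }
/// impl Trait for u8 { fn m(&self) {} }
///
/// // Both impls apply to `u8: Trait`; the specializing `impl Trait for u8`
/// // is preferred and the blanket impl is discarded.
/// ```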
2013 fn prefer_lhs_over_victim(
2014 &self,
2015 has_non_region_infer: bool,
2016 (lhs, lhs_evaluation): (DefId, EvaluationResult),
2017 victim: DefId,
2018 ) -> bool {
2019 let tcx = self.tcx();
2020 // See if we can toss out `victim` based on specialization.
2021 //
2022 // While this requires us to know *for sure* that the `lhs` impl applies,
2023 // we still only check this modulo regions here. This is fine as specialization
2024 // currently assumes that specializing impls always have to be applicable,
2025 // meaning that the only allowed region constraints are ones also present on the default impl.
2026 if lhs_evaluation.must_apply_modulo_regions() {
2027 if tcx.specializes((lhs, victim)) {
2028 return true;
2029 }
2030 }
2031
2032 match tcx.impls_are_allowed_to_overlap(lhs, victim) {
2033 // For candidates which already reference errors it doesn't really
2034 // matter what we do 🤷
2035 Some(ty::ImplOverlapKind::Permitted { marker: false }) => {
2036 lhs_evaluation.must_apply_considering_regions()
2037 }
2038 Some(ty::ImplOverlapKind::Permitted { marker: true }) => {
2039 // Subtle: If the predicate we are evaluating has inference
2040 // variables, do *not* allow discarding candidates due to
2041 // marker trait impls.
2042 //
2043 // Without this restriction, we could end up accidentally
2044 // constraining inference variables based on an arbitrarily
2045 // chosen trait impl.
2046 //
2047 // Imagine we have the following code:
2048 //
2049 // ```rust
2050 // #[marker] trait MyTrait {}
2051 // impl MyTrait for u8 {}
2052 // impl MyTrait for bool {}
2053 // ```
2054 //
2055 // And we are evaluating the predicate `<_#0t as MyTrait>`.
2056 //
2057 // During selection, we will end up with one candidate for each
2058 // impl of `MyTrait`. If we were to discard one impl in favor
2059 // of the other, we would be left with one candidate, causing
2060 // us to "successfully" select the predicate, unifying
2061 // _#0t with (for example) `u8`.
2062 //
2063 // However, we have no reason to believe that this unification
2064 // is correct - we've essentially just picked an arbitrary
2065 // *possibility* for _#0t, and required that this be the *only*
2066 // possibility.
2067 //
2068 // Eventually, we will either:
2069 // 1) Unify all inference variables in the predicate through
2070 // some other means (e.g. type-checking of a function). We will
2071 // then be in a position to drop marker trait candidates
2072 // without constraining inference variables (since there are
2073 // none left to constrain)
2074 // 2) Be left with some unconstrained inference variables. We
2075 // will then correctly report an inference error, since the
2076 // existence of multiple marker trait impls tells us nothing
2077 // about which one should actually apply.
2078 !has_non_region_infer && lhs_evaluation.must_apply_considering_regions()
2079 }
2080 None => false,
2081 }
2082 }
2083}
2084
2085impl<'tcx> SelectionContext<'_, 'tcx> {
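/// Computes the conditions under which the self type is `Sized` according to
/// the builtin rules, without consulting user-written impls.
///
/// A hedged illustration (hypothetical struct `S`):
///
/// ```ignore (illustrative)
/// // `u32`, `&str`, `fn()`, raw pointers, ... are unconditionally `Sized`.
/// // `[T]`, `str`, `dyn Trait` and foreign types have no builtin `Sized` impl.
/// // `(u8, T)` is `Sized` iff its *last* element `T` is `Sized`.
/// struct S<T: ?Sized> { x: u8, tail: T }
/// // `S<T>` is `Sized` iff its sized-constraint `T` is `Sized`.
/// ```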
2086 fn sized_conditions(
2087 &mut self,
2088 obligation: &PolyTraitObligation<'tcx>,
2089 ) -> BuiltinImplConditions<'tcx> {
2090 use self::BuiltinImplConditions::{Ambiguous, None, Where};
2091
2092 // NOTE: binder moved to (*)
2093 let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
2094
2095 match self_ty.kind() {
2096 ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
2097 | ty::Uint(_)
2098 | ty::Int(_)
2099 | ty::Bool
2100 | ty::Float(_)
2101 | ty::FnDef(..)
2102 | ty::FnPtr(..)
2103 | ty::RawPtr(..)
2104 | ty::Char
2105 | ty::Ref(..)
2106 | ty::Coroutine(..)
2107 | ty::CoroutineWitness(..)
2108 | ty::Array(..)
2109 | ty::Closure(..)
2110 | ty::CoroutineClosure(..)
2111 | ty::Never
2112 | ty::Dynamic(_, _, ty::DynStar)
2113 | ty::Error(_) => {
2114 // safe for everything
2115 Where(ty::Binder::dummy(Vec::new()))
2116 }
2117
2118 ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => None,
2119
2120 ty::Tuple(tys) => Where(
2121 obligation.predicate.rebind(tys.last().map_or_else(Vec::new, |&last| vec![last])),
2122 ),
2123
2124 ty::Pat(ty, _) => Where(obligation.predicate.rebind(vec![*ty])),
2125
2126 ty::Adt(def, args) => {
2127 if let Some(sized_crit) = def.sized_constraint(self.tcx()) {
2128 // (*) binder moved here
2129 Where(
2130 obligation.predicate.rebind(vec![sized_crit.instantiate(self.tcx(), args)]),
2131 )
2132 } else {
2133 Where(ty::Binder::dummy(Vec::new()))
2134 }
2135 }
2136
2137 // FIXME(unsafe_binders): This binder needs to be squashed
2138 ty::UnsafeBinder(binder_ty) => Where(binder_ty.map_bound(|ty| vec![ty])),
2139
2140 ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) => None,
2141 ty::Infer(ty::TyVar(_)) => Ambiguous,
2142
2143 // We can make this an ICE if/once we actually instantiate the trait obligation eagerly.
2144 ty::Bound(..) => None,
2145
2146 ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
2147 bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
2148 }
2149 }
2150 }
2151
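/// Computes the conditions under which the self type has a builtin `Copy`
/// or `Clone` impl, without consulting user-written impls.
///
/// A hedged illustration (hypothetical code):
///
/// ```ignore (illustrative)
/// // Tuples satisfy the builtin conditions iff all of their elements do:
/// // `(u8, String)` is `Clone` but not `Copy`.
/// let s = String::new();
/// // Closures satisfy them iff all captured upvars do:
/// let f = move || s.len(); // captures a `String`: `Clone` but not `Copy`.
/// // ADTs such as `struct Foo(String);` fall back to user-written
/// // derives/impls instead.
/// ```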
2152 fn copy_clone_conditions(
2153 &mut self,
2154 obligation: &PolyTraitObligation<'tcx>,
2155 ) -> BuiltinImplConditions<'tcx> {
2156 // NOTE: binder moved to (*)
2157 let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
2158
2159 use self::BuiltinImplConditions::{Ambiguous, None, Where};
2160
2161 match *self_ty.kind() {
2162 ty::FnDef(..) | ty::FnPtr(..) | ty::Error(_) => Where(ty::Binder::dummy(Vec::new())),
2163
2164 ty::Uint(_)
2165 | ty::Int(_)
2166 | ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
2167 | ty::Bool
2168 | ty::Float(_)
2169 | ty::Char
2170 | ty::RawPtr(..)
2171 | ty::Never
2172 | ty::Ref(_, _, hir::Mutability::Not)
2173 | ty::Array(..) => {
2174 // Implementations provided in libcore
2175 None
2176 }
2177
2178 // FIXME(unsafe_binder): Should we conditionally
2179 // (i.e. universally) implement copy/clone?
2180 ty::UnsafeBinder(_) => None,
2181
2182 ty::Dynamic(..)
2183 | ty::Str
2184 | ty::Slice(..)
2185 | ty::Foreign(..)
2186 | ty::Ref(_, _, hir::Mutability::Mut) => None,
2187
2188 ty::Tuple(tys) => {
2189 // (*) binder moved here
2190 Where(obligation.predicate.rebind(tys.iter().collect()))
2191 }
2192
2193 ty::Pat(ty, _) => {
2194 // (*) binder moved here
2195 Where(obligation.predicate.rebind(vec![ty]))
2196 }
2197
2198 ty::Coroutine(coroutine_def_id, args) => {
2199 match self.tcx().coroutine_movability(coroutine_def_id) {
2200 hir::Movability::Static => None,
2201 hir::Movability::Movable => {
2202 if self.tcx().features().coroutine_clone() {
2203 let resolved_upvars =
2204 self.infcx.shallow_resolve(args.as_coroutine().tupled_upvars_ty());
2205 let resolved_witness =
2206 self.infcx.shallow_resolve(args.as_coroutine().witness());
2207 if resolved_upvars.is_ty_var() || resolved_witness.is_ty_var() {
2208 // Not yet resolved.
2209 Ambiguous
2210 } else {
2211 let all = args
2212 .as_coroutine()
2213 .upvar_tys()
2214 .iter()
2215 .chain([args.as_coroutine().witness()])
2216 .collect::<Vec<_>>();
2217 Where(obligation.predicate.rebind(all))
2218 }
2219 } else {
2220 None
2221 }
2222 }
2223 }
2224 }
2225
2226 ty::CoroutineWitness(def_id, args) => {
2227 let hidden_types = rebind_coroutine_witness_types(
2228 self.infcx.tcx,
2229 def_id,
2230 args,
2231 obligation.predicate.bound_vars(),
2232 );
2233 Where(hidden_types)
2234 }
2235
2236 ty::Closure(_, args) => {
2237 // (*) binder moved here
2238 let ty = self.infcx.shallow_resolve(args.as_closure().tupled_upvars_ty());
2239 if let ty::Infer(ty::TyVar(_)) = ty.kind() {
2240 // Not yet resolved.
2241 Ambiguous
2242 } else {
2243 Where(obligation.predicate.rebind(args.as_closure().upvar_tys().to_vec()))
2244 }
2245 }
2246
2247 ty::CoroutineClosure(_, args) => {
2248 // (*) binder moved here
2249 let ty = self.infcx.shallow_resolve(args.as_coroutine_closure().tupled_upvars_ty());
2250 if let ty::Infer(ty::TyVar(_)) = ty.kind() {
2251 // Not yet resolved.
2252 Ambiguous
2253 } else {
2254 Where(
2255 obligation
2256 .predicate
2257 .rebind(args.as_coroutine_closure().upvar_tys().to_vec()),
2258 )
2259 }
2260 }
2261
2262 ty::Adt(..) | ty::Alias(..) | ty::Param(..) | ty::Placeholder(..) => {
2263 // Fallback to whatever user-defined impls exist in this case.
2264 None
2265 }
2266
2267 ty::Infer(ty::TyVar(_)) => {
2268 // Unbound type variable. Might or might not have
2269 // applicable impls and so forth, depending on what
2270 // those type variables wind up being bound to.
2271 Ambiguous
2272 }
2273
2274 // We can make this an ICE if/once we actually instantiate the trait obligation eagerly.
2275 ty::Bound(..) => None,
2276
2277 ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
2278 bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
2279 }
2280 }
2281 }
2282
2283 fn fused_iterator_conditions(
2284 &mut self,
2285 obligation: &PolyTraitObligation<'tcx>,
2286 ) -> BuiltinImplConditions<'tcx> {
2287 let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder());
2288 if let ty::Coroutine(did, ..) = *self_ty.kind()
2289 && self.tcx().coroutine_is_gen(did)
2290 {
2291 BuiltinImplConditions::Where(ty::Binder::dummy(Vec::new()))
2292 } else {
2293 BuiltinImplConditions::None
2294 }
2295 }
2296
2297 /// For default impls, we need to break apart a type into its
2298 /// "constituent types" -- meaning, the types that it contains.
2299 ///
2300 /// Here are some (simple) examples:
2301 ///
2302 /// ```ignore (illustrative)
2303 /// (i32, u32) -> [i32, u32]
2304 /// Foo where struct Foo { x: i32, y: u32 } -> [i32, u32]
2305 /// Bar<i32> where struct Bar<T> { x: T, y: u32 } -> [i32, u32]
2306 /// Zed<i32> where enum Zed<T> { A(T), B(u32) } -> [i32, u32]
2307 /// ```
2308 #[instrument(level = "debug", skip(self), ret)]
2309 fn constituent_types_for_ty(
2310 &self,
2311 t: ty::Binder<'tcx, Ty<'tcx>>,
2312 ) -> Result<ty::Binder<'tcx, Vec<Ty<'tcx>>>, SelectionError<'tcx>> {
2313 Ok(match *t.skip_binder().kind() {
2314 ty::Uint(_)
2315 | ty::Int(_)
2316 | ty::Bool
2317 | ty::Float(_)
2318 | ty::FnDef(..)
2319 | ty::FnPtr(..)
2320 | ty::Error(_)
2321 | ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
2322 | ty::Never
2323 | ty::Char => ty::Binder::dummy(Vec::new()),
2324
2325 // This branch is only for `experimental_default_bounds`.
2326 // Other foreign types were rejected earlier in
2327 // `assemble_candidates_from_auto_impls`.
2328 ty::Foreign(..) => ty::Binder::dummy(Vec::new()),
2329
2330 // FIXME(unsafe_binders): Squash the double binder for now, I guess.
2331 ty::UnsafeBinder(_) => return Err(SelectionError::Unimplemented),
2332
2333 // Treat this like `struct str([u8]);`
2334 ty::Str => ty::Binder::dummy(vec![Ty::new_slice(self.tcx(), self.tcx().types.u8)]),
2335
2336 ty::Placeholder(..)
2337 | ty::Dynamic(..)
2338 | ty::Param(..)
2339 | ty::Alias(ty::Projection | ty::Inherent | ty::Free, ..)
2340 | ty::Bound(..)
2341 | ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
2342 bug!("asked to assemble constituent types of unexpected type: {:?}", t);
2343 }
2344
2345 ty::RawPtr(element_ty, _) | ty::Ref(_, element_ty, _) => t.rebind(vec![element_ty]),
2346
2347 ty::Pat(ty, _) | ty::Array(ty, _) | ty::Slice(ty) => t.rebind(vec![ty]),
2348
2349 ty::Tuple(tys) => {
2350 // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
2351 t.rebind(tys.iter().collect())
2352 }
2353
2354 ty::Closure(_, args) => {
2355 let ty = self.infcx.shallow_resolve(args.as_closure().tupled_upvars_ty());
2356 t.rebind(vec![ty])
2357 }
2358
2359 ty::CoroutineClosure(_, args) => {
2360 let ty = self.infcx.shallow_resolve(args.as_coroutine_closure().tupled_upvars_ty());
2361 t.rebind(vec![ty])
2362 }
2363
2364 ty::Coroutine(_, args) => {
2365 let ty = self.infcx.shallow_resolve(args.as_coroutine().tupled_upvars_ty());
2366 let witness = args.as_coroutine().witness();
2367 t.rebind([ty].into_iter().chain(iter::once(witness)).collect())
2368 }
2369
2370 ty::CoroutineWitness(def_id, args) => {
2371 rebind_coroutine_witness_types(self.infcx.tcx, def_id, args, t.bound_vars())
2372 }
2373
2374 // For `PhantomData<T>`, we pass `T`.
2375 ty::Adt(def, args) if def.is_phantom_data() => t.rebind(args.types().collect()),
2376
2377 ty::Adt(def, args) => {
2378 t.rebind(def.all_fields().map(|f| f.ty(self.tcx(), args)).collect())
2379 }
2380
2381 ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
2382 if self.infcx.can_define_opaque_ty(def_id) {
2383 unreachable!()
2384 } else {
2385 // We can resolve the `impl Trait` to its concrete type,
2386 // which enforces a DAG between the functions requiring
2387 // the auto trait bounds in question.
2388 match self.tcx().type_of_opaque(def_id) {
2389 Ok(ty) => t.rebind(vec![ty.instantiate(self.tcx(), args)]),
2390 Err(_) => {
2391 return Err(SelectionError::OpaqueTypeAutoTraitLeakageUnknown(def_id));
2392 }
2393 }
2394 }
2395 }
2396 })
2397 }
2398
2399 fn collect_predicates_for_types(
2400 &mut self,
2401 param_env: ty::ParamEnv<'tcx>,
2402 cause: ObligationCause<'tcx>,
2403 recursion_depth: usize,
2404 trait_def_id: DefId,
2405 types: Vec<Ty<'tcx>>,
2406 ) -> PredicateObligations<'tcx> {
2407 // Because the types were potentially derived from
2408 // higher-ranked obligations they may reference late-bound
2409 // regions. For example, `for<'a> Foo<&'a i32> : Copy` would
2410 // yield a type like `for<'a> &'a i32`. In general, we
2411 // maintain the invariant that we never manipulate bound
2412 // regions, so we have to process these bound regions somehow.
2413 //
2414 // The strategy is to:
2415 //
2416 // 1. Instantiate those regions to placeholder regions (e.g.,
2417 // `for<'a> &'a i32` becomes `&'0 i32`).
2418 // 2. Produce something like `&'0 i32 : Copy`
2419 // 3. Re-bind the regions back to `for<'a> &'a i32 : Copy`
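//
// A hedged illustration: for a builtin/auto trait `AutoTrait` and constituent
// types `[A, B]`, this produces (roughly) the obligations required to
// normalize `A`, then `A: AutoTrait`, then the same pair for `B`.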
2420
2421 types
2422 .into_iter()
2423 .flat_map(|placeholder_ty| {
2424 let Normalized { value: normalized_ty, mut obligations } =
2425 ensure_sufficient_stack(|| {
2426 normalize_with_depth(
2427 self,
2428 param_env,
2429 cause.clone(),
2430 recursion_depth,
2431 placeholder_ty,
2432 )
2433 });
2434
2435 let tcx = self.tcx();
2436 let trait_ref = if tcx.generics_of(trait_def_id).own_params.len() == 1 {
2437 ty::TraitRef::new(tcx, trait_def_id, [normalized_ty])
2438 } else {
2439 // If this is an ill-formed auto/built-in trait, then synthesize
2440 // new error args for the missing generics.
2441 let err_args = ty::GenericArgs::extend_with_error(
2442 tcx,
2443 trait_def_id,
2444 &[normalized_ty.into()],
2445 );
2446 ty::TraitRef::new_from_args(tcx, trait_def_id, err_args)
2447 };
2448
2449 let obligation = Obligation::new(self.tcx(), cause.clone(), param_env, trait_ref);
2450 obligations.push(obligation);
2451 obligations
2452 })
2453 .collect()
2454 }
2455
2456 ///////////////////////////////////////////////////////////////////////////
2457 // Matching
2458 //
2459 // Matching is a common path used for both evaluation and
2460 // confirmation. It basically unifies types that appear in impls
2461 // and traits. This does affect the surrounding environment;
2462 // therefore, when used during evaluation, match routines must be
2463 // run inside of a `probe()` so that their side-effects are
2464 // contained.
2465
2466 fn rematch_impl(
2467 &mut self,
2468 impl_def_id: DefId,
2469 obligation: &PolyTraitObligation<'tcx>,
2470 ) -> Normalized<'tcx, GenericArgsRef<'tcx>> {
2471 let impl_trait_header = self.tcx().impl_trait_header(impl_def_id).unwrap();
2472 match self.match_impl(impl_def_id, impl_trait_header, obligation) {
2473 Ok(args) => args,
2474 Err(()) => {
2475 let predicate = self.infcx.resolve_vars_if_possible(obligation.predicate);
2476 bug!("impl {impl_def_id:?} was matchable against {predicate:?} but now is not")
2477 }
2478 }
2479 }
2480
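/// Matches `obligation` against the impl `impl_def_id` by instantiating the
/// impl with fresh inference variables, normalizing its trait ref, and
/// equating it with the obligation's (placeholder-instantiated) trait ref.
/// Reservation impls are rejected outside of coherence.
///
/// A hedged illustration (hypothetical trait and impl):
///
/// ```ignore (illustrative)
/// trait Trait<A> {}
/// impl<T> Trait<u32> for Vec<T> {}
///
/// // Matching `Vec<i32>: Trait<?x>` against this impl instantiates it as
/// // `Vec<?t>: Trait<u32>` and equates the two trait refs; this succeeds
/// // with `?t := i32` and `?x := u32`, and the (now constrained) impl args
/// // `[?t]` are returned along with any normalization obligations.
/// ```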
2481 #[instrument(level = "debug", skip(self), ret)]
2482 fn match_impl(
2483 &mut self,
2484 impl_def_id: DefId,
2485 impl_trait_header: ty::ImplTraitHeader<'tcx>,
2486 obligation: &PolyTraitObligation<'tcx>,
2487 ) -> Result<Normalized<'tcx, GenericArgsRef<'tcx>>, ()> {
2488 let placeholder_obligation =
2489 self.infcx.enter_forall_and_leak_universe(obligation.predicate);
2490 let placeholder_obligation_trait_ref = placeholder_obligation.trait_ref;
2491
2492 let impl_args = self.infcx.fresh_args_for_item(obligation.cause.span, impl_def_id);
2493
2494 let trait_ref = impl_trait_header.trait_ref.instantiate(self.tcx(), impl_args);
2495 debug!(?impl_trait_header);
2496
2497 let Normalized { value: impl_trait_ref, obligations: mut nested_obligations } =
2498 ensure_sufficient_stack(|| {
2499 normalize_with_depth(
2500 self,
2501 obligation.param_env,
2502 obligation.cause.clone(),
2503 obligation.recursion_depth + 1,
2504 trait_ref,
2505 )
2506 });
2507
2508 debug!(?impl_trait_ref, ?placeholder_obligation_trait_ref);
2509
2510 let cause = ObligationCause::new(
2511 obligation.cause.span,
2512 obligation.cause.body_id,
2513 ObligationCauseCode::MatchImpl(obligation.cause.clone(), impl_def_id),
2514 );
2515
2516 let InferOk { obligations, .. } = self
2517 .infcx
2518 .at(&cause, obligation.param_env)
2519 .eq(DefineOpaqueTypes::No, placeholder_obligation_trait_ref, impl_trait_ref)
2520 .map_err(|e| {
2521 debug!("match_impl: failed eq_trait_refs due to `{}`", e.to_string(self.tcx()))
2522 })?;
2523 nested_obligations.extend(obligations);
2524
2525 if impl_trait_header.polarity == ty::ImplPolarity::Reservation
2526 && !matches!(self.infcx.typing_mode(), TypingMode::Coherence)
2527 {
2528 debug!("reservation impls only apply in intercrate mode");
2529 return Err(());
2530 }
2531
2532 Ok(Normalized { value: impl_args, obligations: nested_obligations })
2533 }
2534
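/// Checks whether the source trait object (`a_data`) can be upcast to the
/// target trait object (`b_data`) via `unnormalized_upcast_principal`: the
/// target principal is equated with that (normalized) principal, each target
/// projection must match exactly one source projection, and every target
/// auto trait must be present in, or implied by, the source's bounds.
///
/// A hedged illustration (hypothetical traits):
///
/// ```ignore (illustrative)
/// trait Super: Send {}
/// trait Sub: Super {}
///
/// // Upcasting `&dyn Sub` to `&(dyn Super + Send)`: the target principal
/// // `Super` is a supertrait of `Sub`, and the auto trait `Send` is allowed
/// // because it is implied by the source principal's supertraits.
/// ```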
2535 fn match_upcast_principal(
2536 &mut self,
2537 obligation: &PolyTraitObligation<'tcx>,
2538 unnormalized_upcast_principal: ty::PolyTraitRef<'tcx>,
2539 a_data: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>,
2540 b_data: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>,
2541 a_region: ty::Region<'tcx>,
2542 b_region: ty::Region<'tcx>,
2543 ) -> SelectionResult<'tcx, PredicateObligations<'tcx>> {
2544 let tcx = self.tcx();
2545 let mut nested = PredicateObligations::new();
2546
2547 // We may upcast to auto traits that are either explicitly listed in
2548 // the object type's bounds, or implied by the principal trait ref's
2549 // supertraits.
2550 let a_auto_traits: FxIndexSet<DefId> = a_data
2551 .auto_traits()
2552 .chain(a_data.principal_def_id().into_iter().flat_map(|principal_def_id| {
2553 elaborate::supertrait_def_ids(tcx, principal_def_id)
2554 .filter(|def_id| tcx.trait_is_auto(*def_id))
2555 }))
2556 .collect();
2557
2558 let upcast_principal = normalize_with_depth_to(
2559 self,
2560 obligation.param_env,
2561 obligation.cause.clone(),
2562 obligation.recursion_depth + 1,
2563 unnormalized_upcast_principal,
2564 &mut nested,
2565 );
2566
2567 for bound in b_data {
2568 match bound.skip_binder() {
2569 // Check that a_ty's supertrait (upcast_principal) is compatible
2570 // with the target (b_ty).
2571 ty::ExistentialPredicate::Trait(target_principal) => {
2572 let hr_source_principal = upcast_principal.map_bound(|trait_ref| {
2573 ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref)
2574 });
2575 let hr_target_principal = bound.rebind(target_principal);
2576
2577 nested.extend(
2578 self.infcx
2579 .enter_forall(hr_target_principal, |target_principal| {
2580 let source_principal =
2581 self.infcx.instantiate_binder_with_fresh_vars(
2582 obligation.cause.span,
2583 HigherRankedType,
2584 hr_source_principal,
2585 );
2586 self.infcx.at(&obligation.cause, obligation.param_env).eq_trace(
2587 DefineOpaqueTypes::Yes,
2588 ToTrace::to_trace(
2589 &obligation.cause,
2590 hr_target_principal,
2591 hr_source_principal,
2592 ),
2593 target_principal,
2594 source_principal,
2595 )
2596 })
2597 .map_err(|_| SelectionError::Unimplemented)?
2598 .into_obligations(),
2599 );
2600 }
2601 // Check that b_ty's projection is satisfied by exactly one of
2602 // a_ty's projections. First, we look through the list to see if
2603 // any match. If not, error. Then, if *more* than one matches, we
2604 // return ambiguity. Otherwise, if exactly one matches, equate
2605 // it with b_ty's projection.
2606 ty::ExistentialPredicate::Projection(target_projection) => {
2607 let hr_target_projection = bound.rebind(target_projection);
2608
2609 let mut matching_projections =
2610 a_data.projection_bounds().filter(|&hr_source_projection| {
2611 // Eager normalization means that we can just use can_eq
2612 // here instead of equating and processing obligations.
2613 hr_source_projection.item_def_id() == hr_target_projection.item_def_id()
2614 && self.infcx.probe(|_| {
2615 self.infcx
2616 .enter_forall(hr_target_projection, |target_projection| {
2617 let source_projection =
2618 self.infcx.instantiate_binder_with_fresh_vars(
2619 obligation.cause.span,
2620 HigherRankedType,
2621 hr_source_projection,
2622 );
2623 self.infcx
2624 .at(&obligation.cause, obligation.param_env)
2625 .eq_trace(
2626 DefineOpaqueTypes::Yes,
2627 ToTrace::to_trace(
2628 &obligation.cause,
2629 hr_target_projection,
2630 hr_source_projection,
2631 ),
2632 target_projection,
2633 source_projection,
2634 )
2635 })
2636 .is_ok()
2637 })
2638 });
2639
2640 let Some(hr_source_projection) = matching_projections.next() else {
2641 return Err(SelectionError::Unimplemented);
2642 };
2643 if matching_projections.next().is_some() {
2644 return Ok(None);
2645 }
2646 nested.extend(
2647 self.infcx
2648 .enter_forall(hr_target_projection, |target_projection| {
2649 let source_projection =
2650 self.infcx.instantiate_binder_with_fresh_vars(
2651 obligation.cause.span,
2652 HigherRankedType,
2653 hr_source_projection,
2654 );
2655 self.infcx.at(&obligation.cause, obligation.param_env).eq_trace(
2656 DefineOpaqueTypes::Yes,
2657 ToTrace::to_trace(
2658 &obligation.cause,
2659 hr_target_projection,
2660 hr_source_projection,
2661 ),
2662 target_projection,
2663 source_projection,
2664 )
2665 })
2666 .map_err(|_| SelectionError::Unimplemented)?
2667 .into_obligations(),
2668 );
2669 }
2670 // Check that b_ty's auto traits are present in a_ty's bounds.
2671 ty::ExistentialPredicate::AutoTrait(def_id) => {
2672 if !a_auto_traits.contains(&def_id) {
2673 return Err(SelectionError::Unimplemented);
2674 }
2675 }
2676 }
2677 }
2678
2679 nested.push(Obligation::with_depth(
2680 tcx,
2681 obligation.cause.clone(),
2682 obligation.recursion_depth + 1,
2683 obligation.param_env,
2684 ty::Binder::dummy(ty::OutlivesPredicate(a_region, b_region)),
2685 ));
2686
2687 Ok(Some(nested))
2688 }
2689
2690 /// Normalize `where_clause_trait_ref` and try to match it against
2691 /// `obligation`. If successful, return any predicates that
2692 /// result from the normalization.
2693 fn match_where_clause_trait_ref(
2694 &mut self,
2695 obligation: &PolyTraitObligation<'tcx>,
2696 where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
2697 ) -> Result<PredicateObligations<'tcx>, ()> {
2698 self.match_poly_trait_ref(obligation, where_clause_trait_ref)
2699 }
2700
2701 /// Returns `Ok` if `poly_trait_ref` being true implies that the
2702 /// obligation is satisfied.
2703 #[instrument(skip(self), level = "debug")]
2704 fn match_poly_trait_ref(
2705 &mut self,
2706 obligation: &PolyTraitObligation<'tcx>,
2707 poly_trait_ref: ty::PolyTraitRef<'tcx>,
2708 ) -> Result<PredicateObligations<'tcx>, ()> {
2709 let predicate = self.infcx.enter_forall_and_leak_universe(obligation.predicate);
2710 let trait_ref = self.infcx.instantiate_binder_with_fresh_vars(
2711 obligation.cause.span,
2712 HigherRankedType,
2713 poly_trait_ref,
2714 );
2715 self.infcx
2716 .at(&obligation.cause, obligation.param_env)
2717 .eq(DefineOpaqueTypes::No, predicate.trait_ref, trait_ref)
2718 .map(|InferOk { obligations, .. }| obligations)
2719 .map_err(|_| ())
2720 }
2721
2722 ///////////////////////////////////////////////////////////////////////////
2723 // Miscellany
2724
2725 fn match_fresh_trait_refs(
2726 &self,
2727 previous: ty::PolyTraitPredicate<'tcx>,
2728 current: ty::PolyTraitPredicate<'tcx>,
2729 ) -> bool {
2730 let mut matcher = _match::MatchAgainstFreshVars::new(self.tcx());
2731 matcher.relate(previous, current).is_ok()
2732 }
2733
2734 fn push_stack<'o>(
2735 &mut self,
2736 previous_stack: TraitObligationStackList<'o, 'tcx>,
2737 obligation: &'o PolyTraitObligation<'tcx>,
2738 ) -> TraitObligationStack<'o, 'tcx> {
2739 let fresh_trait_pred = obligation.predicate.fold_with(&mut self.freshener);
2740
2741 let dfn = previous_stack.cache.next_dfn();
2742 let depth = previous_stack.depth() + 1;
2743 TraitObligationStack {
2744 obligation,
2745 fresh_trait_pred,
2746 reached_depth: Cell::new(depth),
2747 previous: previous_stack,
2748 dfn,
2749 depth,
2750 }
2751 }
2752
2753 #[instrument(skip(self), level = "debug")]
2754 fn closure_trait_ref_unnormalized(
2755 &mut self,
2756 self_ty: Ty<'tcx>,
2757 fn_trait_def_id: DefId,
2758 ) -> ty::PolyTraitRef<'tcx> {
2759 let ty::Closure(_, args) = *self_ty.kind() else {
2760 bug!("expected closure, found {self_ty}");
2761 };
2762 let closure_sig = args.as_closure().sig();
2763
2764 closure_trait_ref_and_return_type(
2765 self.tcx(),
2766 fn_trait_def_id,
2767 self_ty,
2768 closure_sig,
2769 util::TupleArgumentsFlag::No,
2770 )
2771 .map_bound(|(trait_ref, _)| trait_ref)
2772 }
2773
2774 /// Returns the obligations that are implied by instantiating an
2775 /// impl or trait. The obligations are instantiated and fully
2776 /// normalized. This is used when confirming an impl or default
2777 /// impl.
2778 #[instrument(level = "debug", skip(self, cause, param_env))]
2779 fn impl_or_trait_obligations(
2780 &mut self,
2781 cause: &ObligationCause<'tcx>,
2782 recursion_depth: usize,
2783 param_env: ty::ParamEnv<'tcx>,
2784 def_id: DefId, // of impl or trait
2785 args: GenericArgsRef<'tcx>, // for impl or trait
2786 parent_trait_pred: ty::Binder<'tcx, ty::TraitPredicate<'tcx>>,
2787 ) -> PredicateObligations<'tcx> {
2788 let tcx = self.tcx();
2789
2790 // To allow for one-pass evaluation of the nested obligation,
2791 // each predicate must be preceded by the obligations required
2792 // to normalize it.
2793 // For example, if we have:
2794 // impl<U: Iterator<Item: Copy>, V: Iterator<Item = U>> Foo for V
2795 // the impl will have the following predicates:
2796 // <V as Iterator>::Item = U,
2797 // U: Iterator, U: Sized,
2798 // V: Iterator, V: Sized,
2799 // <U as Iterator>::Item: Copy
2800 // When we instantiate, say, `V => IntoIter<u32>, U => $0`, the last
2801 // obligation will normalize to `<$0 as Iterator>::Item = $1` and
2802 // `$1: Copy`, so we must ensure the obligations are emitted in
2803 // that order.
2804 let predicates = tcx.predicates_of(def_id);
2805 assert_eq!(predicates.parent, None);
2806 let predicates = predicates.instantiate_own(tcx, args);
2807 let mut obligations = PredicateObligations::with_capacity(predicates.len());
2808 for (index, (predicate, span)) in predicates.into_iter().enumerate() {
2809 let cause = if tcx.is_lang_item(parent_trait_pred.def_id(), LangItem::CoerceUnsized) {
2810 cause.clone()
2811 } else {
2812 cause.clone().derived_cause(parent_trait_pred, |derived| {
2813 ObligationCauseCode::ImplDerived(Box::new(ImplDerivedCause {
2814 derived,
2815 impl_or_alias_def_id: def_id,
2816 impl_def_predicate_index: Some(index),
2817 span,
2818 }))
2819 })
2820 };
2821 let clause = normalize_with_depth_to(
2822 self,
2823 param_env,
2824 cause.clone(),
2825 recursion_depth,
2826 predicate,
2827 &mut obligations,
2828 );
2829 obligations.push(Obligation {
2830 cause,
2831 recursion_depth,
2832 param_env,
2833 predicate: clause.as_predicate(),
2834 });
2835 }
2836
2837 // Register any outlives obligations from the trait here, cc #124336.
2838 if matches!(tcx.def_kind(def_id), DefKind::Impl { of_trait: true }) {
2839 for clause in tcx.impl_super_outlives(def_id).iter_instantiated(tcx, args) {
2840 let clause = normalize_with_depth_to(
2841 self,
2842 param_env,
2843 cause.clone(),
2844 recursion_depth,
2845 clause,
2846 &mut obligations,
2847 );
2848 obligations.push(Obligation {
2849 cause: cause.clone(),
2850 recursion_depth,
2851 param_env,
2852 predicate: clause.as_predicate(),
2853 });
2854 }
2855 }
2856
2857 obligations
2858 }
2859}
2860
2861fn rebind_coroutine_witness_types<'tcx>(
2862 tcx: TyCtxt<'tcx>,
2863 def_id: DefId,
2864 args: ty::GenericArgsRef<'tcx>,
2865 bound_vars: &'tcx ty::List<ty::BoundVariableKind>,
2866) -> ty::Binder<'tcx, Vec<Ty<'tcx>>> {
2867 let bound_coroutine_types = tcx.coroutine_hidden_types(def_id).skip_binder();
2868 let shifted_coroutine_types =
2869 tcx.shift_bound_var_indices(bound_vars.len(), bound_coroutine_types.skip_binder());
2870 ty::Binder::bind_with_vars(
2871 ty::EarlyBinder::bind(shifted_coroutine_types.types.to_vec()).instantiate(tcx, args),
2872 tcx.mk_bound_variable_kinds_from_iter(
2873 bound_vars.iter().chain(bound_coroutine_types.bound_vars()),
2874 ),
2875 )
2876}
2877
2878impl<'o, 'tcx> TraitObligationStack<'o, 'tcx> {
2879 fn list(&'o self) -> TraitObligationStackList<'o, 'tcx> {
2880 TraitObligationStackList::with(self)
2881 }
2882
2883 fn cache(&self) -> &'o ProvisionalEvaluationCache<'tcx> {
2884 self.previous.cache
2885 }
2886
2887 fn iter(&'o self) -> TraitObligationStackList<'o, 'tcx> {
2888 self.list()
2889 }
2890
2891 /// Indicates that attempting to evaluate this stack entry
2892 /// required accessing something from the stack at depth `reached_depth`.
2893 fn update_reached_depth(&self, reached_depth: usize) {
2894 assert!(
2895 self.depth >= reached_depth,
2896 "invoked `update_reached_depth` with something under this stack: \
2897 self.depth={} reached_depth={}",
2898 self.depth,
2899 reached_depth,
2900 );
2901 debug!(reached_depth, "update_reached_depth");
2902 let mut p = self;
2903 while reached_depth < p.depth {
2904 debug!(?p.fresh_trait_pred, "update_reached_depth: marking as cycle participant");
2905 p.reached_depth.set(p.reached_depth.get().min(reached_depth));
2906 p = p.previous.head.unwrap();
2907 }
2908 }
2909}
2910
2911/// The "provisional evaluation cache" is used to store intermediate cache results
2912/// when solving auto traits. Auto traits are unusual in that they can support
2913/// cycles. So, for example, a "proof tree" like this would be ok:
2914///
2915/// - `Foo<T>: Send` :-
2916/// - `Bar<T>: Send` :-
2917/// - `Foo<T>: Send` -- cycle, but ok
2918/// - `Baz<T>: Send`
2919///
2920/// Here, to prove `Foo<T>: Send`, we have to prove `Bar<T>: Send` and
2921/// `Baz<T>: Send`. Proving `Bar<T>: Send` in turn required `Foo<T>: Send`.
2922/// For non-auto traits, this cycle would be an error, but for auto traits (because
2923/// they are coinductive) it is considered ok.
2924///
2925/// However, there is a complication: at the point where we have
2926/// "proven" `Bar<T>: Send`, we have in fact only proven it
2927/// *provisionally*. In particular, we proved that `Bar<T>: Send`
2928/// *under the assumption* that `Foo<T>: Send`. But what if we later
2929/// find out this assumption is wrong? Specifically, we could
2930/// encounter some kind of error proving `Baz<T>: Send`. In that case,
2931/// `Bar<T>: Send` didn't turn out to be true.
2932///
2933/// In Issue #60010, we found a bug in rustc where it would cache
2934/// these intermediate results. This was fixed in #60444 by disabling
2935/// *all* caching for things involved in a cycle -- in our example,
2936/// that would mean we don't cache that `Bar<T>: Send`. But this led
2937/// to large slowdowns.
2938///
2939/// Specifically, imagine this scenario, where proving `Baz<T>: Send`
2940 /// first requires proving `Bar<T>: Send` (which is true):
2941///
2942/// - `Foo<T>: Send` :-
2943/// - `Bar<T>: Send` :-
2944/// - `Foo<T>: Send` -- cycle, but ok
2945/// - `Baz<T>: Send`
2946/// - `Bar<T>: Send` -- would be nice for this to be a cache hit!
2947/// - `*const T: Send` -- but what if we later encounter an error?
2948///
2949/// The *provisional evaluation cache* resolves this issue. It stores
2950/// cache results that we've proven but which were involved in a cycle
2951/// in some way. We track the minimal stack depth (i.e., the
2952/// farthest from the top of the stack) that we are dependent on.
2953/// The idea is that the cache results within are all valid -- so long as
2954/// none of the nodes in between the current node and the node at that minimum
2955/// depth result in an error (in which case the cached results are just thrown away).
2956///
2957/// During evaluation, we consult this provisional cache and rely on
2958/// it. Accessing a cached value is considered equivalent to accessing
2959/// a result at `reached_depth`, so it marks the *current* solution as
2960/// provisional as well. If an error is encountered, we toss out any
2961/// provisional results added from the subtree that encountered the
2962/// error. When we pop the node at `reached_depth` from the stack, we
2963/// can commit all the things that remain in the provisional cache.
2964struct ProvisionalEvaluationCache<'tcx> {
2965 /// next "depth first number" to issue -- just a counter
2966 dfn: Cell<usize>,
2967
2968 /// Map from cache key to the provisionally evaluated thing.
2969 /// The cache entries contain the result but also the DFN in which they
2970 /// were added. The DFN is used to clear out values on failure.
2971 ///
2972 /// Imagine we have a stack like:
2973 ///
2974 /// - `A B C` and we add a cache for the result of C (DFN 2)
2975 /// - Then we have a stack `A B D` where `D` has DFN 3
2976 /// - We try to solve D by evaluating E: `A B D E` (DFN 4)
2977 /// - `E` generates various cache entries which have cyclic dependencies on `B`
2978 /// - `A B D E F` and so forth
2979 /// - the DFN of `F` for example would be 5
2980 /// - then we determine that `E` is in error -- we will then clear
2981 /// all cache values whose DFN is >= 4 -- in this case, that
2982 /// means the cached value for `F`.
2983 map: RefCell<FxIndexMap<ty::PolyTraitPredicate<'tcx>, ProvisionalEvaluation>>,
2984
2985 /// The stack of terms that we assume to be well-formed because a `WF(term)` predicate
2986 /// is on the stack above (and because of wellformedness is coinductive).
2987 /// In an "ideal" world, this would share a stack with trait predicates in
2988 /// `TraitObligationStack`. However, trait predicates are *much* hotter than
2989 /// `WellFormed` predicates, and it's very likely that the additional matches
2990 /// will have a perf effect. The value here is the well-formed `Term`
2991 /// and the depth of the trait predicate *above* that well-formed predicate.
2992 wf_args: RefCell<Vec<(ty::Term<'tcx>, usize)>>,
2993}
2994
2995/// A cache value for the provisional cache: contains the depth-first
2996/// number (DFN) and result.
2997#[derive(Copy, Clone, Debug)]
2998struct ProvisionalEvaluation {
2999 from_dfn: usize,
3000 reached_depth: usize,
3001 result: EvaluationResult,
3002}
3003
3004impl<'tcx> Default for ProvisionalEvaluationCache<'tcx> {
3005 fn default() -> Self {
3006 Self { dfn: Cell::new(0), map: Default::default(), wf_args: Default::default() }
3007 }
3008}
3009
3010impl<'tcx> ProvisionalEvaluationCache<'tcx> {
3011 /// Get the next DFN in sequence (basically a counter).
3012 fn next_dfn(&self) -> usize {
3013 let result = self.dfn.get();
3014 self.dfn.set(result + 1);
3015 result
3016 }
3017
3018 /// Check the provisional cache for any result for
3019 /// `fresh_trait_pred`. If there is a hit, then you must consider
3020 /// it an access to the stack slots at depth
3021 /// `reached_depth` (from the returned value).
3022 fn get_provisional(
3023 &self,
3024 fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
3025 ) -> Option<ProvisionalEvaluation> {
3026 debug!(
3027 ?fresh_trait_pred,
3028 "get_provisional = {:#?}",
3029 self.map.borrow().get(&fresh_trait_pred),
3030 );
3031 Some(*self.map.borrow().get(&fresh_trait_pred)?)
3032 }
3033
3034 /// Insert a provisional result into the cache. The result came
3035 /// from the node with the given DFN. It accessed a minimum depth
3036 /// of `reached_depth` to compute. It evaluated `fresh_trait_pred`
3037 /// and resulted in `result`.
3038 fn insert_provisional(
3039 &self,
3040 from_dfn: usize,
3041 reached_depth: usize,
3042 fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
3043 result: EvaluationResult,
3044 ) {
3045 debug!(?from_dfn, ?fresh_trait_pred, ?result, "insert_provisional");
3046
3047 let mut map = self.map.borrow_mut();
3048
3049 // Subtle: when we complete working on the DFN `from_dfn`, anything
3050 // that remains in the provisional cache must be dependent on some older
3051 // stack entry than `from_dfn`. We have to update their depth with our transitive
3052 // depth in that case or else it would be referring to some popped node.
3053 //
3054 // Example:
3055 // A (reached depth 0)
3056 // ...
3057 // B // depth 1 -- reached depth = 0
3058 // C // depth 2 -- reached depth = 1 (should be 0)
3059 // B
3060 // A // depth 0
3061 // D (reached depth 1)
3062 // C (cache -- reached depth = 2)
3063 for (_k, v) in &mut *map {
3064 if v.from_dfn >= from_dfn {
3065 v.reached_depth = reached_depth.min(v.reached_depth);
3066 }
3067 }
3068
3069 map.insert(fresh_trait_pred, ProvisionalEvaluation { from_dfn, reached_depth, result });
3070 }
3071
3072 /// Invoked when the node with dfn `dfn` does not get a successful
3073 /// result. This will clear out any provisional cache entries
3074 /// that were added since `dfn` was created. This is because the
3075 /// provisional entries are things which must assume that the
3076 /// things on the stack at the time of their creation succeeded --
3077 /// since the failing node is presently at the top of the stack,
3078 /// these provisional entries must either depend on it or some
3079 /// ancestor of it.
3080 fn on_failure(&self, dfn: usize) {
3081 debug!(?dfn, "on_failure");
3082 self.map.borrow_mut().retain(|key, eval| {
3083 if eval.from_dfn >= dfn {
3084 debug!("on_failure: removing {:?}", key);
3085 false
3086 } else {
3087 true
3088 }
3089 });
3090 }
3091
3092 /// Invoked when the node at depth `depth` completed without
3093 /// depending on anything higher in the stack (if that completion
3094 /// was a failure, then `on_failure` should have been invoked
3095 /// already).
3096 ///
3097 /// Note that we may still have provisional cache items remaining
3098 /// in the cache when this is done. For example, if there is a
3099 /// cycle:
3100 ///
3101 /// * A depends on...
3102 /// * B depends on A
3103 /// * C depends on...
3104 /// * D depends on C
3105 /// * ...
3106 ///
3107 /// Then as we complete the C node we will have a provisional cache
3108 /// with results for A, B, C, and D. This method would clear out
3109 /// the C and D results, but leave A and B provisional.
3110 ///
3111 /// This is determined based on the DFN: we remove any provisional
3112 /// results created since `dfn` started (e.g., in our example, dfn
3113 /// would be 2, representing the C node, and hence we would
3114 /// remove the result for D, which has DFN 3, but not the results for
3115 /// A and B, which have DFNs 0 and 1 respectively).
3116 ///
3117 /// Note that we *do not* attempt to cache these cycle participants
3118 /// in the evaluation cache. Doing so would require carefully computing
3119 /// the correct `DepNode` to store in the cache entry:
3120 /// cycle participants may implicitly depend on query results
3121 /// related to other participants in the cycle, due to our logic
3122 /// which examines the evaluation stack.
3123 ///
3124 /// We used to try to perform this caching,
3125 /// but it led to multiple incremental compilation ICEs
3126 /// (see #92987 and #96319), and was very hard to understand.
3127 /// Fortunately, removing the caching didn't seem to
3128 /// have a performance impact in practice.
3129 fn on_completion(&self, dfn: usize) {
3130 debug!(?dfn, "on_completion");
3131 self.map.borrow_mut().retain(|fresh_trait_pred, eval| {
3132 if eval.from_dfn >= dfn {
3133 debug!(?fresh_trait_pred, ?eval, "on_completion");
3134 return false;
3135 }
3136 true
3137 });
3138 }
3139}
3140
3141#[derive(Copy, Clone)]
3142struct TraitObligationStackList<'o, 'tcx> {
3143 cache: &'o ProvisionalEvaluationCache<'tcx>,
3144 head: Option<&'o TraitObligationStack<'o, 'tcx>>,
3145}
3146
3147impl<'o, 'tcx> TraitObligationStackList<'o, 'tcx> {
3148 fn empty(cache: &'o ProvisionalEvaluationCache<'tcx>) -> TraitObligationStackList<'o, 'tcx> {
3149 TraitObligationStackList { cache, head: None }
3150 }
3151
3152 fn with(r: &'o TraitObligationStack<'o, 'tcx>) -> TraitObligationStackList<'o, 'tcx> {
3153 TraitObligationStackList { cache: r.cache(), head: Some(r) }
3154 }
3155
3156 fn head(&self) -> Option<&'o TraitObligationStack<'o, 'tcx>> {
3157 self.head
3158 }
3159
3160 fn depth(&self) -> usize {
3161 if let Some(head) = self.head { head.depth } else { 0 }
3162 }
3163}
3164
3165impl<'o, 'tcx> Iterator for TraitObligationStackList<'o, 'tcx> {
3166 type Item = &'o TraitObligationStack<'o, 'tcx>;
3167
3168 fn next(&mut self) -> Option<&'o TraitObligationStack<'o, 'tcx>> {
3169 let o = self.head?;
3170 *self = o.previous;
3171 Some(o)
3172 }
3173}
3174
3175impl<'o, 'tcx> fmt::Debug for TraitObligationStack<'o, 'tcx> {
3176 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
3177 write!(f, "TraitObligationStack({:?})", self.obligation)
3178 }
3179}
3180
3181pub(crate) enum ProjectionMatchesProjection {
3182 Yes,
3183 Ambiguous,
3184 No,
3185}