// rustc_mir_transform/validate.rs

1//! Validates the MIR to ensure that invariants are upheld.
2
3use rustc_abi::{ExternAbi, FIRST_VARIANT, Size};
4use rustc_attr_data_structures::InlineAttr;
5use rustc_data_structures::fx::{FxHashMap, FxHashSet};
6use rustc_hir::LangItem;
7use rustc_index::IndexVec;
8use rustc_index::bit_set::DenseBitSet;
9use rustc_infer::infer::TyCtxtInferExt;
10use rustc_infer::traits::{Obligation, ObligationCause};
11use rustc_middle::mir::coverage::CoverageKind;
12use rustc_middle::mir::visit::{NonUseContext, PlaceContext, Visitor};
13use rustc_middle::mir::*;
14use rustc_middle::ty::adjustment::PointerCoercion;
15use rustc_middle::ty::print::with_no_trimmed_paths;
16use rustc_middle::ty::{
17    self, CoroutineArgsExt, InstanceKind, ScalarInt, Ty, TyCtxt, TypeVisitableExt, Upcast, Variance,
18};
19use rustc_middle::{bug, span_bug};
20use rustc_trait_selection::traits::ObligationCtxt;
21
22use crate::util::{self, is_within_packed};
23
/// Classifies a control-flow edge between two basic blocks, so that
/// `CfgChecker::check_edge` can enforce the cleanup-block invariants
/// (unwind edges must go from non-cleanup into cleanup code).
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum EdgeKind {
    /// An edge taken when unwinding (e.g. an `UnwindAction::Cleanup` target).
    Unwind,
    /// An ordinary control-flow edge (goto, switch target, call return, ...).
    Normal,
}
29
/// The MIR validation pass. Checks structural CFG invariants via `CfgChecker`
/// and type-level invariants via `validate_types`, relative to the body's
/// current `MirPhase`.
pub(super) struct Validator {
    /// Describes at which point in the pipeline this validation is happening.
    pub when: String,
}
34
impl<'tcx> crate::MirPass<'tcx> for Validator {
    /// Runs both checkers over `body`.
    ///
    /// Intrinsic/virtual shims are skipped entirely; error bodies bail out while
    /// computing the ABI below. `can_unwind` is computed here once and threaded
    /// into the `CfgChecker`, which uses it to reject `UnwindAction::Continue`
    /// and `TerminatorKind::UnwindResume` in functions that cannot unwind.
    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
        // FIXME(JakobDegen): These bodies never instantiated in codegend anyway, so it's not
        // terribly important that they pass the validator. However, I think other passes might
        // still see them, in which case they might be surprised. It would probably be better if we
        // didn't put this through the MIR pipeline at all.
        if matches!(body.source.instance, InstanceKind::Intrinsic(..) | InstanceKind::Virtual(..)) {
            return;
        }
        let def_id = body.source.def_id();
        let typing_env = body.typing_env(tcx);
        // Decide whether this body may unwind. Before `AbortUnwindingCalls` has
        // run (i.e. up to `RuntimePhase::Initial`), or for non-fn-like items, we
        // conservatively say yes; afterwards we consult the function's ABI.
        let can_unwind = if body.phase <= MirPhase::Runtime(RuntimePhase::Initial) {
            // In this case `AbortUnwindingCalls` haven't yet been executed.
            true
        } else if !tcx.def_kind(def_id).is_fn_like() {
            true
        } else {
            let body_ty = tcx.type_of(def_id).skip_binder();
            let body_abi = match body_ty.kind() {
                ty::FnDef(..) => body_ty.fn_sig(tcx).abi(),
                ty::Closure(..) => ExternAbi::RustCall,
                ty::CoroutineClosure(..) => ExternAbi::RustCall,
                ty::Coroutine(..) => ExternAbi::Rust,
                // No need to do MIR validation on error bodies
                ty::Error(_) => return,
                _ => span_bug!(body.span, "unexpected body ty: {body_ty}"),
            };

            ty::layout::fn_can_unwind(tcx, Some(def_id), body_abi)
        };

        let mut cfg_checker = CfgChecker {
            when: &self.when,
            body,
            tcx,
            unwind_edge_count: 0,
            reachable_blocks: traversal::reachable_as_bitset(body),
            value_cache: FxHashSet::default(),
            can_unwind,
        };
        cfg_checker.visit_body(body);
        cfg_checker.check_cleanup_control_flow();

        // Also run the TypeChecker.
        for (location, msg) in validate_types(tcx, typing_env, body, body) {
            cfg_checker.fail(location, msg);
        }

        // Runtime MIR of ordinary items must be fully monomorphic with respect
        // to regions; report any free regions that survived to this point.
        if let MirPhase::Runtime(_) = body.phase {
            if let ty::InstanceKind::Item(_) = body.source.instance {
                if body.has_free_regions() {
                    cfg_checker.fail(
                        Location::START,
                        format!("Free regions in optimized {} MIR", body.phase.name()),
                    );
                }
            }
        }
    }

    fn is_required(&self) -> bool {
        true
    }
}
99
/// This checker covers basic properties of the control-flow graph, (dis)allowed statements and terminators.
/// Everything checked here must be stable under substitution of generic parameters. In other words,
/// this is about the *structure* of the MIR, not the *contents*.
///
/// Everything that depends on types, or otherwise can be affected by generic parameters,
/// must be checked in `TypeChecker`.
struct CfgChecker<'a, 'tcx> {
    /// Describes at which point in the pipeline this validation is happening.
    when: &'a str,
    /// The body being validated (read-only).
    body: &'a Body<'tcx>,
    tcx: TyCtxt<'tcx>,
    /// Number of non-cleanup -> cleanup unwind edges seen so far; used by
    /// `check_cleanup_control_flow` to skip its work when there is at most one.
    unwind_edge_count: usize,
    /// Blocks reachable from the start block; unreachable cleanup blocks are
    /// exempt from the cleanup control-flow check.
    reachable_blocks: DenseBitSet<BasicBlock>,
    /// Scratch set reused to detect duplicate `SwitchInt` values.
    value_cache: FxHashSet<u128>,
    // If `false`, then the MIR must not contain `UnwindAction::Continue` or
    // `TerminatorKind::Resume`.
    can_unwind: bool,
}
117
impl<'a, 'tcx> CfgChecker<'a, 'tcx> {
    /// Reports broken MIR at `location`.
    ///
    /// Broken MIR is only tolerated when compilation has already emitted errors;
    /// otherwise the assertion fails (ICE), printing the instance, the pipeline
    /// point (`when`), the location, and the message.
    #[track_caller]
    fn fail(&self, location: Location, msg: impl AsRef<str>) {
        // We might see broken MIR when other errors have already occurred.
        assert!(
            self.tcx.dcx().has_errors().is_some(),
            "broken MIR in {:?} ({}) at {:?}:\n{}",
            self.body.source.instance,
            self.when,
            location,
            msg.as_ref(),
        );
    }

    /// Checks one CFG edge from `location` to `bb`: the target must exist, must
    /// not be the start block, and the cleanup-ness of source and target must be
    /// consistent with `edge_kind`. Counts valid unwind edges as a side effect.
    fn check_edge(&mut self, location: Location, bb: BasicBlock, edge_kind: EdgeKind) {
        if bb == START_BLOCK {
            self.fail(location, "start block must not have predecessors")
        }
        if let Some(bb) = self.body.basic_blocks.get(bb) {
            let src = self.body.basic_blocks.get(location.block).unwrap();
            match (src.is_cleanup, bb.is_cleanup, edge_kind) {
                // Non-cleanup blocks can jump to non-cleanup blocks along non-unwind edges
                (false, false, EdgeKind::Normal)
                // Cleanup blocks can jump to cleanup blocks along non-unwind edges
                | (true, true, EdgeKind::Normal) => {}
                // Non-cleanup blocks can jump to cleanup blocks along unwind edges
                (false, true, EdgeKind::Unwind) => {
                    self.unwind_edge_count += 1;
                }
                // All other jumps are invalid
                _ => {
                    self.fail(
                        location,
                        format!(
                            "{:?} edge to {:?} violates unwind invariants (cleanup {:?} -> {:?})",
                            edge_kind,
                            bb,
                            src.is_cleanup,
                            bb.is_cleanup,
                        )
                    )
                }
            }
        } else {
            self.fail(location, format!("encountered jump to invalid basic block {bb:?}"))
        }
    }

    /// Checks that the cleanup portion of the CFG is well-shaped.
    ///
    /// Each reachable cleanup block is contracted to a representative "root" by
    /// walking up immediate dominators while the dominator is still a cleanup
    /// block. After contraction, every root may have edges to at most one other
    /// root (otherwise two distinct successors are reported), and the resulting
    /// parent relation between roots must be acyclic.
    fn check_cleanup_control_flow(&self) {
        // With at most one unwind edge there is nothing that can go wrong here.
        if self.unwind_edge_count <= 1 {
            return;
        }
        let doms = self.body.basic_blocks.dominators();
        // Memoizes each block's contraction root.
        let mut post_contract_node = FxHashMap::default();
        // Reusing the allocation across invocations of the closure
        let mut dom_path = vec![];
        let mut get_post_contract_node = |mut bb| {
            let root = loop {
                if let Some(root) = post_contract_node.get(&bb) {
                    break *root;
                }
                let parent = doms.immediate_dominator(bb).unwrap();
                dom_path.push(bb);
                // Stop contracting once the dominator leaves the cleanup subgraph.
                if !self.body.basic_blocks[parent].is_cleanup {
                    break bb;
                }
                bb = parent;
            };
            // Cache the root for every block on the walked path.
            for bb in dom_path.drain(..) {
                post_contract_node.insert(bb, root);
            }
            root
        };

        // `parent[root]` is the unique successor root, once discovered.
        let mut parent = IndexVec::from_elem(None, &self.body.basic_blocks);
        for (bb, bb_data) in self.body.basic_blocks.iter_enumerated() {
            if !bb_data.is_cleanup || !self.reachable_blocks.contains(bb) {
                continue;
            }
            let bb = get_post_contract_node(bb);
            for s in bb_data.terminator().successors() {
                let s = get_post_contract_node(s);
                // Edges within the same contracted node are fine.
                if s == bb {
                    continue;
                }
                let parent = &mut parent[bb];
                match parent {
                    None => {
                        *parent = Some(s);
                    }
                    Some(e) if *e == s => (),
                    Some(e) => self.fail(
                        Location { block: bb, statement_index: 0 },
                        format!(
                            "Cleanup control flow violation: The blocks dominated by {:?} have edges to both {:?} and {:?}",
                            bb,
                            s,
                            *e
                        )
                    ),
                }
            }
        }

        // Check for cycles
        let mut stack = FxHashSet::default();
        for (mut bb, parent) in parent.iter_enumerated_mut() {
            stack.clear();
            stack.insert(bb);
            loop {
                let Some(parent) = parent.take() else { break };
                let no_cycle = stack.insert(parent);
                if !no_cycle {
                    self.fail(
                        Location { block: bb, statement_index: 0 },
                        format!(
                            "Cleanup control flow violation: Cycle involving edge {bb:?} -> {parent:?}",
                        ),
                    );
                    break;
                }
                bb = parent;
            }
        }
    }

    /// Validates an `UnwindAction` attached to the terminator at `location`:
    /// `Cleanup`/`Continue` are forbidden inside cleanup blocks, `Continue` is
    /// forbidden in no-unwind functions, and `Terminate(InCleanup)` is only
    /// permitted inside cleanup blocks.
    fn check_unwind_edge(&mut self, location: Location, unwind: UnwindAction) {
        let is_cleanup = self.body.basic_blocks[location.block].is_cleanup;
        match unwind {
            UnwindAction::Cleanup(unwind) => {
                if is_cleanup {
                    self.fail(location, "`UnwindAction::Cleanup` in cleanup block");
                }
                self.check_edge(location, unwind, EdgeKind::Unwind);
            }
            UnwindAction::Continue => {
                if is_cleanup {
                    self.fail(location, "`UnwindAction::Continue` in cleanup block");
                }

                if !self.can_unwind {
                    self.fail(location, "`UnwindAction::Continue` in no-unwind function");
                }
            }
            UnwindAction::Terminate(UnwindTerminateReason::InCleanup) => {
                if !is_cleanup {
                    self.fail(
                        location,
                        "`UnwindAction::Terminate(InCleanup)` in a non-cleanup block",
                    );
                }
            }
            // These are allowed everywhere.
            UnwindAction::Unreachable | UnwindAction::Terminate(UnwindTerminateReason::Abi) => (),
        }
    }

    /// Returns `true` if a `Call` with this `target`/`unwind` pair forms a
    /// critical edge: the call has both a return edge and an unwinding action,
    /// and the return target has more than one predecessor.
    fn is_critical_call_edge(&self, target: Option<BasicBlock>, unwind: UnwindAction) -> bool {
        let Some(target) = target else { return false };
        matches!(unwind, UnwindAction::Cleanup(_) | UnwindAction::Terminate(_))
            && self.body.basic_blocks.predecessors()[target].len() > 1
    }
}
281
282impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> {
283    fn visit_local(&mut self, local: Local, _context: PlaceContext, location: Location) {
284        if self.body.local_decls.get(local).is_none() {
285            self.fail(
286                location,
287                format!("local {local:?} has no corresponding declaration in `body.local_decls`"),
288            );
289        }
290    }
291
292    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
293        match &statement.kind {
294            StatementKind::AscribeUserType(..) => {
295                if self.body.phase >= MirPhase::Runtime(RuntimePhase::Initial) {
296                    self.fail(
297                        location,
298                        "`AscribeUserType` should have been removed after drop lowering phase",
299                    );
300                }
301            }
302            StatementKind::FakeRead(..) => {
303                if self.body.phase >= MirPhase::Runtime(RuntimePhase::Initial) {
304                    self.fail(
305                        location,
306                        "`FakeRead` should have been removed after drop lowering phase",
307                    );
308                }
309            }
310            StatementKind::SetDiscriminant { .. } => {
311                if self.body.phase < MirPhase::Runtime(RuntimePhase::Initial) {
312                    self.fail(location, "`SetDiscriminant`is not allowed until deaggregation");
313                }
314            }
315            StatementKind::Deinit(..) => {
316                if self.body.phase < MirPhase::Runtime(RuntimePhase::Initial) {
317                    self.fail(location, "`Deinit`is not allowed until deaggregation");
318                }
319            }
320            StatementKind::Retag(kind, _) => {
321                // FIXME(JakobDegen) The validator should check that `self.body.phase <
322                // DropsLowered`. However, this causes ICEs with generation of drop shims, which
323                // seem to fail to set their `MirPhase` correctly.
324                if matches!(kind, RetagKind::TwoPhase) {
325                    self.fail(location, format!("explicit `{kind:?}` is forbidden"));
326                }
327            }
328            StatementKind::Coverage(kind) => {
329                if self.body.phase >= MirPhase::Analysis(AnalysisPhase::PostCleanup)
330                    && let CoverageKind::BlockMarker { .. } | CoverageKind::SpanMarker { .. } = kind
331                {
332                    self.fail(
333                        location,
334                        format!("{kind:?} should have been removed after analysis"),
335                    );
336                }
337            }
338            StatementKind::Assign(..)
339            | StatementKind::StorageLive(_)
340            | StatementKind::StorageDead(_)
341            | StatementKind::Intrinsic(_)
342            | StatementKind::ConstEvalCounter
343            | StatementKind::PlaceMention(..)
344            | StatementKind::BackwardIncompatibleDropHint { .. }
345            | StatementKind::Nop => {}
346        }
347
348        self.super_statement(statement, location);
349    }
350
351    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
352        match &terminator.kind {
353            TerminatorKind::Goto { target } => {
354                self.check_edge(location, *target, EdgeKind::Normal);
355            }
356            TerminatorKind::SwitchInt { targets, discr: _ } => {
357                for (_, target) in targets.iter() {
358                    self.check_edge(location, target, EdgeKind::Normal);
359                }
360                self.check_edge(location, targets.otherwise(), EdgeKind::Normal);
361
362                self.value_cache.clear();
363                self.value_cache.extend(targets.iter().map(|(value, _)| value));
364                let has_duplicates = targets.iter().len() != self.value_cache.len();
365                if has_duplicates {
366                    self.fail(
367                        location,
368                        format!(
369                            "duplicated values in `SwitchInt` terminator: {:?}",
370                            terminator.kind,
371                        ),
372                    );
373                }
374            }
375            TerminatorKind::Drop { target, unwind, drop, .. } => {
376                self.check_edge(location, *target, EdgeKind::Normal);
377                self.check_unwind_edge(location, *unwind);
378                if let Some(drop) = drop {
379                    self.check_edge(location, *drop, EdgeKind::Normal);
380                }
381            }
382            TerminatorKind::Call { func, args, .. }
383            | TerminatorKind::TailCall { func, args, .. } => {
384                // FIXME(explicit_tail_calls): refactor this & add tail-call specific checks
385                if let TerminatorKind::Call { target, unwind, destination, .. } = terminator.kind {
386                    if let Some(target) = target {
387                        self.check_edge(location, target, EdgeKind::Normal);
388                    }
389                    self.check_unwind_edge(location, unwind);
390
391                    // The code generation assumes that there are no critical call edges. The
392                    // assumption is used to simplify inserting code that should be executed along
393                    // the return edge from the call. FIXME(tmiasko): Since this is a strictly code
394                    // generation concern, the code generation should be responsible for handling
395                    // it.
396                    if self.body.phase >= MirPhase::Runtime(RuntimePhase::Optimized)
397                        && self.is_critical_call_edge(target, unwind)
398                    {
399                        self.fail(
400                            location,
401                            format!(
402                                "encountered critical edge in `Call` terminator {:?}",
403                                terminator.kind,
404                            ),
405                        );
406                    }
407
408                    // The call destination place and Operand::Move place used as an argument might
409                    // be passed by a reference to the callee. Consequently they cannot be packed.
410                    if is_within_packed(self.tcx, &self.body.local_decls, destination).is_some() {
411                        // This is bad! The callee will expect the memory to be aligned.
412                        self.fail(
413                            location,
414                            format!(
415                                "encountered packed place in `Call` terminator destination: {:?}",
416                                terminator.kind,
417                            ),
418                        );
419                    }
420                }
421
422                for arg in args {
423                    if let Operand::Move(place) = &arg.node {
424                        if is_within_packed(self.tcx, &self.body.local_decls, *place).is_some() {
425                            // This is bad! The callee will expect the memory to be aligned.
426                            self.fail(
427                                location,
428                                format!(
429                                    "encountered `Move` of a packed place in `Call` terminator: {:?}",
430                                    terminator.kind,
431                                ),
432                            );
433                        }
434                    }
435                }
436
437                if let ty::FnDef(did, ..) = func.ty(&self.body.local_decls, self.tcx).kind()
438                    && self.body.phase >= MirPhase::Runtime(RuntimePhase::Optimized)
439                    && matches!(self.tcx.codegen_fn_attrs(did).inline, InlineAttr::Force { .. })
440                {
441                    self.fail(location, "`#[rustc_force_inline]`-annotated function not inlined");
442                }
443            }
444            TerminatorKind::Assert { target, unwind, .. } => {
445                self.check_edge(location, *target, EdgeKind::Normal);
446                self.check_unwind_edge(location, *unwind);
447            }
448            TerminatorKind::Yield { resume, drop, .. } => {
449                if self.body.coroutine.is_none() {
450                    self.fail(location, "`Yield` cannot appear outside coroutine bodies");
451                }
452                if self.body.phase >= MirPhase::Runtime(RuntimePhase::Initial) {
453                    self.fail(location, "`Yield` should have been replaced by coroutine lowering");
454                }
455                self.check_edge(location, *resume, EdgeKind::Normal);
456                if let Some(drop) = drop {
457                    self.check_edge(location, *drop, EdgeKind::Normal);
458                }
459            }
460            TerminatorKind::FalseEdge { real_target, imaginary_target } => {
461                if self.body.phase >= MirPhase::Runtime(RuntimePhase::Initial) {
462                    self.fail(
463                        location,
464                        "`FalseEdge` should have been removed after drop elaboration",
465                    );
466                }
467                self.check_edge(location, *real_target, EdgeKind::Normal);
468                self.check_edge(location, *imaginary_target, EdgeKind::Normal);
469            }
470            TerminatorKind::FalseUnwind { real_target, unwind } => {
471                if self.body.phase >= MirPhase::Runtime(RuntimePhase::Initial) {
472                    self.fail(
473                        location,
474                        "`FalseUnwind` should have been removed after drop elaboration",
475                    );
476                }
477                self.check_edge(location, *real_target, EdgeKind::Normal);
478                self.check_unwind_edge(location, *unwind);
479            }
480            TerminatorKind::InlineAsm { targets, unwind, .. } => {
481                for &target in targets {
482                    self.check_edge(location, target, EdgeKind::Normal);
483                }
484                self.check_unwind_edge(location, *unwind);
485            }
486            TerminatorKind::CoroutineDrop => {
487                if self.body.coroutine.is_none() {
488                    self.fail(location, "`CoroutineDrop` cannot appear outside coroutine bodies");
489                }
490                if self.body.phase >= MirPhase::Runtime(RuntimePhase::Initial) {
491                    self.fail(
492                        location,
493                        "`CoroutineDrop` should have been replaced by coroutine lowering",
494                    );
495                }
496            }
497            TerminatorKind::UnwindResume => {
498                let bb = location.block;
499                if !self.body.basic_blocks[bb].is_cleanup {
500                    self.fail(location, "Cannot `UnwindResume` from non-cleanup basic block")
501                }
502                if !self.can_unwind {
503                    self.fail(location, "Cannot `UnwindResume` in a function that cannot unwind")
504                }
505            }
506            TerminatorKind::UnwindTerminate(_) => {
507                let bb = location.block;
508                if !self.body.basic_blocks[bb].is_cleanup {
509                    self.fail(location, "Cannot `UnwindTerminate` from non-cleanup basic block")
510                }
511            }
512            TerminatorKind::Return => {
513                let bb = location.block;
514                if self.body.basic_blocks[bb].is_cleanup {
515                    self.fail(location, "Cannot `Return` from cleanup basic block")
516                }
517            }
518            TerminatorKind::Unreachable => {}
519        }
520
521        self.super_terminator(terminator, location);
522    }
523
524    fn visit_source_scope(&mut self, scope: SourceScope) {
525        if self.body.source_scopes.get(scope).is_none() {
526            self.tcx.dcx().span_bug(
527                self.body.span,
528                format!(
529                    "broken MIR in {:?} ({}):\ninvalid source scope {:?}",
530                    self.body.source.instance, self.when, scope,
531                ),
532            );
533        }
534    }
535}
536
537/// A faster version of the validation pass that only checks those things which may break when
538/// instantiating any generic parameters.
539///
540/// `caller_body` is used to detect cycles in MIR inlining and MIR validation before
541/// `optimized_mir` is available.
542pub(super) fn validate_types<'tcx>(
543    tcx: TyCtxt<'tcx>,
544    typing_env: ty::TypingEnv<'tcx>,
545    body: &Body<'tcx>,
546    caller_body: &Body<'tcx>,
547) -> Vec<(Location, String)> {
548    let mut type_checker = TypeChecker { body, caller_body, tcx, typing_env, failures: Vec::new() };
549    // The type checker formats a bunch of strings with type names in it, but these strings
550    // are not always going to be encountered on the error path since the inliner also uses
551    // the validator, and there are certain kinds of inlining (even for valid code) that
552    // can cause validation errors (mostly around where clauses and rigid projections).
553    with_no_trimmed_paths!({
554        type_checker.visit_body(body);
555    });
556    type_checker.failures
557}
558
/// Checks type-level MIR invariants — everything that can be affected by
/// substitution of generic parameters. Structural CFG properties are the
/// `CfgChecker`'s job instead.
struct TypeChecker<'a, 'tcx> {
    /// The body being validated.
    body: &'a Body<'tcx>,
    /// The body from whose context validation was initiated (used by the
    /// inliner-related checks; may be the same as `body`).
    caller_body: &'a Body<'tcx>,
    tcx: TyCtxt<'tcx>,
    typing_env: ty::TypingEnv<'tcx>,
    /// Accumulated `(location, message)` validation failures.
    failures: Vec<(Location, String)>,
}
566
567impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
568    fn fail(&mut self, location: Location, msg: impl Into<String>) {
569        self.failures.push((location, msg.into()));
570    }
571
572    /// Check if src can be assigned into dest.
573    /// This is not precise, it will accept some incorrect assignments.
574    fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
575        // Fast path before we normalize.
576        if src == dest {
577            // Equal types, all is good.
578            return true;
579        }
580
581        // We sometimes have to use `defining_opaque_types` for subtyping
582        // to succeed here and figuring out how exactly that should work
583        // is annoying. It is harmless enough to just not validate anything
584        // in that case. We still check this after analysis as all opaque
585        // types have been revealed at this point.
586        if (src, dest).has_opaque_types() {
587            return true;
588        }
589
590        // After borrowck subtyping should be fully explicit via
591        // `Subtype` projections.
592        let variance = if self.body.phase >= MirPhase::Runtime(RuntimePhase::Initial) {
593            Variance::Invariant
594        } else {
595            Variance::Covariant
596        };
597
598        crate::util::relate_types(self.tcx, self.typing_env, variance, src, dest)
599    }
600
601    /// Check that the given predicate definitely holds in the param-env of this MIR body.
602    fn predicate_must_hold_modulo_regions(
603        &self,
604        pred: impl Upcast<TyCtxt<'tcx>, ty::Predicate<'tcx>>,
605    ) -> bool {
606        let pred: ty::Predicate<'tcx> = pred.upcast(self.tcx);
607
608        // We sometimes have to use `defining_opaque_types` for predicates
609        // to succeed here and figuring out how exactly that should work
610        // is annoying. It is harmless enough to just not validate anything
611        // in that case. We still check this after analysis as all opaque
612        // types have been revealed at this point.
613        if pred.has_opaque_types() {
614            return true;
615        }
616
617        let (infcx, param_env) = self.tcx.infer_ctxt().build_with_typing_env(self.typing_env);
618        let ocx = ObligationCtxt::new(&infcx);
619        ocx.register_obligation(Obligation::new(
620            self.tcx,
621            ObligationCause::dummy(),
622            param_env,
623            pred,
624        ));
625        ocx.select_all_or_error().is_empty()
626    }
627}
628
629impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
    /// Checks that `Operand::Copy` is only used with types that are `Copy`.
    ///
    /// The `type_is_copy_modulo_regions` query makes this check comparatively
    /// expensive, so it only runs when `-Zvalidate-mir` is passed, and only
    /// before the runtime MIR phase.
    fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
        // This check is somewhat expensive, so only run it when -Zvalidate-mir is passed.
        if self.tcx.sess.opts.unstable_opts.validate_mir
            && self.body.phase < MirPhase::Runtime(RuntimePhase::Initial)
        {
            // `Operand::Copy` is only supposed to be used with `Copy` types.
            if let Operand::Copy(place) = operand {
                let ty = place.ty(&self.body.local_decls, self.tcx).ty;

                if !self.tcx.type_is_copy_modulo_regions(self.typing_env, ty) {
                    self.fail(location, format!("`Operand::Copy` with non-`Copy` type {ty}"));
                }
            }
        }

        self.super_operand(operand, location);
    }
647
648    fn visit_projection_elem(
649        &mut self,
650        place_ref: PlaceRef<'tcx>,
651        elem: PlaceElem<'tcx>,
652        context: PlaceContext,
653        location: Location,
654    ) {
655        match elem {
656            ProjectionElem::OpaqueCast(ty)
657                if self.body.phase >= MirPhase::Runtime(RuntimePhase::Initial) =>
658            {
659                self.fail(
660                    location,
661                    format!("explicit opaque type cast to `{ty}` after `PostAnalysisNormalize`"),
662                )
663            }
664            ProjectionElem::Index(index) => {
665                let index_ty = self.body.local_decls[index].ty;
666                if index_ty != self.tcx.types.usize {
667                    self.fail(location, format!("bad index ({index_ty} != usize)"))
668                }
669            }
670            ProjectionElem::Deref
671                if self.body.phase >= MirPhase::Runtime(RuntimePhase::PostCleanup) =>
672            {
673                let base_ty = place_ref.ty(&self.body.local_decls, self.tcx).ty;
674
675                if base_ty.is_box() {
676                    self.fail(location, format!("{base_ty} dereferenced after ElaborateBoxDerefs"))
677                }
678            }
679            ProjectionElem::Field(f, ty) => {
680                let parent_ty = place_ref.ty(&self.body.local_decls, self.tcx);
681                let fail_out_of_bounds = |this: &mut Self, location| {
682                    this.fail(location, format!("Out of bounds field {f:?} for {parent_ty:?}"));
683                };
684                let check_equal = |this: &mut Self, location, f_ty| {
685                    if !this.mir_assign_valid_types(ty, f_ty) {
686                        this.fail(
687                            location,
688                            format!(
689                                "Field projection `{place_ref:?}.{f:?}` specified type `{ty}`, but actual type is `{f_ty}`"
690                            )
691                        )
692                    }
693                };
694
695                let kind = match parent_ty.ty.kind() {
696                    &ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
697                        self.tcx.type_of(def_id).instantiate(self.tcx, args).kind()
698                    }
699                    kind => kind,
700                };
701
702                match kind {
703                    ty::Tuple(fields) => {
704                        let Some(f_ty) = fields.get(f.as_usize()) else {
705                            fail_out_of_bounds(self, location);
706                            return;
707                        };
708                        check_equal(self, location, *f_ty);
709                    }
710                    ty::Adt(adt_def, args) => {
711                        // see <https://github.com/rust-lang/rust/blob/7601adcc764d42c9f2984082b49948af652df986/compiler/rustc_middle/src/ty/layout.rs#L861-L864>
712                        if self.tcx.is_lang_item(adt_def.did(), LangItem::DynMetadata) {
713                            self.fail(
714                                location,
715                                format!(
716                                    "You can't project to field {f:?} of `DynMetadata` because \
717                                     layout is weird and thinks it doesn't have fields."
718                                ),
719                            );
720                        }
721
722                        let var = parent_ty.variant_index.unwrap_or(FIRST_VARIANT);
723                        let Some(field) = adt_def.variant(var).fields.get(f) else {
724                            fail_out_of_bounds(self, location);
725                            return;
726                        };
727                        check_equal(self, location, field.ty(self.tcx, args));
728                    }
729                    ty::Closure(_, args) => {
730                        let args = args.as_closure();
731                        let Some(&f_ty) = args.upvar_tys().get(f.as_usize()) else {
732                            fail_out_of_bounds(self, location);
733                            return;
734                        };
735                        check_equal(self, location, f_ty);
736                    }
737                    ty::CoroutineClosure(_, args) => {
738                        let args = args.as_coroutine_closure();
739                        let Some(&f_ty) = args.upvar_tys().get(f.as_usize()) else {
740                            fail_out_of_bounds(self, location);
741                            return;
742                        };
743                        check_equal(self, location, f_ty);
744                    }
745                    &ty::Coroutine(def_id, args) => {
746                        let f_ty = if let Some(var) = parent_ty.variant_index {
747                            // If we're currently validating an inlined copy of this body,
748                            // then it will no longer be parameterized over the original
749                            // args of the coroutine. Otherwise, we prefer to use this body
750                            // since we may be in the process of computing this MIR in the
751                            // first place.
752                            let layout = if def_id == self.caller_body.source.def_id() {
753                                self.caller_body
754                                    .coroutine_layout_raw()
755                                    .or_else(|| self.tcx.coroutine_layout(def_id, args).ok())
756                            } else if self.tcx.needs_coroutine_by_move_body_def_id(def_id)
757                                && let ty::ClosureKind::FnOnce =
758                                    args.as_coroutine().kind_ty().to_opt_closure_kind().unwrap()
759                                && self.caller_body.source.def_id()
760                                    == self.tcx.coroutine_by_move_body_def_id(def_id)
761                            {
762                                // Same if this is the by-move body of a coroutine-closure.
763                                self.caller_body.coroutine_layout_raw()
764                            } else {
765                                self.tcx.coroutine_layout(def_id, args).ok()
766                            };
767
768                            let Some(layout) = layout else {
769                                self.fail(
770                                    location,
771                                    format!("No coroutine layout for {parent_ty:?}"),
772                                );
773                                return;
774                            };
775
776                            let Some(&local) = layout.variant_fields[var].get(f) else {
777                                fail_out_of_bounds(self, location);
778                                return;
779                            };
780
781                            let Some(f_ty) = layout.field_tys.get(local) else {
782                                self.fail(
783                                    location,
784                                    format!("Out of bounds local {local:?} for {parent_ty:?}"),
785                                );
786                                return;
787                            };
788
789                            ty::EarlyBinder::bind(f_ty.ty).instantiate(self.tcx, args)
790                        } else {
791                            let Some(&f_ty) = args.as_coroutine().prefix_tys().get(f.index())
792                            else {
793                                fail_out_of_bounds(self, location);
794                                return;
795                            };
796
797                            f_ty
798                        };
799
800                        check_equal(self, location, f_ty);
801                    }
802                    _ => {
803                        self.fail(location, format!("{:?} does not have fields", parent_ty.ty));
804                    }
805                }
806            }
807            ProjectionElem::Subtype(ty) => {
808                if !util::sub_types(
809                    self.tcx,
810                    self.typing_env,
811                    ty,
812                    place_ref.ty(&self.body.local_decls, self.tcx).ty,
813                ) {
814                    self.fail(
815                        location,
816                        format!(
817                            "Failed subtyping {ty} and {}",
818                            place_ref.ty(&self.body.local_decls, self.tcx).ty
819                        ),
820                    )
821                }
822            }
823            ProjectionElem::UnwrapUnsafeBinder(unwrapped_ty) => {
824                let binder_ty = place_ref.ty(&self.body.local_decls, self.tcx);
825                let ty::UnsafeBinder(binder_ty) = *binder_ty.ty.kind() else {
826                    self.fail(
827                        location,
828                        format!("WrapUnsafeBinder does not produce a ty::UnsafeBinder"),
829                    );
830                    return;
831                };
832                let binder_inner_ty = self.tcx.instantiate_bound_regions_with_erased(*binder_ty);
833                if !self.mir_assign_valid_types(unwrapped_ty, binder_inner_ty) {
834                    self.fail(
835                        location,
836                        format!(
837                            "Cannot unwrap unsafe binder {binder_ty:?} into type {unwrapped_ty}"
838                        ),
839                    );
840                }
841            }
842            _ => {}
843        }
844        self.super_projection_elem(place_ref, elem, context, location);
845    }
846
847    fn visit_var_debug_info(&mut self, debuginfo: &VarDebugInfo<'tcx>) {
848        if let Some(box VarDebugInfoFragment { ty, ref projection }) = debuginfo.composite {
849            if ty.is_union() || ty.is_enum() {
850                self.fail(
851                    START_BLOCK.start_location(),
852                    format!("invalid type {ty} in debuginfo for {:?}", debuginfo.name),
853                );
854            }
855            if projection.is_empty() {
856                self.fail(
857                    START_BLOCK.start_location(),
858                    format!("invalid empty projection in debuginfo for {:?}", debuginfo.name),
859                );
860            }
861            if projection.iter().any(|p| !matches!(p, PlaceElem::Field(..))) {
862                self.fail(
863                    START_BLOCK.start_location(),
864                    format!(
865                        "illegal projection {:?} in debuginfo for {:?}",
866                        projection, debuginfo.name
867                    ),
868                );
869            }
870        }
871        match debuginfo.value {
872            VarDebugInfoContents::Const(_) => {}
873            VarDebugInfoContents::Place(place) => {
874                if place.projection.iter().any(|p| !p.can_use_in_debuginfo()) {
875                    self.fail(
876                        START_BLOCK.start_location(),
877                        format!("illegal place {:?} in debuginfo for {:?}", place, debuginfo.name),
878                    );
879                }
880            }
881        }
882        self.super_var_debug_info(debuginfo);
883    }
884
885    fn visit_place(&mut self, place: &Place<'tcx>, cntxt: PlaceContext, location: Location) {
886        // Set off any `bug!`s in the type computation code
887        let _ = place.ty(&self.body.local_decls, self.tcx);
888
889        if self.body.phase >= MirPhase::Runtime(RuntimePhase::Initial)
890            && place.projection.len() > 1
891            && cntxt != PlaceContext::NonUse(NonUseContext::VarDebugInfo)
892            && place.projection[1..].contains(&ProjectionElem::Deref)
893        {
894            self.fail(
895                location,
896                format!("place {place:?} has deref as a later projection (it is only permitted as the first projection)"),
897            );
898        }
899
900        // Ensure all downcast projections are followed by field projections.
901        let mut projections_iter = place.projection.iter();
902        while let Some(proj) = projections_iter.next() {
903            if matches!(proj, ProjectionElem::Downcast(..)) {
904                if !matches!(projections_iter.next(), Some(ProjectionElem::Field(..))) {
905                    self.fail(
906                        location,
907                        format!(
908                            "place {place:?} has `Downcast` projection not followed by `Field`"
909                        ),
910                    );
911                }
912            }
913        }
914
915        self.super_place(place, cntxt, location);
916    }
917
918    fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
919        macro_rules! check_kinds {
920            ($t:expr, $text:literal, $typat:pat) => {
921                if !matches!(($t).kind(), $typat) {
922                    self.fail(location, format!($text, $t));
923                }
924            };
925        }
926        match rvalue {
927            Rvalue::Use(_) | Rvalue::CopyForDeref(_) => {}
928            Rvalue::Aggregate(kind, fields) => match **kind {
929                AggregateKind::Tuple => {}
930                AggregateKind::Array(dest) => {
931                    for src in fields {
932                        if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest) {
933                            self.fail(location, "array field has the wrong type");
934                        }
935                    }
936                }
937                AggregateKind::Adt(def_id, idx, args, _, Some(field)) => {
938                    let adt_def = self.tcx.adt_def(def_id);
939                    assert!(adt_def.is_union());
940                    assert_eq!(idx, FIRST_VARIANT);
941                    let dest_ty = self.tcx.normalize_erasing_regions(
942                        self.typing_env,
943                        adt_def.non_enum_variant().fields[field].ty(self.tcx, args),
944                    );
945                    if let [field] = fields.raw.as_slice() {
946                        let src_ty = field.ty(self.body, self.tcx);
947                        if !self.mir_assign_valid_types(src_ty, dest_ty) {
948                            self.fail(location, "union field has the wrong type");
949                        }
950                    } else {
951                        self.fail(location, "unions should have one initialized field");
952                    }
953                }
954                AggregateKind::Adt(def_id, idx, args, _, None) => {
955                    let adt_def = self.tcx.adt_def(def_id);
956                    assert!(!adt_def.is_union());
957                    let variant = &adt_def.variants()[idx];
958                    if variant.fields.len() != fields.len() {
959                        self.fail(location, "adt has the wrong number of initialized fields");
960                    }
961                    for (src, dest) in std::iter::zip(fields, &variant.fields) {
962                        let dest_ty = self
963                            .tcx
964                            .normalize_erasing_regions(self.typing_env, dest.ty(self.tcx, args));
965                        if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest_ty) {
966                            self.fail(location, "adt field has the wrong type");
967                        }
968                    }
969                }
970                AggregateKind::Closure(_, args) => {
971                    let upvars = args.as_closure().upvar_tys();
972                    if upvars.len() != fields.len() {
973                        self.fail(location, "closure has the wrong number of initialized fields");
974                    }
975                    for (src, dest) in std::iter::zip(fields, upvars) {
976                        if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest) {
977                            self.fail(location, "closure field has the wrong type");
978                        }
979                    }
980                }
981                AggregateKind::Coroutine(_, args) => {
982                    let upvars = args.as_coroutine().upvar_tys();
983                    if upvars.len() != fields.len() {
984                        self.fail(location, "coroutine has the wrong number of initialized fields");
985                    }
986                    for (src, dest) in std::iter::zip(fields, upvars) {
987                        if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest) {
988                            self.fail(location, "coroutine field has the wrong type");
989                        }
990                    }
991                }
992                AggregateKind::CoroutineClosure(_, args) => {
993                    let upvars = args.as_coroutine_closure().upvar_tys();
994                    if upvars.len() != fields.len() {
995                        self.fail(
996                            location,
997                            "coroutine-closure has the wrong number of initialized fields",
998                        );
999                    }
1000                    for (src, dest) in std::iter::zip(fields, upvars) {
1001                        if !self.mir_assign_valid_types(src.ty(self.body, self.tcx), dest) {
1002                            self.fail(location, "coroutine-closure field has the wrong type");
1003                        }
1004                    }
1005                }
1006                AggregateKind::RawPtr(pointee_ty, mutability) => {
1007                    if !matches!(self.body.phase, MirPhase::Runtime(_)) {
1008                        // It would probably be fine to support this in earlier phases, but at the
1009                        // time of writing it's only ever introduced from intrinsic lowering, so
1010                        // earlier things just `bug!` on it.
1011                        self.fail(location, "RawPtr should be in runtime MIR only");
1012                    }
1013
1014                    if let [data_ptr, metadata] = fields.raw.as_slice() {
1015                        let data_ptr_ty = data_ptr.ty(self.body, self.tcx);
1016                        let metadata_ty = metadata.ty(self.body, self.tcx);
1017                        if let ty::RawPtr(in_pointee, in_mut) = data_ptr_ty.kind() {
1018                            if *in_mut != mutability {
1019                                self.fail(location, "input and output mutability must match");
1020                            }
1021
1022                            // FIXME: check `Thin` instead of `Sized`
1023                            if !in_pointee.is_sized(self.tcx, self.typing_env) {
1024                                self.fail(location, "input pointer must be thin");
1025                            }
1026                        } else {
1027                            self.fail(
1028                                location,
1029                                "first operand to raw pointer aggregate must be a raw pointer",
1030                            );
1031                        }
1032
1033                        // FIXME: Check metadata more generally
1034                        if pointee_ty.is_slice() {
1035                            if !self.mir_assign_valid_types(metadata_ty, self.tcx.types.usize) {
1036                                self.fail(location, "slice metadata must be usize");
1037                            }
1038                        } else if pointee_ty.is_sized(self.tcx, self.typing_env) {
1039                            if metadata_ty != self.tcx.types.unit {
1040                                self.fail(location, "metadata for pointer-to-thin must be unit");
1041                            }
1042                        }
1043                    } else {
1044                        self.fail(location, "raw pointer aggregate must have 2 fields");
1045                    }
1046                }
1047            },
1048            Rvalue::Ref(_, BorrowKind::Fake(_), _) => {
1049                if self.body.phase >= MirPhase::Runtime(RuntimePhase::Initial) {
1050                    self.fail(
1051                        location,
1052                        "`Assign` statement with a `Fake` borrow should have been removed in runtime MIR",
1053                    );
1054                }
1055            }
1056            Rvalue::Ref(..) => {}
1057            Rvalue::Len(p) => {
1058                let pty = p.ty(&self.body.local_decls, self.tcx).ty;
1059                check_kinds!(
1060                    pty,
1061                    "Cannot compute length of non-array type {:?}",
1062                    ty::Array(..) | ty::Slice(..)
1063                );
1064            }
1065            Rvalue::BinaryOp(op, vals) => {
1066                use BinOp::*;
1067                let a = vals.0.ty(&self.body.local_decls, self.tcx);
1068                let b = vals.1.ty(&self.body.local_decls, self.tcx);
1069                if crate::util::binop_right_homogeneous(*op) {
1070                    if let Eq | Lt | Le | Ne | Ge | Gt = op {
1071                        // The function pointer types can have lifetimes
1072                        if !self.mir_assign_valid_types(a, b) {
1073                            self.fail(
1074                                location,
1075                                format!("Cannot {op:?} compare incompatible types {a} and {b}"),
1076                            );
1077                        }
1078                    } else if a != b {
1079                        self.fail(
1080                            location,
1081                            format!("Cannot perform binary op {op:?} on unequal types {a} and {b}"),
1082                        );
1083                    }
1084                }
1085
1086                match op {
1087                    Offset => {
1088                        check_kinds!(a, "Cannot offset non-pointer type {:?}", ty::RawPtr(..));
1089                        if b != self.tcx.types.isize && b != self.tcx.types.usize {
1090                            self.fail(location, format!("Cannot offset by non-isize type {b}"));
1091                        }
1092                    }
1093                    Eq | Lt | Le | Ne | Ge | Gt => {
1094                        for x in [a, b] {
1095                            check_kinds!(
1096                                x,
1097                                "Cannot {op:?} compare type {:?}",
1098                                ty::Bool
1099                                    | ty::Char
1100                                    | ty::Int(..)
1101                                    | ty::Uint(..)
1102                                    | ty::Float(..)
1103                                    | ty::RawPtr(..)
1104                                    | ty::FnPtr(..)
1105                            )
1106                        }
1107                    }
1108                    Cmp => {
1109                        for x in [a, b] {
1110                            check_kinds!(
1111                                x,
1112                                "Cannot three-way compare non-integer type {:?}",
1113                                ty::Char | ty::Uint(..) | ty::Int(..)
1114                            )
1115                        }
1116                    }
1117                    AddUnchecked | AddWithOverflow | SubUnchecked | SubWithOverflow
1118                    | MulUnchecked | MulWithOverflow | Shl | ShlUnchecked | Shr | ShrUnchecked => {
1119                        for x in [a, b] {
1120                            check_kinds!(
1121                                x,
1122                                "Cannot {op:?} non-integer type {:?}",
1123                                ty::Uint(..) | ty::Int(..)
1124                            )
1125                        }
1126                    }
1127                    BitAnd | BitOr | BitXor => {
1128                        for x in [a, b] {
1129                            check_kinds!(
1130                                x,
1131                                "Cannot perform bitwise op {op:?} on type {:?}",
1132                                ty::Uint(..) | ty::Int(..) | ty::Bool
1133                            )
1134                        }
1135                    }
1136                    Add | Sub | Mul | Div | Rem => {
1137                        for x in [a, b] {
1138                            check_kinds!(
1139                                x,
1140                                "Cannot perform arithmetic {op:?} on type {:?}",
1141                                ty::Uint(..) | ty::Int(..) | ty::Float(..)
1142                            )
1143                        }
1144                    }
1145                }
1146            }
1147            Rvalue::UnaryOp(op, operand) => {
1148                let a = operand.ty(&self.body.local_decls, self.tcx);
1149                match op {
1150                    UnOp::Neg => {
1151                        check_kinds!(a, "Cannot negate type {:?}", ty::Int(..) | ty::Float(..))
1152                    }
1153                    UnOp::Not => {
1154                        check_kinds!(
1155                            a,
1156                            "Cannot binary not type {:?}",
1157                            ty::Int(..) | ty::Uint(..) | ty::Bool
1158                        );
1159                    }
1160                    UnOp::PtrMetadata => {
1161                        check_kinds!(
1162                            a,
1163                            "Cannot PtrMetadata non-pointer non-reference type {:?}",
1164                            ty::RawPtr(..) | ty::Ref(..)
1165                        );
1166                    }
1167                }
1168            }
1169            Rvalue::ShallowInitBox(operand, _) => {
1170                let a = operand.ty(&self.body.local_decls, self.tcx);
1171                check_kinds!(a, "Cannot shallow init type {:?}", ty::RawPtr(..));
1172            }
1173            Rvalue::Cast(kind, operand, target_type) => {
1174                let op_ty = operand.ty(self.body, self.tcx);
1175                match kind {
1176                    // FIXME: Add Checks for these
1177                    CastKind::PointerWithExposedProvenance | CastKind::PointerExposeProvenance => {}
1178                    CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer, _) => {
1179                        // FIXME: check signature compatibility.
1180                        check_kinds!(
1181                            op_ty,
1182                            "CastKind::{kind:?} input must be a fn item, not {:?}",
1183                            ty::FnDef(..)
1184                        );
1185                        check_kinds!(
1186                            target_type,
1187                            "CastKind::{kind:?} output must be a fn pointer, not {:?}",
1188                            ty::FnPtr(..)
1189                        );
1190                    }
1191                    CastKind::PointerCoercion(PointerCoercion::UnsafeFnPointer, _) => {
1192                        // FIXME: check safety and signature compatibility.
1193                        check_kinds!(
1194                            op_ty,
1195                            "CastKind::{kind:?} input must be a fn pointer, not {:?}",
1196                            ty::FnPtr(..)
1197                        );
1198                        check_kinds!(
1199                            target_type,
1200                            "CastKind::{kind:?} output must be a fn pointer, not {:?}",
1201                            ty::FnPtr(..)
1202                        );
1203                    }
1204                    CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(..), _) => {
1205                        // FIXME: check safety, captures, and signature compatibility.
1206                        check_kinds!(
1207                            op_ty,
1208                            "CastKind::{kind:?} input must be a closure, not {:?}",
1209                            ty::Closure(..)
1210                        );
1211                        check_kinds!(
1212                            target_type,
1213                            "CastKind::{kind:?} output must be a fn pointer, not {:?}",
1214                            ty::FnPtr(..)
1215                        );
1216                    }
1217                    CastKind::PointerCoercion(PointerCoercion::MutToConstPointer, _) => {
1218                        // FIXME: check same pointee?
1219                        check_kinds!(
1220                            op_ty,
1221                            "CastKind::{kind:?} input must be a raw mut pointer, not {:?}",
1222                            ty::RawPtr(_, Mutability::Mut)
1223                        );
1224                        check_kinds!(
1225                            target_type,
1226                            "CastKind::{kind:?} output must be a raw const pointer, not {:?}",
1227                            ty::RawPtr(_, Mutability::Not)
1228                        );
1229                        if self.body.phase >= MirPhase::Analysis(AnalysisPhase::PostCleanup) {
1230                            self.fail(location, format!("After borrowck, MIR disallows {kind:?}"));
1231                        }
1232                    }
1233                    CastKind::PointerCoercion(PointerCoercion::ArrayToPointer, _) => {
1234                        // FIXME: Check pointee types
1235                        check_kinds!(
1236                            op_ty,
1237                            "CastKind::{kind:?} input must be a raw pointer, not {:?}",
1238                            ty::RawPtr(..)
1239                        );
1240                        check_kinds!(
1241                            target_type,
1242                            "CastKind::{kind:?} output must be a raw pointer, not {:?}",
1243                            ty::RawPtr(..)
1244                        );
1245                        if self.body.phase >= MirPhase::Analysis(AnalysisPhase::PostCleanup) {
1246                            self.fail(location, format!("After borrowck, MIR disallows {kind:?}"));
1247                        }
1248                    }
1249                    CastKind::PointerCoercion(PointerCoercion::Unsize, _) => {
1250                        // Pointers being unsize coerced should at least implement
1251                        // `CoerceUnsized`.
1252                        if !self.predicate_must_hold_modulo_regions(ty::TraitRef::new(
1253                            self.tcx,
1254                            self.tcx.require_lang_item(
1255                                LangItem::CoerceUnsized,
1256                                Some(self.body.source_info(location).span),
1257                            ),
1258                            [op_ty, *target_type],
1259                        )) {
1260                            self.fail(location, format!("Unsize coercion, but `{op_ty}` isn't coercible to `{target_type}`"));
1261                        }
1262                    }
1263                    CastKind::PointerCoercion(PointerCoercion::DynStar, _) => {
1264                        // FIXME(dyn-star): make sure nothing needs to be done here.
1265                    }
1266                    CastKind::IntToInt | CastKind::IntToFloat => {
1267                        let input_valid = op_ty.is_integral() || op_ty.is_char() || op_ty.is_bool();
1268                        let target_valid = target_type.is_numeric() || target_type.is_char();
1269                        if !input_valid || !target_valid {
1270                            self.fail(
1271                                location,
1272                                format!("Wrong cast kind {kind:?} for the type {op_ty}"),
1273                            );
1274                        }
1275                    }
1276                    CastKind::FnPtrToPtr => {
1277                        check_kinds!(
1278                            op_ty,
1279                            "CastKind::{kind:?} input must be a fn pointer, not {:?}",
1280                            ty::FnPtr(..)
1281                        );
1282                        check_kinds!(
1283                            target_type,
1284                            "CastKind::{kind:?} output must be a raw pointer, not {:?}",
1285                            ty::RawPtr(..)
1286                        );
1287                    }
1288                    CastKind::PtrToPtr => {
1289                        check_kinds!(
1290                            op_ty,
1291                            "CastKind::{kind:?} input must be a raw pointer, not {:?}",
1292                            ty::RawPtr(..)
1293                        );
1294                        check_kinds!(
1295                            target_type,
1296                            "CastKind::{kind:?} output must be a raw pointer, not {:?}",
1297                            ty::RawPtr(..)
1298                        );
1299                    }
1300                    CastKind::FloatToFloat | CastKind::FloatToInt => {
1301                        if !op_ty.is_floating_point() || !target_type.is_numeric() {
1302                            self.fail(
1303                                location,
1304                                format!(
1305                                    "Trying to cast non 'Float' as {kind:?} into {target_type:?}"
1306                                ),
1307                            );
1308                        }
1309                    }
1310                    CastKind::Transmute => {
1311                        if let MirPhase::Runtime(..) = self.body.phase {
1312                            // Unlike `mem::transmute`, a MIR `Transmute` is well-formed
1313                            // for any two `Sized` types, just potentially UB to run.
1314
1315                            if !self
1316                                .tcx
1317                                .normalize_erasing_regions(self.typing_env, op_ty)
1318                                .is_sized(self.tcx, self.typing_env)
1319                            {
1320                                self.fail(
1321                                    location,
1322                                    format!("Cannot transmute from non-`Sized` type {op_ty}"),
1323                                );
1324                            }
1325                            if !self
1326                                .tcx
1327                                .normalize_erasing_regions(self.typing_env, *target_type)
1328                                .is_sized(self.tcx, self.typing_env)
1329                            {
1330                                self.fail(
1331                                    location,
1332                                    format!("Cannot transmute to non-`Sized` type {target_type:?}"),
1333                                );
1334                            }
1335                        } else {
1336                            self.fail(
1337                                location,
1338                                format!(
1339                                    "Transmute is not supported in non-runtime phase {:?}.",
1340                                    self.body.phase
1341                                ),
1342                            );
1343                        }
1344                    }
1345                }
1346            }
1347            Rvalue::NullaryOp(NullOp::OffsetOf(indices), container) => {
1348                let fail_out_of_bounds = |this: &mut Self, location, field, ty| {
1349                    this.fail(location, format!("Out of bounds field {field:?} for {ty}"));
1350                };
1351
1352                let mut current_ty = *container;
1353
1354                for (variant, field) in indices.iter() {
1355                    match current_ty.kind() {
1356                        ty::Tuple(fields) => {
1357                            if variant != FIRST_VARIANT {
1358                                self.fail(
1359                                    location,
1360                                    format!("tried to get variant {variant:?} of tuple"),
1361                                );
1362                                return;
1363                            }
1364                            let Some(&f_ty) = fields.get(field.as_usize()) else {
1365                                fail_out_of_bounds(self, location, field, current_ty);
1366                                return;
1367                            };
1368
1369                            current_ty = self.tcx.normalize_erasing_regions(self.typing_env, f_ty);
1370                        }
1371                        ty::Adt(adt_def, args) => {
1372                            let Some(field) = adt_def.variant(variant).fields.get(field) else {
1373                                fail_out_of_bounds(self, location, field, current_ty);
1374                                return;
1375                            };
1376
1377                            let f_ty = field.ty(self.tcx, args);
1378                            current_ty = self.tcx.normalize_erasing_regions(self.typing_env, f_ty);
1379                        }
1380                        _ => {
1381                            self.fail(
1382                                location,
1383                                format!("Cannot get offset ({variant:?}, {field:?}) from type {current_ty}"),
1384                            );
1385                            return;
1386                        }
1387                    }
1388                }
1389            }
1390            Rvalue::Repeat(_, _)
1391            | Rvalue::ThreadLocalRef(_)
1392            | Rvalue::RawPtr(_, _)
1393            | Rvalue::NullaryOp(
1394                NullOp::SizeOf | NullOp::AlignOf | NullOp::UbChecks | NullOp::ContractChecks,
1395                _,
1396            )
1397            | Rvalue::Discriminant(_) => {}
1398
1399            Rvalue::WrapUnsafeBinder(op, ty) => {
1400                let unwrapped_ty = op.ty(self.body, self.tcx);
1401                let ty::UnsafeBinder(binder_ty) = *ty.kind() else {
1402                    self.fail(
1403                        location,
1404                        format!("WrapUnsafeBinder does not produce a ty::UnsafeBinder"),
1405                    );
1406                    return;
1407                };
1408                let binder_inner_ty = self.tcx.instantiate_bound_regions_with_erased(*binder_ty);
1409                if !self.mir_assign_valid_types(unwrapped_ty, binder_inner_ty) {
1410                    self.fail(
1411                        location,
1412                        format!("Cannot wrap {unwrapped_ty} into unsafe binder {binder_ty:?}"),
1413                    );
1414                }
1415            }
1416        }
1417        self.super_rvalue(rvalue, location);
1418    }
1419
1420    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
1421        match &statement.kind {
1422            StatementKind::Assign(box (dest, rvalue)) => {
1423                // LHS and RHS of the assignment must have the same type.
1424                let left_ty = dest.ty(&self.body.local_decls, self.tcx).ty;
1425                let right_ty = rvalue.ty(&self.body.local_decls, self.tcx);
1426
1427                if !self.mir_assign_valid_types(right_ty, left_ty) {
1428                    self.fail(
1429                        location,
1430                        format!(
1431                            "encountered `{:?}` with incompatible types:\n\
1432                            left-hand side has type: {}\n\
1433                            right-hand side has type: {}",
1434                            statement.kind, left_ty, right_ty,
1435                        ),
1436                    );
1437                }
1438                if let Rvalue::CopyForDeref(place) = rvalue {
1439                    if place.ty(&self.body.local_decls, self.tcx).ty.builtin_deref(true).is_none() {
1440                        self.fail(
1441                            location,
1442                            "`CopyForDeref` should only be used for dereferenceable types",
1443                        )
1444                    }
1445                }
1446            }
1447            StatementKind::AscribeUserType(..) => {
1448                if self.body.phase >= MirPhase::Runtime(RuntimePhase::Initial) {
1449                    self.fail(
1450                        location,
1451                        "`AscribeUserType` should have been removed after drop lowering phase",
1452                    );
1453                }
1454            }
1455            StatementKind::FakeRead(..) => {
1456                if self.body.phase >= MirPhase::Runtime(RuntimePhase::Initial) {
1457                    self.fail(
1458                        location,
1459                        "`FakeRead` should have been removed after drop lowering phase",
1460                    );
1461                }
1462            }
1463            StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(op)) => {
1464                let ty = op.ty(&self.body.local_decls, self.tcx);
1465                if !ty.is_bool() {
1466                    self.fail(
1467                        location,
1468                        format!("`assume` argument must be `bool`, but got: `{ty}`"),
1469                    );
1470                }
1471            }
1472            StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping(
1473                CopyNonOverlapping { src, dst, count },
1474            )) => {
1475                let src_ty = src.ty(&self.body.local_decls, self.tcx);
1476                let op_src_ty = if let Some(src_deref) = src_ty.builtin_deref(true) {
1477                    src_deref
1478                } else {
1479                    self.fail(
1480                        location,
1481                        format!("Expected src to be ptr in copy_nonoverlapping, got: {src_ty}"),
1482                    );
1483                    return;
1484                };
1485                let dst_ty = dst.ty(&self.body.local_decls, self.tcx);
1486                let op_dst_ty = if let Some(dst_deref) = dst_ty.builtin_deref(true) {
1487                    dst_deref
1488                } else {
1489                    self.fail(
1490                        location,
1491                        format!("Expected dst to be ptr in copy_nonoverlapping, got: {dst_ty}"),
1492                    );
1493                    return;
1494                };
1495                // since CopyNonOverlapping is parametrized by 1 type,
1496                // we only need to check that they are equal and not keep an extra parameter.
1497                if !self.mir_assign_valid_types(op_src_ty, op_dst_ty) {
1498                    self.fail(location, format!("bad arg ({op_src_ty} != {op_dst_ty})"));
1499                }
1500
1501                let op_cnt_ty = count.ty(&self.body.local_decls, self.tcx);
1502                if op_cnt_ty != self.tcx.types.usize {
1503                    self.fail(location, format!("bad arg ({op_cnt_ty} != usize)"))
1504                }
1505            }
1506            StatementKind::SetDiscriminant { place, .. } => {
1507                if self.body.phase < MirPhase::Runtime(RuntimePhase::Initial) {
1508                    self.fail(location, "`SetDiscriminant`is not allowed until deaggregation");
1509                }
1510                let pty = place.ty(&self.body.local_decls, self.tcx).ty;
1511                if !matches!(
1512                    pty.kind(),
1513                    ty::Adt(..) | ty::Coroutine(..) | ty::Alias(ty::Opaque, ..)
1514                ) {
1515                    self.fail(
1516                        location,
1517                        format!(
1518                            "`SetDiscriminant` is only allowed on ADTs and coroutines, not {pty}"
1519                        ),
1520                    );
1521                }
1522            }
1523            StatementKind::Deinit(..) => {
1524                if self.body.phase < MirPhase::Runtime(RuntimePhase::Initial) {
1525                    self.fail(location, "`Deinit`is not allowed until deaggregation");
1526                }
1527            }
1528            StatementKind::Retag(kind, _) => {
1529                // FIXME(JakobDegen) The validator should check that `self.body.phase <
1530                // DropsLowered`. However, this causes ICEs with generation of drop shims, which
1531                // seem to fail to set their `MirPhase` correctly.
1532                if matches!(kind, RetagKind::TwoPhase) {
1533                    self.fail(location, format!("explicit `{kind:?}` is forbidden"));
1534                }
1535            }
1536            StatementKind::StorageLive(_)
1537            | StatementKind::StorageDead(_)
1538            | StatementKind::Coverage(_)
1539            | StatementKind::ConstEvalCounter
1540            | StatementKind::PlaceMention(..)
1541            | StatementKind::BackwardIncompatibleDropHint { .. }
1542            | StatementKind::Nop => {}
1543        }
1544
1545        self.super_statement(statement, location);
1546    }
1547
1548    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
1549        match &terminator.kind {
1550            TerminatorKind::SwitchInt { targets, discr } => {
1551                let switch_ty = discr.ty(&self.body.local_decls, self.tcx);
1552
1553                let target_width = self.tcx.sess.target.pointer_width;
1554
1555                let size = Size::from_bits(match switch_ty.kind() {
1556                    ty::Uint(uint) => uint.normalize(target_width).bit_width().unwrap(),
1557                    ty::Int(int) => int.normalize(target_width).bit_width().unwrap(),
1558                    ty::Char => 32,
1559                    ty::Bool => 1,
1560                    other => bug!("unhandled type: {:?}", other),
1561                });
1562
1563                for (value, _) in targets.iter() {
1564                    if ScalarInt::try_from_uint(value, size).is_none() {
1565                        self.fail(
1566                            location,
1567                            format!("the value {value:#x} is not a proper {switch_ty}"),
1568                        )
1569                    }
1570                }
1571            }
1572            TerminatorKind::Call { func, .. } | TerminatorKind::TailCall { func, .. } => {
1573                let func_ty = func.ty(&self.body.local_decls, self.tcx);
1574                match func_ty.kind() {
1575                    ty::FnPtr(..) | ty::FnDef(..) => {}
1576                    _ => self.fail(
1577                        location,
1578                        format!(
1579                            "encountered non-callable type {func_ty} in `{}` terminator",
1580                            terminator.kind.name()
1581                        ),
1582                    ),
1583                }
1584
1585                if let TerminatorKind::TailCall { .. } = terminator.kind {
1586                    // FIXME(explicit_tail_calls): implement tail-call specific checks here (such
1587                    // as signature matching, forbidding closures, etc)
1588                }
1589            }
1590            TerminatorKind::Assert { cond, .. } => {
1591                let cond_ty = cond.ty(&self.body.local_decls, self.tcx);
1592                if cond_ty != self.tcx.types.bool {
1593                    self.fail(
1594                        location,
1595                        format!(
1596                            "encountered non-boolean condition of type {cond_ty} in `Assert` terminator"
1597                        ),
1598                    );
1599                }
1600            }
1601            TerminatorKind::Goto { .. }
1602            | TerminatorKind::Drop { .. }
1603            | TerminatorKind::Yield { .. }
1604            | TerminatorKind::FalseEdge { .. }
1605            | TerminatorKind::FalseUnwind { .. }
1606            | TerminatorKind::InlineAsm { .. }
1607            | TerminatorKind::CoroutineDrop
1608            | TerminatorKind::UnwindResume
1609            | TerminatorKind::UnwindTerminate(_)
1610            | TerminatorKind::Return
1611            | TerminatorKind::Unreachable => {}
1612        }
1613
1614        self.super_terminator(terminator, location);
1615    }
1616}