rustc_const_eval/interpret/step.rs

//! This module contains the `InterpCx` methods for executing a single step of the interpreter.
//!
//! The main entry point is the `step` method.

use either::Either;
use rustc_abi::{FIRST_VARIANT, FieldIdx};
use rustc_index::IndexSlice;
use rustc_middle::ty::{self, Instance, Ty};
use rustc_middle::{bug, mir, span_bug};
use rustc_span::source_map::Spanned;
use rustc_target::callconv::FnAbi;
use tracing::{info, instrument, trace};

use super::{
    FnArg, FnVal, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemPlaceMeta, PlaceTy,
    Projectable, Scalar, interp_ok, throw_ub, throw_unsup_format,
};
use crate::util;

struct EvaluatedCalleeAndArgs<'tcx, M: Machine<'tcx>> {
    callee: FnVal<'tcx, M::ExtraFnVal>,
    args: Vec<FnArg<'tcx, M::Provenance>>,
    fn_sig: ty::FnSig<'tcx>,
    fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>,
    /// True if the function is marked as `#[track_caller]` ([`ty::InstanceKind::requires_caller_location`])
    with_caller_location: bool,
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
    /// Returns `true` as long as there are more things to do.
    ///
    /// This is used by [priroda](https://github.com/oli-obk/priroda)
    ///
    /// This is marked `#[inline(always)]` to work around adversarial codegen when `opt-level = 3`
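    ///
    /// A minimal driver loop might look like this (illustrative sketch only; `ecx` is assumed
    /// to be an already set up `InterpCx`):
    /// ```ignore (illustrative)
    /// while ecx.step()? {}
    /// ```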
    #[inline(always)]
    pub fn step(&mut self) -> InterpResult<'tcx, bool> {
        if self.stack().is_empty() {
            return interp_ok(false);
        }

        let Either::Left(loc) = self.frame().loc else {
            // We are unwinding and this fn has no cleanup code.
            // Just go on unwinding.
            trace!("unwinding: skipping frame");
            self.return_from_current_stack_frame(/* unwinding */ true)?;
            return interp_ok(true);
        };
        let basic_block = &self.body().basic_blocks[loc.block];

        if let Some(stmt) = basic_block.statements.get(loc.statement_index) {
            let old_frames = self.frame_idx();
            self.eval_statement(stmt)?;
            // Make sure we are not updating `statement_index` of the wrong frame.
            assert_eq!(old_frames, self.frame_idx());
            // Advance the program counter.
            self.frame_mut().loc.as_mut().left().unwrap().statement_index += 1;
            return interp_ok(true);
        }

        M::before_terminator(self)?;

        let terminator = basic_block.terminator();
        self.eval_terminator(terminator)?;
        if !self.stack().is_empty() {
            if let Either::Left(loc) = self.frame().loc {
                info!("// executing {:?}", loc.block);
            }
        }
        interp_ok(true)
    }

    /// Runs the interpretation logic for the given `mir::Statement` at the current frame and
    /// statement counter.
    ///
    /// This does NOT move the statement counter forward, the caller has to do that!
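    /// (`step` does this by bumping the frame's `statement_index` after the statement was
    /// evaluated.)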
    pub fn eval_statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
        info!("{:?}", stmt);

        use rustc_middle::mir::StatementKind::*;

        match &stmt.kind {
            Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?,

            SetDiscriminant { place, variant_index } => {
                let dest = self.eval_place(**place)?;
                self.write_discriminant(*variant_index, &dest)?;
            }

            Deinit(place) => {
                let dest = self.eval_place(**place)?;
                self.write_uninit(&dest)?;
            }

            // Mark locals as alive
            StorageLive(local) => {
                self.storage_live(*local)?;
            }

            // Mark locals as dead
            StorageDead(local) => {
                self.storage_dead(*local)?;
            }

            // No dynamic semantics attached to `FakeRead`; MIR
            // interpreter is solely intended for borrowck'ed code.
            FakeRead(..) => {}

            // Stacked Borrows.
            Retag(kind, place) => {
                let dest = self.eval_place(**place)?;
                M::retag_place_contents(self, *kind, &dest)?;
            }

            Intrinsic(box intrinsic) => self.eval_nondiverging_intrinsic(intrinsic)?,

            // Evaluate the place expression, without reading from it.
            PlaceMention(box place) => {
                let _ = self.eval_place(*place)?;
            }

            // This exists purely to guide borrowck lifetime inference, and does not have
            // an operational effect.
            AscribeUserType(..) => {}

            // Currently, Miri discards Coverage statements. Coverage statements are only injected
            // via an optional compile time MIR pass and have no side effects. Since Coverage
            // statements don't exist at the source level, it is safe for Miri to ignore them, even
            // for undefined behavior (UB) checks.
            //
            // A coverage counter inside a const expression (for example, a counter injected in a
            // const function) is discarded when the const is evaluated at compile time. Whether
            // this should change, and/or how to implement a const eval counter, is a subject of the
            // following issue:
            //
            // FIXME(#73156): Handle source code coverage in const eval
            Coverage(..) => {}

            ConstEvalCounter => {
                M::increment_const_eval_counter(self)?;
            }

            // Defined to do nothing. These are added by optimization passes, to avoid changing the
            // size of MIR constantly.
            Nop => {}

            // Only used for temporary lifetime lints
            BackwardIncompatibleDropHint { .. } => {}
        }

        interp_ok(())
    }

    /// Evaluate an assignment statement.
    ///
    /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
    /// type writes its results directly into the memory specified by the place.
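    ///
    /// For example, for MIR like `_2 = Add(copy _1, const 1_i32)` (illustrative), the `BinaryOp`
    /// arm below evaluates both operands and writes the sum directly into the memory backing `_2`.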
    pub fn eval_rvalue_into_place(
        &mut self,
        rvalue: &mir::Rvalue<'tcx>,
        place: mir::Place<'tcx>,
    ) -> InterpResult<'tcx> {
        let dest = self.eval_place(place)?;
        // FIXME: ensure some kind of non-aliasing between LHS and RHS?
        // Also see https://github.com/rust-lang/rust/issues/68364.

        use rustc_middle::mir::Rvalue::*;
        match *rvalue {
            ThreadLocalRef(did) => {
                let ptr = M::thread_local_static_pointer(self, did)?;
                self.write_pointer(ptr, &dest)?;
            }

            Use(ref operand) => {
                // Avoid recomputing the layout
                let op = self.eval_operand(operand, Some(dest.layout))?;
                self.copy_op(&op, &dest)?;
            }

            CopyForDeref(place) => {
                let op = self.eval_place_to_op(place, Some(dest.layout))?;
                self.copy_op(&op, &dest)?;
            }

            BinaryOp(bin_op, box (ref left, ref right)) => {
                let layout = util::binop_left_homogeneous(bin_op).then_some(dest.layout);
                let left = self.read_immediate(&self.eval_operand(left, layout)?)?;
                let layout = util::binop_right_homogeneous(bin_op).then_some(left.layout);
                let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
                let result = self.binary_op(bin_op, &left, &right)?;
                assert_eq!(result.layout, dest.layout, "layout mismatch for result of {bin_op:?}");
                self.write_immediate(*result, &dest)?;
            }

            UnaryOp(un_op, ref operand) => {
                // The operand always has the same type as the result.
                let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
                let result = self.unary_op(un_op, &val)?;
                assert_eq!(result.layout, dest.layout, "layout mismatch for result of {un_op:?}");
                self.write_immediate(*result, &dest)?;
            }

            NullaryOp(null_op, ty) => {
                let ty = self.instantiate_from_current_frame_and_normalize_erasing_regions(ty)?;
                let val = self.nullary_op(null_op, ty)?;
                self.write_immediate(*val, &dest)?;
            }

            Aggregate(box ref kind, ref operands) => {
                self.write_aggregate(kind, operands, &dest)?;
            }

            Repeat(ref operand, _) => {
                self.write_repeat(operand, &dest)?;
            }

            Len(place) => {
                let src = self.eval_place(place)?;
                let len = src.len(self)?;
                self.write_scalar(Scalar::from_target_usize(len, self), &dest)?;
            }

            Ref(_, borrow_kind, place) => {
                let src = self.eval_place(place)?;
                let place = self.force_allocation(&src)?;
                let val = ImmTy::from_immediate(place.to_ref(self), dest.layout);
                // A fresh reference was created, make sure it gets retagged.
                let val = M::retag_ptr_value(
                    self,
                    if borrow_kind.allows_two_phase_borrow() {
                        mir::RetagKind::TwoPhase
                    } else {
                        mir::RetagKind::Default
                    },
                    &val,
                )?;
                self.write_immediate(*val, &dest)?;
            }

            RawPtr(kind, place) => {
                // Figure out whether this is an addr_of of an already raw place.
                let place_base_raw = if place.is_indirect_first_projection() {
                    let ty = self.frame().body.local_decls[place.local].ty;
                    ty.is_raw_ptr()
                } else {
                    // Not a deref, and thus not raw.
                    false
                };

                let src = self.eval_place(place)?;
                let place = self.force_allocation(&src)?;
                let mut val = ImmTy::from_immediate(place.to_ref(self), dest.layout);
                if !place_base_raw && !kind.is_fake() {
                    // If this was not already raw, it needs retagging -- except for "fake"
                    // raw borrows whose defining property is that they do not get retagged.
                    val = M::retag_ptr_value(self, mir::RetagKind::Raw, &val)?;
                }
                self.write_immediate(*val, &dest)?;
            }

            ShallowInitBox(ref operand, _) => {
                let src = self.eval_operand(operand, None)?;
                let v = self.read_immediate(&src)?;
                self.write_immediate(*v, &dest)?;
            }

            Cast(cast_kind, ref operand, cast_ty) => {
                let src = self.eval_operand(operand, None)?;
                let cast_ty =
                    self.instantiate_from_current_frame_and_normalize_erasing_regions(cast_ty)?;
                self.cast(&src, cast_kind, cast_ty, &dest)?;
            }

            Discriminant(place) => {
                let op = self.eval_place_to_op(place, None)?;
                let variant = self.read_discriminant(&op)?;
                let discr = self.discriminant_for_variant(op.layout.ty, variant)?;
                self.write_immediate(*discr, &dest)?;
            }

            WrapUnsafeBinder(ref op, _ty) => {
                // Constructing an unsafe binder acts like a transmute
                // since the operand's layout does not change.
                let op = self.eval_operand(op, None)?;
                self.copy_op_allow_transmute(&op, &dest)?;
            }
        }

        trace!("{:?}", self.dump_place(&dest));

        interp_ok(())
    }

    /// Writes the aggregate to the destination.
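    ///
    /// Padding is first reset to uninit, then each field operand is copied into its (possibly
    /// downcast) field place, and for enums the discriminant is written last. Raw pointer
    /// aggregates are special-cased and assembled from their data and metadata parts.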
    #[instrument(skip(self), level = "trace")]
    fn write_aggregate(
        &mut self,
        kind: &mir::AggregateKind<'tcx>,
        operands: &IndexSlice<FieldIdx, mir::Operand<'tcx>>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        self.write_uninit(dest)?; // make sure all the padding ends up as uninit
        let (variant_index, variant_dest, active_field_index) = match *kind {
            mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                let variant_dest = self.project_downcast(dest, variant_index)?;
                (variant_index, variant_dest, active_field_index)
            }
            mir::AggregateKind::RawPtr(..) => {
                // Pointers don't have "fields" in the normal sense, so the
                // projection-based code below would either fail in projection
                // or in type mismatches. Instead, build an `Immediate` from
                // the parts and write that to the destination.
                let [data, meta] = &operands.raw else {
                    bug!("{kind:?} should have 2 operands, had {operands:?}");
                };
                let data = self.eval_operand(data, None)?;
                let data = self.read_pointer(&data)?;
                let meta = self.eval_operand(meta, None)?;
                let meta = if meta.layout.is_zst() {
                    MemPlaceMeta::None
                } else {
                    MemPlaceMeta::Meta(self.read_scalar(&meta)?)
                };
                let ptr_imm = Immediate::new_pointer_with_meta(data, meta, self);
                let ptr = ImmTy::from_immediate(ptr_imm, dest.layout);
                self.copy_op(&ptr, dest)?;
                return interp_ok(());
            }
            _ => (FIRST_VARIANT, dest.clone(), None),
        };
        if active_field_index.is_some() {
            assert_eq!(operands.len(), 1);
        }
        for (field_index, operand) in operands.iter_enumerated() {
            let field_index = active_field_index.unwrap_or(field_index);
            let field_dest = self.project_field(&variant_dest, field_index)?;
            let op = self.eval_operand(operand, Some(field_dest.layout))?;
            self.copy_op(&op, &field_dest)?;
        }
        self.write_discriminant(variant_index, dest)
    }

    /// Repeats `operand` into the destination. `dest` must have array type, and that type
    /// determines how often `operand` is repeated.
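    ///
    /// The operand is evaluated and written only once, to the first element; the remaining
    /// elements are then filled by copying that first element with `mem_copy_repeatedly`.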
    fn write_repeat(
        &mut self,
        operand: &mir::Operand<'tcx>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        let src = self.eval_operand(operand, None)?;
        assert!(src.layout.is_sized());
        let dest = self.force_allocation(&dest)?;
        let length = dest.len(self)?;

        if length == 0 {
            // Nothing to copy... but let's still make sure that `dest` as a place is valid.
            self.get_place_alloc_mut(&dest)?;
        } else {
            // Write the src to the first element.
            let first = self.project_index(&dest, 0)?;
            self.copy_op(&src, &first)?;

            // This is performance-sensitive code for big static/const arrays! So we
            // avoid writing each operand individually and instead just make many copies
            // of the first element.
            let elem_size = first.layout.size;
            let first_ptr = first.ptr();
            let rest_ptr = first_ptr.wrapping_offset(elem_size, self);
            // No alignment requirement since `copy_op` above already checked it.
            self.mem_copy_repeatedly(
                first_ptr,
                rest_ptr,
                elem_size,
                length - 1,
                /*nonoverlapping:*/ true,
            )?;
        }

        interp_ok(())
    }

    /// Evaluate the arguments of a function call.
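    ///
    /// `Move` operands that are backed by memory become `FnArg::InPlace`, preserving their
    /// location so it can be made inaccessible for the duration of the call; all other operands
    /// are passed as `FnArg::Copy`.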
    fn eval_fn_call_argument(
        &self,
        op: &mir::Operand<'tcx>,
    ) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> {
        interp_ok(match op {
            mir::Operand::Copy(_) | mir::Operand::Constant(_) => {
                // Make a regular copy.
                let op = self.eval_operand(op, None)?;
                FnArg::Copy(op)
            }
            mir::Operand::Move(place) => {
                // If this place lives in memory, preserve its location.
                // We call `place_to_op`, whose result is an `MPlaceTy` whenever there exists
                // an mplace for this place. (This is in contrast to `PlaceTy::as_mplace_or_local`,
                // which can return a local even if that local has an mplace.)
                let place = self.eval_place(*place)?;
                let op = self.place_to_op(&place)?;

                match op.as_mplace_or_imm() {
                    Either::Left(mplace) => FnArg::InPlace(mplace),
                    Either::Right(_imm) => {
                        // This argument doesn't live in memory, so there's no place
                        // to make inaccessible during the call.
                        // We rely on there not being any stray `PlaceTy` that would let the
                        // caller directly access this local!
                        // This is also crucial for tail calls, where we want the `FnArg` to
                        // stay valid when the old stack frame gets popped.
                        FnArg::Copy(op)
                    }
                }
            }
        })
    }

    /// Shared part of `Call` and `TailCall` implementation — finding and evaluating all the
    /// necessary information about callee and arguments to make a call.
    fn eval_callee_and_args(
        &self,
        terminator: &mir::Terminator<'tcx>,
        func: &mir::Operand<'tcx>,
        args: &[Spanned<mir::Operand<'tcx>>],
    ) -> InterpResult<'tcx, EvaluatedCalleeAndArgs<'tcx, M>> {
        let func = self.eval_operand(func, None)?;
        let args = args
            .iter()
            .map(|arg| self.eval_fn_call_argument(&arg.node))
            .collect::<InterpResult<'tcx, Vec<_>>>()?;

        let fn_sig_binder = func.layout.ty.fn_sig(*self.tcx);
        let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.typing_env, fn_sig_binder);
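        // Operands beyond the declared inputs form the variadic tail (only present for
        // C-variadic callees); they only feed into the call ABI computed below.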
        let extra_args = &args[fn_sig.inputs().len()..];
        let extra_args =
            self.tcx.mk_type_list_from_iter(extra_args.iter().map(|arg| arg.layout().ty));

        let (callee, fn_abi, with_caller_location) = match *func.layout.ty.kind() {
            ty::FnPtr(..) => {
                let fn_ptr = self.read_pointer(&func)?;
                let fn_val = self.get_ptr_fn(fn_ptr)?;
                (fn_val, self.fn_abi_of_fn_ptr(fn_sig_binder, extra_args)?, false)
            }
            ty::FnDef(def_id, args) => {
                let instance = self.resolve(def_id, args)?;
                (
                    FnVal::Instance(instance),
                    self.fn_abi_of_instance(instance, extra_args)?,
                    instance.def.requires_caller_location(*self.tcx),
                )
            }
            _ => {
                span_bug!(terminator.source_info.span, "invalid callee of type {}", func.layout.ty)
            }
        };

        interp_ok(EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location })
    }

    fn eval_terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
        info!("{:?}", terminator.kind);

        use rustc_middle::mir::TerminatorKind::*;
        match terminator.kind {
            Return => {
                self.return_from_current_stack_frame(/* unwinding */ false)?
            }

            Goto { target } => self.go_to_block(target),

            SwitchInt { ref discr, ref targets } => {
                let discr = self.read_immediate(&self.eval_operand(discr, None)?)?;
                trace!("SwitchInt({:?})", *discr);

                // Branch to the `otherwise` case by default, if no match is found.
                let mut target_block = targets.otherwise();

                for (const_int, target) in targets.iter() {
                    // Compare using MIR BinOp::Eq, to also support pointer values.
                    // (Avoiding `self.binary_op` as that does some redundant layout computation.)
                    let res = self.binary_op(
                        mir::BinOp::Eq,
                        &discr,
                        &ImmTy::from_uint(const_int, discr.layout),
                    )?;
                    if res.to_scalar().to_bool()? {
                        target_block = target;
                        break;
                    }
                }

                self.go_to_block(target_block);
            }

            Call {
                ref func,
                ref args,
                destination,
                target,
                unwind,
                call_source: _,
                fn_span: _,
            } => {
                let old_stack = self.frame_idx();
                let old_loc = self.frame().loc;

                let EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location } =
                    self.eval_callee_and_args(terminator, func, args)?;

                let destination = self.eval_place(destination)?;
                self.init_fn_call(
                    callee,
                    (fn_sig.abi, fn_abi),
                    &args,
                    with_caller_location,
                    &destination,
                    target,
                    if fn_abi.can_unwind { unwind } else { mir::UnwindAction::Unreachable },
                )?;
                // Sanity-check that `init_fn_call` either pushed a new frame or
                // did a jump to another block.
                if self.frame_idx() == old_stack && self.frame().loc == old_loc {
                    span_bug!(terminator.source_info.span, "evaluating this call made no progress");
                }
            }

            TailCall { ref func, ref args, fn_span: _ } => {
                let old_frame_idx = self.frame_idx();

                let EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location } =
                    self.eval_callee_and_args(terminator, func, args)?;

                self.init_fn_tail_call(callee, (fn_sig.abi, fn_abi), &args, with_caller_location)?;

                if self.frame_idx() != old_frame_idx {
                    span_bug!(
                        terminator.source_info.span,
                        "evaluating this tail call pushed a new stack frame"
                    );
                }
            }

            Drop { place, target, unwind, replace: _, drop, async_fut } => {
                assert!(
                    async_fut.is_none() && drop.is_none(),
                    "Async Drop must be expanded or reset to sync in runtime MIR"
                );
                let place = self.eval_place(place)?;
                let instance = Instance::resolve_drop_in_place(*self.tcx, place.layout.ty);
                if let ty::InstanceKind::DropGlue(_, None) = instance.def {
                    // This is the branch we enter if and only if the dropped type has no drop glue
                    // whatsoever. This can happen as a result of monomorphizing a drop of a
                    // generic. In order to make sure that generic and non-generic code behaves
                    // roughly the same (and in keeping with Mir semantics) we do nothing here.
                    self.go_to_block(target);
                    return interp_ok(());
                }
                trace!("TerminatorKind::drop: {:?}, type {}", place, place.layout.ty);
                self.init_drop_in_place_call(&place, instance, target, unwind)?;
            }

            Assert { ref cond, expected, ref msg, target, unwind } => {
                let ignored =
                    M::ignore_optional_overflow_checks(self) && msg.is_optional_overflow_check();
                let cond_val = self.read_scalar(&self.eval_operand(cond, None)?)?.to_bool()?;
                if ignored || expected == cond_val {
                    self.go_to_block(target);
                } else {
                    M::assert_panic(self, msg, unwind)?;
                }
            }

            UnwindTerminate(reason) => {
                M::unwind_terminate(self, reason)?;
            }

            // When we encounter Resume, we've finished unwinding
            // cleanup for the current stack frame. We pop it in order
            // to continue unwinding the next frame
            UnwindResume => {
                trace!("unwinding: resuming from cleanup");
                // By definition, a Resume terminator means
                // that we're unwinding
                self.return_from_current_stack_frame(/* unwinding */ true)?;
                return interp_ok(());
            }

            // It is UB to ever encounter this.
            Unreachable => throw_ub!(Unreachable),

            // These should never occur for MIR we actually run.
            FalseEdge { .. } | FalseUnwind { .. } | Yield { .. } | CoroutineDrop => span_bug!(
                terminator.source_info.span,
                "{:#?} should have been eliminated by MIR pass",
                terminator.kind
            ),

            InlineAsm { .. } => {
                throw_unsup_format!("inline assembly is not supported");
            }
        }

        interp_ok(())
    }
}