// rustc_const_eval/interpret/step.rs

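//! This module contains the `InterpCx` methods for executing a single step of the interpreter.
//!
//! The main entry point is the `step` method.
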
use either::Either;
use rustc_abi::{FIRST_VARIANT, FieldIdx};
use rustc_index::IndexSlice;
use rustc_middle::ty::{self, Instance, Ty};
use rustc_middle::{bug, mir, span_bug};
use rustc_span::source_map::Spanned;
use rustc_target::callconv::FnAbi;
use tracing::{info, instrument, trace};

use super::{
    FnArg, FnVal, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemPlaceMeta, PlaceTy,
    Projectable, Scalar, interp_ok, throw_ub, throw_unsup_format,
};
use crate::util;

struct EvaluatedCalleeAndArgs<'tcx, M: Machine<'tcx>> {
    callee: FnVal<'tcx, M::ExtraFnVal>,
    args: Vec<FnArg<'tcx, M::Provenance>>,
    fn_sig: ty::FnSig<'tcx>,
    fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>,
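    /// `true` if the callee has `#[track_caller]` and thus expects an implicit extra
    /// caller-location argument.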
    with_caller_location: bool,
}

impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
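    /// Executes a single step of the interpreter: either one statement, or one terminator.
    ///
    /// Returns `true` as long as there are more things to do.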
    #[inline(always)]
    pub fn step(&mut self) -> InterpResult<'tcx, bool> {
        if self.stack().is_empty() {
            return interp_ok(false);
        }

        let Either::Left(loc) = self.frame().loc else {
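            // We are unwinding, and this frame has no cleanup code to run; keep unwinding.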
            trace!("unwinding: skipping frame");
            self.return_from_current_stack_frame(true)?;
            return interp_ok(true);
        };
        let basic_block = &self.body().basic_blocks[loc.block];

        if let Some(stmt) = basic_block.statements.get(loc.statement_index) {
            let old_frames = self.frame_idx();
            self.eval_statement(stmt)?;
            assert_eq!(old_frames, self.frame_idx());
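            // Statements never change the call stack, so just advance to the next statement.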
            self.frame_mut().loc.as_mut().left().unwrap().statement_index += 1;
            return interp_ok(true);
        }

        M::before_terminator(self)?;

        let terminator = basic_block.terminator();
        self.eval_terminator(terminator)?;
        if !self.stack().is_empty() {
            if let Either::Left(loc) = self.frame().loc {
                info!("// executing {:?}", loc.block);
            }
        }
        interp_ok(true)
    }

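    /// Runs the interpretation logic for the given `mir::Statement` at the current frame and
    /// statement counter. This does NOT move the statement counter forward; the caller has to
    /// do that.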
    pub fn eval_statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
        info!("{:?}", stmt);

        use rustc_middle::mir::StatementKind::*;

        match &stmt.kind {
            Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?,

            SetDiscriminant { place, variant_index } => {
                let dest = self.eval_place(**place)?;
                self.write_discriminant(*variant_index, &dest)?;
            }

            Deinit(place) => {
                let dest = self.eval_place(**place)?;
                self.write_uninit(&dest)?;
            }

            StorageLive(local) => {
                self.storage_live(*local)?;
            }

            StorageDead(local) => {
                self.storage_dead(*local)?;
            }

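            // FakeReads only exist to help borrowck; they are a NOP when interpreting.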
            FakeRead(..) => {}

            Retag(kind, place) => {
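                // Hook for the machine's aliasing model (e.g. Miri's Stacked/Tree Borrows)
                // to retag the contents of this place.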
                let dest = self.eval_place(**place)?;
                M::retag_place_contents(self, *kind, &dest)?;
            }

            Intrinsic(box intrinsic) => self.eval_nondiverging_intrinsic(intrinsic)?,

            PlaceMention(box place) => {
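                // Evaluate the place expression, without reading from it.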
                let _ = self.eval_place(*place)?;
            }

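            // Type ascriptions only guide type-checking; they are a NOP here.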
            AscribeUserType(..) => {}

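            // Coverage statements are only injected via -Cinstrument-coverage and carry no
            // operational semantics, so they are discarded here.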
            Coverage(..) => {}

            ConstEvalCounter => {
                M::increment_const_eval_counter(self)?;
            }

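            // Nops are inserted by MIR transformations in place of removed statements;
            // defined to do nothing.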
            Nop => {}

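            // These hints exist only so lints about edition-dependent drop order can be
            // emitted; they have no operational effect.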
            BackwardIncompatibleDropHint { .. } => {}
        }

        interp_ok(())
    }

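    /// Evaluates an assignment statement: computes the rvalue and stores the result into `place`.
    ///
    /// There is no separate `eval_rvalue` function; each rvalue kind writes its result directly
    /// into the destination.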
    pub fn eval_rvalue_into_place(
        &mut self,
        rvalue: &mir::Rvalue<'tcx>,
        place: mir::Place<'tcx>,
    ) -> InterpResult<'tcx> {
        let dest = self.eval_place(place)?;
        use rustc_middle::mir::Rvalue::*;
        match *rvalue {
            ThreadLocalRef(did) => {
                let ptr = M::thread_local_static_pointer(self, did)?;
                self.write_pointer(ptr, &dest)?;
            }

            Use(ref operand) => {
                let op = self.eval_operand(operand, Some(dest.layout))?;
                self.copy_op(&op, &dest)?;
            }

            CopyForDeref(place) => {
                let op = self.eval_place_to_op(place, Some(dest.layout))?;
                self.copy_op(&op, &dest)?;
            }

            BinaryOp(bin_op, box (ref left, ref right)) => {
                let layout = util::binop_left_homogeneous(bin_op).then_some(dest.layout);
                let left = self.read_immediate(&self.eval_operand(left, layout)?)?;
                let layout = util::binop_right_homogeneous(bin_op).then_some(left.layout);
                let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
                let result = self.binary_op(bin_op, &left, &right)?;
                assert_eq!(result.layout, dest.layout, "layout mismatch for result of {bin_op:?}");
                self.write_immediate(*result, &dest)?;
            }

            UnaryOp(un_op, ref operand) => {
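                // The operand always has the same type as the result.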
                let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
                let result = self.unary_op(un_op, &val)?;
                assert_eq!(result.layout, dest.layout, "layout mismatch for result of {un_op:?}");
                self.write_immediate(*result, &dest)?;
            }

            NullaryOp(null_op, ty) => {
                let ty = self.instantiate_from_current_frame_and_normalize_erasing_regions(ty)?;
                let val = self.nullary_op(null_op, ty)?;
                self.write_immediate(*val, &dest)?;
            }

            Aggregate(box ref kind, ref operands) => {
                self.write_aggregate(kind, operands, &dest)?;
            }

            Repeat(ref operand, _) => {
                self.write_repeat(operand, &dest)?;
            }

            Len(place) => {
                let src = self.eval_place(place)?;
                let len = src.len(self)?;
                self.write_scalar(Scalar::from_target_usize(len, self), &dest)?;
            }

            Ref(_, borrow_kind, place) => {
                let src = self.eval_place(place)?;
                let place = self.force_allocation(&src)?;
                let val = ImmTy::from_immediate(place.to_ref(self), dest.layout);
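                // A fresh reference was created, make sure it gets retagged.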
                let val = M::retag_ptr_value(
                    self,
                    if borrow_kind.allows_two_phase_borrow() {
                        mir::RetagKind::TwoPhase
                    } else {
                        mir::RetagKind::Default
                    },
                    &val,
                )?;
                self.write_immediate(*val, &dest)?;
            }

            RawPtr(kind, place) => {
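                // Figure out whether this is an addr_of of an already raw place.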
                let place_base_raw = if place.is_indirect_first_projection() {
                    let ty = self.frame().body.local_decls[place.local].ty;
                    ty.is_raw_ptr()
                } else {
                    false
                };

                let src = self.eval_place(place)?;
                let place = self.force_allocation(&src)?;
                let mut val = ImmTy::from_immediate(place.to_ref(self), dest.layout);
                if !place_base_raw && !kind.is_fake() {
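                    // The place was not already raw, so the new raw pointer needs retagging.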
                    val = M::retag_ptr_value(self, mir::RetagKind::Raw, &val)?;
                }
                self.write_immediate(*val, &dest)?;
            }

            ShallowInitBox(ref operand, _) => {
                let src = self.eval_operand(operand, None)?;
                let v = self.read_immediate(&src)?;
                self.write_immediate(*v, &dest)?;
            }

            Cast(cast_kind, ref operand, cast_ty) => {
                let src = self.eval_operand(operand, None)?;
                let cast_ty =
                    self.instantiate_from_current_frame_and_normalize_erasing_regions(cast_ty)?;
                self.cast(&src, cast_kind, cast_ty, &dest)?;
            }

            Discriminant(place) => {
                let op = self.eval_place_to_op(place, None)?;
                let variant = self.read_discriminant(&op)?;
                let discr = self.discriminant_for_variant(op.layout.ty, variant)?;
                self.write_immediate(*discr, &dest)?;
            }

            WrapUnsafeBinder(ref op, _ty) => {
                let op = self.eval_operand(op, None)?;
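                // Wrapping into an unsafe binder changes the type but not the underlying
                // bytes, so use a transmuting copy.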
                self.copy_op_allow_transmute(&op, &dest)?;
            }
        }

        trace!("{:?}", self.dump_place(&dest));

        interp_ok(())
    }

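    /// Writes the fields of an aggregate rvalue into `dest`, then sets the discriminant.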
    #[instrument(skip(self), level = "trace")]
    fn write_aggregate(
        &mut self,
        kind: &mir::AggregateKind<'tcx>,
        operands: &IndexSlice<FieldIdx, mir::Operand<'tcx>>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
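        // Make sure all the padding in the destination ends up as uninit.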
        self.write_uninit(dest)?;
        let (variant_index, variant_dest, active_field_index) = match *kind {
            mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                let variant_dest = self.project_downcast(dest, variant_index)?;
                (variant_index, variant_dest, active_field_index)
            }
            mir::AggregateKind::RawPtr(..) => {
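                // Pointers don't have "fields" in the normal sense, so the
                // projection-based code below would either fail in projection
                // or in type mismatches. Instead, build an `Immediate` from
                // the parts and write that to the destination.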
                let [data, meta] = &operands.raw else {
                    bug!("{kind:?} should have 2 operands, had {operands:?}");
                };
                let data = self.eval_operand(data, None)?;
                let data = self.read_pointer(&data)?;
                let meta = self.eval_operand(meta, None)?;
                let meta = if meta.layout.is_zst() {
                    MemPlaceMeta::None
                } else {
                    MemPlaceMeta::Meta(self.read_scalar(&meta)?)
                };
                let ptr_imm = Immediate::new_pointer_with_meta(data, meta, self);
                let ptr = ImmTy::from_immediate(ptr_imm, dest.layout);
                self.copy_op(&ptr, dest)?;
                return interp_ok(());
            }
            _ => (FIRST_VARIANT, dest.clone(), None),
        };
        if active_field_index.is_some() {
            assert_eq!(operands.len(), 1);
        }
        for (field_index, operand) in operands.iter_enumerated() {
            let field_index = active_field_index.unwrap_or(field_index);
            let field_dest = self.project_field(&variant_dest, field_index)?;
            let op = self.eval_operand(operand, Some(field_dest.layout))?;
            self.copy_op(&op, &field_dest)?;
        }
        self.write_discriminant(variant_index, dest)
    }

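    /// Repeats `operand` into `dest`, which must have array type; the array length determines
    /// how often the operand is repeated.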
    fn write_repeat(
        &mut self,
        operand: &mir::Operand<'tcx>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        let src = self.eval_operand(operand, None)?;
        assert!(src.layout.is_sized());
        let dest = self.force_allocation(&dest)?;
        let length = dest.len(self)?;

        if length == 0 {
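            // Nothing to copy, but let's still make sure that `dest` as a place is valid.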
            self.get_place_alloc_mut(&dest)?;
        } else {
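            // Write the first element.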
            let first = self.project_index(&dest, 0)?;
            self.copy_op(&src, &first)?;

            let elem_size = first.layout.size;
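            // This is performance-sensitive code for big static/const arrays! So we
            // avoid writing each operand individually and instead just make many copies
            // of the first element.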
            let first_ptr = first.ptr();
            let rest_ptr = first_ptr.wrapping_offset(elem_size, self);
            self.mem_copy_repeatedly(
                first_ptr,
                rest_ptr,
                elem_size,
                length - 1,
                /* nonoverlapping */ true,
            )?;
        }

        interp_ok(())
    }

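    /// Evaluates a single call argument, deciding whether it is passed by copy or in-place.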
    fn eval_fn_call_argument(
        &self,
        op: &mir::Operand<'tcx>,
    ) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> {
        interp_ok(match op {
            mir::Operand::Copy(_) | mir::Operand::Constant(_) => {
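                // Make a regular copy.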
                let op = self.eval_operand(op, None)?;
                FnArg::Copy(op)
            }
            mir::Operand::Move(place) => {
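                // If this place lives in memory, pass it in-place: this preserves its location
                // and makes the old local inaccessible for the duration of the call.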
                let place = self.eval_place(*place)?;
                let op = self.place_to_op(&place)?;

                match op.as_mplace_or_imm() {
                    Either::Left(mplace) => FnArg::InPlace(mplace),
                    Either::Right(_imm) => {
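                        // This argument does not live in memory, so there is no place to make
                        // inaccessible during the call; a copy is all we can do.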
                        FnArg::Copy(op)
                    }
                }
            }
        })
    }

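    /// Shared part of the `Call` and `TailCall` implementations: evaluates the callee and the
    /// arguments and computes the signature and ABI information needed to perform the call.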
    fn eval_callee_and_args(
        &self,
        terminator: &mir::Terminator<'tcx>,
        func: &mir::Operand<'tcx>,
        args: &[Spanned<mir::Operand<'tcx>>],
    ) -> InterpResult<'tcx, EvaluatedCalleeAndArgs<'tcx, M>> {
        let func = self.eval_operand(func, None)?;
        let args = args
            .iter()
            .map(|arg| self.eval_fn_call_argument(&arg.node))
            .collect::<InterpResult<'tcx, Vec<_>>>()?;

        let fn_sig_binder = func.layout.ty.fn_sig(*self.tcx);
        let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.typing_env, fn_sig_binder);
        let extra_args = &args[fn_sig.inputs().len()..];
        let extra_args =
            self.tcx.mk_type_list_from_iter(extra_args.iter().map(|arg| arg.layout().ty));

        let (callee, fn_abi, with_caller_location) = match *func.layout.ty.kind() {
            ty::FnPtr(..) => {
                let fn_ptr = self.read_pointer(&func)?;
                let fn_val = self.get_ptr_fn(fn_ptr)?;
                (fn_val, self.fn_abi_of_fn_ptr(fn_sig_binder, extra_args)?, false)
            }
            ty::FnDef(def_id, args) => {
                let instance = self.resolve(def_id, args)?;
                (
                    FnVal::Instance(instance),
                    self.fn_abi_of_instance(instance, extra_args)?,
                    instance.def.requires_caller_location(*self.tcx),
                )
            }
            _ => {
                span_bug!(terminator.source_info.span, "invalid callee of type {}", func.layout.ty)
            }
        };

        interp_ok(EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location })
    }

    fn eval_terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
        info!("{:?}", terminator.kind);

        use rustc_middle::mir::TerminatorKind::*;
        match terminator.kind {
            Return => {
                self.return_from_current_stack_frame(false)?
            }

            Goto { target } => self.go_to_block(target),

            SwitchInt { ref discr, ref targets } => {
                let discr = self.read_immediate(&self.eval_operand(discr, None)?)?;
                trace!("SwitchInt({:?})", *discr);

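                // Branch to the `otherwise` case by default, if no match is found.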
                let mut target_block = targets.otherwise();

                for (const_int, target) in targets.iter() {
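                    // Compare using MIR BinOp::Eq, to also support pointer values.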
                    let res = self.binary_op(
                        mir::BinOp::Eq,
                        &discr,
                        &ImmTy::from_uint(const_int, discr.layout),
                    )?;
                    if res.to_scalar().to_bool()? {
                        target_block = target;
                        break;
                    }
                }

                self.go_to_block(target_block);
            }

            Call {
                ref func,
                ref args,
                destination,
                target,
                unwind,
                call_source: _,
                fn_span: _,
            } => {
                let old_stack = self.frame_idx();
                let old_loc = self.frame().loc;

                let EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location } =
                    self.eval_callee_and_args(terminator, func, args)?;

                let destination = self.eval_place(destination)?;
                self.init_fn_call(
                    callee,
                    (fn_sig.abi, fn_abi),
                    &args,
                    with_caller_location,
                    &destination,
                    target,
                    if fn_abi.can_unwind { unwind } else { mir::UnwindAction::Unreachable },
                )?;
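                // Sanity-check that the call either pushed a new stack frame or jumped to
                // another block; otherwise we made no progress.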
                if self.frame_idx() == old_stack && self.frame().loc == old_loc {
                    span_bug!(terminator.source_info.span, "evaluating this call made no progress");
                }
            }

            TailCall { ref func, ref args, fn_span: _ } => {
                let old_frame_idx = self.frame_idx();

                let EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location } =
                    self.eval_callee_and_args(terminator, func, args)?;

                self.init_fn_tail_call(callee, (fn_sig.abi, fn_abi), &args, with_caller_location)?;

                if self.frame_idx() != old_frame_idx {
                    span_bug!(
                        terminator.source_info.span,
                        "evaluating this tail call pushed a new stack frame"
                    );
                }
            }

            Drop { place, target, unwind, replace: _, drop, async_fut } => {
                assert!(
                    async_fut.is_none() && drop.is_none(),
                    "Async Drop must be expanded or reset to sync in runtime MIR"
                );
                let place = self.eval_place(place)?;
                let instance = Instance::resolve_drop_in_place(*self.tcx, place.layout.ty);
                if let ty::InstanceKind::DropGlue(_, None) = instance.def {
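                    // We only enter this branch if the dropped type has no drop glue at all.
                    // This can happen when monomorphizing a drop of a generic parameter; to keep
                    // generic and non-generic code behaving the same, we simply jump to the
                    // target block.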
                    self.go_to_block(target);
                    return interp_ok(());
                }
                trace!("TerminatorKind::drop: {:?}, type {}", place, place.layout.ty);
                self.init_drop_in_place_call(&place, instance, target, unwind)?;
            }

            Assert { ref cond, expected, ref msg, target, unwind } => {
                let ignored =
                    M::ignore_optional_overflow_checks(self) && msg.is_optional_overflow_check();
                let cond_val = self.read_scalar(&self.eval_operand(cond, None)?)?.to_bool()?;
                if ignored || expected == cond_val {
                    self.go_to_block(target);
                } else {
                    M::assert_panic(self, msg, unwind)?;
                }
            }

            UnwindTerminate(reason) => {
                M::unwind_terminate(self, reason)?;
            }

            UnwindResume => {
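                // By definition, a Resume terminator means we are currently unwinding.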
                trace!("unwinding: resuming from cleanup");
                self.return_from_current_stack_frame(true)?;
                return interp_ok(());
            }

            Unreachable => throw_ub!(Unreachable),

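            // These should never occur for MIR we actually run.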
            FalseEdge { .. } | FalseUnwind { .. } | Yield { .. } | CoroutineDrop => span_bug!(
                terminator.source_info.span,
                "{:#?} should have been eliminated by MIR pass",
                terminator.kind
            ),

            InlineAsm { .. } => {
                throw_unsup_format!("inline assembly is not supported");
            }
        }

        interp_ok(())
    }
}