rustc_mir_build/builder/scope.rs
1/*!
2Managing the scope stack. The scopes are tied to lexical scopes, so as
3we descend the THIR, we push a scope on the stack, build its
4contents, and then pop it off. Every scope is named by a
5`region::Scope`.
6
7### SEME Regions
8
9When pushing a new [Scope], we record the current point in the graph (a
10basic block); this marks the entry to the scope. We then generate more
11stuff in the control-flow graph. Whenever the scope is exited, either
12via a `break` or `return` or just by fallthrough, that marks an exit
13from the scope. Each lexical scope thus corresponds to a single-entry,
14multiple-exit (SEME) region in the control-flow graph.
15
16For now, we record the `region::Scope` to each SEME region for later reference
17(see caveat in next paragraph). This is because destruction scopes are tied to
18them. This may change in the future so that MIR lowering determines its own
19destruction scopes.
20
21### Not so SEME Regions
22
23In the course of building matches, it sometimes happens that certain code
24(namely guards) gets executed multiple times. This means that the scope lexical
25scope may in fact correspond to multiple, disjoint SEME regions. So in fact our
26mapping is from one scope to a vector of SEME regions. Since the SEME regions
27are disjoint, the mapping is still one-to-one for the set of SEME regions that
28we're currently in.
29
30Also in matches, the scopes assigned to arms are not always even SEME regions!
31Each arm has a single region with one entry for each pattern. We manually
32manipulate the scheduled drops in this scope to avoid dropping things multiple
33times.
34
35### Drops
36
37The primary purpose for scopes is to insert drops: while building
38the contents, we also accumulate places that need to be dropped upon
39exit from each scope. This is done by calling `schedule_drop`. Once a
40drop is scheduled, whenever we branch out we will insert drops of all
41those places onto the outgoing edge. Note that we don't know the full
42set of scheduled drops up front, and so whenever we exit from the
43scope we only drop the values scheduled thus far. For example, consider
44the scope S corresponding to this loop:
45
46```
47# let cond = true;
48loop {
49 let x = ..;
50 if cond { break; }
51 let y = ..;
52}
53```
54
55When processing the `let x`, we will add one drop to the scope for
56`x`. The break will then insert a drop for `x`. When we process `let
57y`, we will add another drop (in fact, to a subscope, but let's ignore
58that for now); any later drops would also drop `y`.
59
60### Early exit
61
62There are numerous "normal" ways to early exit a scope: `break`,
63`continue`, `return` (panics are handled separately). Whenever an
64early exit occurs, the method `break_scope` is called. It is given the
65current point in execution where the early exit occurs, as well as the
66scope you want to branch to (note that all early exits from to some
67other enclosing scope). `break_scope` will record the set of drops currently
68scheduled in a [DropTree]. Later, before `in_breakable_scope` exits, the drops
69will be added to the CFG.
70
71Panics are handled in a similar fashion, except that the drops are added to the
72MIR once the rest of the function has finished being lowered. If a terminator
73can panic, call `diverge_from(block)` with the block containing the terminator
74`block`.
75
76### Breakable scopes
77
78In addition to the normal scope stack, we track a loop scope stack
79that contains only loops and breakable blocks. It tracks where a `break`,
80`continue` or `return` should go to.
81
82*/
83
84use std::mem;
85
86use interpret::ErrorHandled;
87use rustc_data_structures::fx::FxHashMap;
88use rustc_hir::HirId;
89use rustc_index::{IndexSlice, IndexVec};
90use rustc_middle::middle::region;
91use rustc_middle::mir::{self, *};
92use rustc_middle::thir::{AdtExpr, AdtExprBase, ArmId, ExprId, ExprKind, LintLevel};
93use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt, ValTree};
94use rustc_middle::{bug, span_bug};
95use rustc_pattern_analysis::rustc::RustcPatCtxt;
96use rustc_session::lint::Level;
97use rustc_span::source_map::Spanned;
98use rustc_span::{DUMMY_SP, Span};
99use tracing::{debug, instrument};
100
101use super::matches::BuiltMatchTree;
102use crate::builder::{BlockAnd, BlockAndExtension, BlockFrame, Builder, CFG};
103use crate::errors::{ConstContinueBadConst, ConstContinueUnknownJumpTarget};
104
105#[derive(Debug)]
106pub(crate) struct Scopes<'tcx> {
107 scopes: Vec<Scope>,
108
109 /// The current set of breakable scopes. See module comment for more details.
110 breakable_scopes: Vec<BreakableScope<'tcx>>,
111
112 const_continuable_scopes: Vec<ConstContinuableScope<'tcx>>,
113
114 /// The scope of the innermost if-then currently being lowered.
115 if_then_scope: Option<IfThenScope>,
116
117 /// Drops that need to be done on unwind paths. See the comment on
118 /// [DropTree] for more details.
119 unwind_drops: DropTree,
120
121 /// Drops that need to be done on paths to the `CoroutineDrop` terminator.
122 coroutine_drops: DropTree,
123}
124
125#[derive(Debug)]
126struct Scope {
127 /// The source scope this scope was created in.
128 source_scope: SourceScope,
129
130 /// the region span of this scope within source code.
131 region_scope: region::Scope,
132
133 /// set of places to drop when exiting this scope. This starts
134 /// out empty but grows as variables are declared during the
135 /// building process. This is a stack, so we always drop from the
136 /// end of the vector (top of the stack) first.
137 drops: Vec<DropData>,
138
139 moved_locals: Vec<Local>,
140
141 /// The drop index that will drop everything in and below this scope on an
142 /// unwind path.
143 cached_unwind_block: Option<DropIdx>,
144
145 /// The drop index that will drop everything in and below this scope on a
146 /// coroutine drop path.
147 cached_coroutine_drop_block: Option<DropIdx>,
148}
149
150#[derive(Clone, Copy, Debug)]
151struct DropData {
152 /// The `Span` where drop obligation was incurred (typically where place was
153 /// declared)
154 source_info: SourceInfo,
155
156 /// local to drop
157 local: Local,
158
159 /// Whether this is a value Drop or a StorageDead.
160 kind: DropKind,
161}
162
163#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
164pub(crate) enum DropKind {
165 Value,
166 Storage,
167 ForLint,
168}
169
170#[derive(Debug)]
171struct BreakableScope<'tcx> {
172 /// Region scope of the loop
173 region_scope: region::Scope,
174 /// The destination of the loop/block expression itself (i.e., where to put
175 /// the result of a `break` or `return` expression)
176 break_destination: Place<'tcx>,
177 /// Drops that happen on the `break`/`return` path.
178 break_drops: DropTree,
179 /// Drops that happen on the `continue` path.
180 continue_drops: Option<DropTree>,
181}
182
183#[derive(Debug)]
184struct ConstContinuableScope<'tcx> {
185 /// The scope for the `#[loop_match]` which its `#[const_continue]`s will jump to.
186 region_scope: region::Scope,
187 /// The place of the state of a `#[loop_match]`, which a `#[const_continue]` must update.
188 state_place: Place<'tcx>,
189
190 arms: Box<[ArmId]>,
191 built_match_tree: BuiltMatchTree<'tcx>,
192
193 /// Drops that happen on a `#[const_continue]`
194 const_continue_drops: DropTree,
195}
196
197#[derive(Debug)]
198struct IfThenScope {
199 /// The if-then scope or arm scope
200 region_scope: region::Scope,
201 /// Drops that happen on the `else` path.
202 else_drops: DropTree,
203}
204
205/// The target of an expression that breaks out of a scope
206#[derive(Clone, Copy, Debug)]
207pub(crate) enum BreakableTarget {
208 Continue(region::Scope),
209 Break(region::Scope),
210 Return,
211}
212
213rustc_index::newtype_index! {
214 #[orderable]
215 struct DropIdx {}
216}
217
218const ROOT_NODE: DropIdx = DropIdx::ZERO;
219
220/// A tree of drops that we have deferred lowering. It's used for:
221///
222/// * Drops on unwind paths
223/// * Drops on coroutine drop paths (when a suspended coroutine is dropped)
224/// * Drops on return and loop exit paths
225/// * Drops on the else path in an `if let` chain
226///
227/// Once no more nodes could be added to the tree, we lower it to MIR in one go
228/// in `build_mir`.
229#[derive(Debug)]
230struct DropTree {
231 /// Nodes in the drop tree, containing drop data and a link to the next node.
232 drop_nodes: IndexVec<DropIdx, DropNode>,
233 /// Map for finding the index of an existing node, given its contents.
234 existing_drops_map: FxHashMap<DropNodeKey, DropIdx>,
235 /// Edges into the `DropTree` that need to be added once it's lowered.
236 entry_points: Vec<(DropIdx, BasicBlock)>,
237}
238
239/// A single node in the drop tree.
240#[derive(Debug)]
241struct DropNode {
242 /// Info about the drop to be performed at this node in the drop tree.
243 data: DropData,
244 /// Index of the "next" drop to perform (in drop order, not declaration order).
245 next: DropIdx,
246}
247
248/// Subset of [`DropNode`] used for reverse lookup in a hash table.
249#[derive(Debug, PartialEq, Eq, Hash)]
250struct DropNodeKey {
251 next: DropIdx,
252 local: Local,
253}
254
255impl Scope {
256 /// Whether there's anything to do for the cleanup path, that is,
257 /// when unwinding through this scope. This includes destructors,
258 /// but not StorageDead statements, which don't get emitted at all
259 /// for unwinding, for several reasons:
260 /// * clang doesn't emit llvm.lifetime.end for C++ unwinding
261 /// * LLVM's memory dependency analysis can't handle it atm
262 /// * polluting the cleanup MIR with StorageDead creates
263 /// landing pads even though there's no actual destructors
264 /// * freeing up stack space has no effect during unwinding
265 /// Note that for coroutines we do emit StorageDeads, for the
266 /// use of optimizations in the MIR coroutine transform.
267 fn needs_cleanup(&self) -> bool {
268 self.drops.iter().any(|drop| match drop.kind {
269 DropKind::Value | DropKind::ForLint => true,
270 DropKind::Storage => false,
271 })
272 }
273
274 fn invalidate_cache(&mut self) {
275 self.cached_unwind_block = None;
276 self.cached_coroutine_drop_block = None;
277 }
278}
279
280/// A trait that determined how [DropTree] creates its blocks and
281/// links to any entry nodes.
282trait DropTreeBuilder<'tcx> {
283 /// Create a new block for the tree. This should call either
284 /// `cfg.start_new_block()` or `cfg.start_new_cleanup_block()`.
285 fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock;
286
287 /// Links a block outside the drop tree, `from`, to the block `to` inside
288 /// the drop tree.
289 fn link_entry_point(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock);
290}
291
292impl DropTree {
293 fn new() -> Self {
294 // The root node of the tree doesn't represent a drop, but instead
295 // represents the block in the tree that should be jumped to once all
296 // of the required drops have been performed.
297 let fake_source_info = SourceInfo::outermost(DUMMY_SP);
298 let fake_data =
299 DropData { source_info: fake_source_info, local: Local::MAX, kind: DropKind::Storage };
300 let drop_nodes = IndexVec::from_raw(vec![DropNode { data: fake_data, next: DropIdx::MAX }]);
301 Self { drop_nodes, entry_points: Vec::new(), existing_drops_map: FxHashMap::default() }
302 }
303
304 /// Adds a node to the drop tree, consisting of drop data and the index of
305 /// the "next" drop (in drop order), which could be the sentinel [`ROOT_NODE`].
306 ///
307 /// If there is already an equivalent node in the tree, nothing is added, and
308 /// that node's index is returned. Otherwise, the new node's index is returned.
309 fn add_drop(&mut self, data: DropData, next: DropIdx) -> DropIdx {
310 let drop_nodes = &mut self.drop_nodes;
311 *self
312 .existing_drops_map
313 .entry(DropNodeKey { next, local: data.local })
314 // Create a new node, and also add its index to the map.
315 .or_insert_with(|| drop_nodes.push(DropNode { data, next }))
316 }
317
318 /// Registers `from` as an entry point to this drop tree, at `to`.
319 ///
320 /// During [`Self::build_mir`], `from` will be linked to the corresponding
321 /// block within the drop tree.
322 fn add_entry_point(&mut self, from: BasicBlock, to: DropIdx) {
323 debug_assert!(to < self.drop_nodes.next_index());
324 self.entry_points.push((to, from));
325 }
326
327 /// Builds the MIR for a given drop tree.
328 fn build_mir<'tcx, T: DropTreeBuilder<'tcx>>(
329 &mut self,
330 cfg: &mut CFG<'tcx>,
331 root_node: Option<BasicBlock>,
332 ) -> IndexVec<DropIdx, Option<BasicBlock>> {
333 debug!("DropTree::build_mir(drops = {:#?})", self);
334
335 let mut blocks = self.assign_blocks::<T>(cfg, root_node);
336 self.link_blocks(cfg, &mut blocks);
337
338 blocks
339 }
340
341 /// Assign blocks for all of the drops in the drop tree that need them.
342 fn assign_blocks<'tcx, T: DropTreeBuilder<'tcx>>(
343 &mut self,
344 cfg: &mut CFG<'tcx>,
345 root_node: Option<BasicBlock>,
346 ) -> IndexVec<DropIdx, Option<BasicBlock>> {
347 // StorageDead statements can share blocks with each other and also with
348 // a Drop terminator. We iterate through the drops to find which drops
349 // need their own block.
350 #[derive(Clone, Copy)]
351 enum Block {
352 // This drop is unreachable
353 None,
354 // This drop is only reachable through the `StorageDead` with the
355 // specified index.
356 Shares(DropIdx),
357 // This drop has more than one way of being reached, or it is
358 // branched to from outside the tree, or its predecessor is a
359 // `Value` drop.
360 Own,
361 }
362
363 let mut blocks = IndexVec::from_elem(None, &self.drop_nodes);
364 blocks[ROOT_NODE] = root_node;
365
366 let mut needs_block = IndexVec::from_elem(Block::None, &self.drop_nodes);
367 if root_node.is_some() {
368 // In some cases (such as drops for `continue`) the root node
369 // already has a block. In this case, make sure that we don't
370 // override it.
371 needs_block[ROOT_NODE] = Block::Own;
372 }
373
374 // Sort so that we only need to check the last value.
375 let entry_points = &mut self.entry_points;
376 entry_points.sort();
377
378 for (drop_idx, drop_node) in self.drop_nodes.iter_enumerated().rev() {
379 if entry_points.last().is_some_and(|entry_point| entry_point.0 == drop_idx) {
380 let block = *blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg));
381 needs_block[drop_idx] = Block::Own;
382 while entry_points.last().is_some_and(|entry_point| entry_point.0 == drop_idx) {
383 let entry_block = entry_points.pop().unwrap().1;
384 T::link_entry_point(cfg, entry_block, block);
385 }
386 }
387 match needs_block[drop_idx] {
388 Block::None => continue,
389 Block::Own => {
390 blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg));
391 }
392 Block::Shares(pred) => {
393 blocks[drop_idx] = blocks[pred];
394 }
395 }
396 if let DropKind::Value = drop_node.data.kind {
397 needs_block[drop_node.next] = Block::Own;
398 } else if drop_idx != ROOT_NODE {
399 match &mut needs_block[drop_node.next] {
400 pred @ Block::None => *pred = Block::Shares(drop_idx),
401 pred @ Block::Shares(_) => *pred = Block::Own,
402 Block::Own => (),
403 }
404 }
405 }
406
407 debug!("assign_blocks: blocks = {:#?}", blocks);
408 assert!(entry_points.is_empty());
409
410 blocks
411 }
412
413 fn link_blocks<'tcx>(
414 &self,
415 cfg: &mut CFG<'tcx>,
416 blocks: &IndexSlice<DropIdx, Option<BasicBlock>>,
417 ) {
418 for (drop_idx, drop_node) in self.drop_nodes.iter_enumerated().rev() {
419 let Some(block) = blocks[drop_idx] else { continue };
420 match drop_node.data.kind {
421 DropKind::Value => {
422 let terminator = TerminatorKind::Drop {
423 target: blocks[drop_node.next].unwrap(),
424 // The caller will handle this if needed.
425 unwind: UnwindAction::Terminate(UnwindTerminateReason::InCleanup),
426 place: drop_node.data.local.into(),
427 replace: false,
428 drop: None,
429 async_fut: None,
430 };
431 cfg.terminate(block, drop_node.data.source_info, terminator);
432 }
433 DropKind::ForLint => {
434 let stmt = Statement::new(
435 drop_node.data.source_info,
436 StatementKind::BackwardIncompatibleDropHint {
437 place: Box::new(drop_node.data.local.into()),
438 reason: BackwardIncompatibleDropReason::Edition2024,
439 },
440 );
441 cfg.push(block, stmt);
442 let target = blocks[drop_node.next].unwrap();
443 if target != block {
444 // Diagnostics don't use this `Span` but debuginfo
445 // might. Since we don't want breakpoints to be placed
446 // here, especially when this is on an unwind path, we
447 // use `DUMMY_SP`.
448 let source_info =
449 SourceInfo { span: DUMMY_SP, ..drop_node.data.source_info };
450 let terminator = TerminatorKind::Goto { target };
451 cfg.terminate(block, source_info, terminator);
452 }
453 }
454 // Root nodes don't correspond to a drop.
455 DropKind::Storage if drop_idx == ROOT_NODE => {}
456 DropKind::Storage => {
457 let stmt = Statement::new(
458 drop_node.data.source_info,
459 StatementKind::StorageDead(drop_node.data.local),
460 );
461 cfg.push(block, stmt);
462 let target = blocks[drop_node.next].unwrap();
463 if target != block {
464 // Diagnostics don't use this `Span` but debuginfo
465 // might. Since we don't want breakpoints to be placed
466 // here, especially when this is on an unwind path, we
467 // use `DUMMY_SP`.
468 let source_info =
469 SourceInfo { span: DUMMY_SP, ..drop_node.data.source_info };
470 let terminator = TerminatorKind::Goto { target };
471 cfg.terminate(block, source_info, terminator);
472 }
473 }
474 }
475 }
476 }
477}
478
479impl<'tcx> Scopes<'tcx> {
480 pub(crate) fn new() -> Self {
481 Self {
482 scopes: Vec::new(),
483 breakable_scopes: Vec::new(),
484 const_continuable_scopes: Vec::new(),
485 if_then_scope: None,
486 unwind_drops: DropTree::new(),
487 coroutine_drops: DropTree::new(),
488 }
489 }
490
491 fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo), vis_scope: SourceScope) {
492 debug!("push_scope({:?})", region_scope);
493 self.scopes.push(Scope {
494 source_scope: vis_scope,
495 region_scope: region_scope.0,
496 drops: vec![],
497 moved_locals: vec![],
498 cached_unwind_block: None,
499 cached_coroutine_drop_block: None,
500 });
501 }
502
503 fn pop_scope(&mut self, region_scope: (region::Scope, SourceInfo)) -> Scope {
504 let scope = self.scopes.pop().unwrap();
505 assert_eq!(scope.region_scope, region_scope.0);
506 scope
507 }
508
509 fn scope_index(&self, region_scope: region::Scope, span: Span) -> usize {
510 self.scopes
511 .iter()
512 .rposition(|scope| scope.region_scope == region_scope)
513 .unwrap_or_else(|| span_bug!(span, "region_scope {:?} does not enclose", region_scope))
514 }
515
516 /// Returns the topmost active scope, which is known to be alive until
517 /// the next scope expression.
518 fn topmost(&self) -> region::Scope {
519 self.scopes.last().expect("topmost_scope: no scopes present").region_scope
520 }
521}
522
523impl<'a, 'tcx> Builder<'a, 'tcx> {
524 // Adding and removing scopes
525 // ==========================
526
527 /// Start a breakable scope, which tracks where `continue`, `break` and
528 /// `return` should branch to.
529 pub(crate) fn in_breakable_scope<F>(
530 &mut self,
531 loop_block: Option<BasicBlock>,
532 break_destination: Place<'tcx>,
533 span: Span,
534 f: F,
535 ) -> BlockAnd<()>
536 where
537 F: FnOnce(&mut Builder<'a, 'tcx>) -> Option<BlockAnd<()>>,
538 {
539 let region_scope = self.scopes.topmost();
540 let scope = BreakableScope {
541 region_scope,
542 break_destination,
543 break_drops: DropTree::new(),
544 continue_drops: loop_block.map(|_| DropTree::new()),
545 };
546 self.scopes.breakable_scopes.push(scope);
547 let normal_exit_block = f(self);
548 let breakable_scope = self.scopes.breakable_scopes.pop().unwrap();
549 assert!(breakable_scope.region_scope == region_scope);
550 let break_block =
551 self.build_exit_tree(breakable_scope.break_drops, region_scope, span, None);
552 if let Some(drops) = breakable_scope.continue_drops {
553 self.build_exit_tree(drops, region_scope, span, loop_block);
554 }
555 match (normal_exit_block, break_block) {
556 (Some(block), None) | (None, Some(block)) => block,
557 (None, None) => self.cfg.start_new_block().unit(),
558 (Some(normal_block), Some(exit_block)) => {
559 let target = self.cfg.start_new_block();
560 let source_info = self.source_info(span);
561 self.cfg.terminate(
562 normal_block.into_block(),
563 source_info,
564 TerminatorKind::Goto { target },
565 );
566 self.cfg.terminate(
567 exit_block.into_block(),
568 source_info,
569 TerminatorKind::Goto { target },
570 );
571 target.unit()
572 }
573 }
574 }
575
576 /// Start a const-continuable scope, which tracks where `#[const_continue] break` should
577 /// branch to.
578 pub(crate) fn in_const_continuable_scope<F>(
579 &mut self,
580 arms: Box<[ArmId]>,
581 built_match_tree: BuiltMatchTree<'tcx>,
582 state_place: Place<'tcx>,
583 span: Span,
584 f: F,
585 ) -> BlockAnd<()>
586 where
587 F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<()>,
588 {
589 let region_scope = self.scopes.topmost();
590 let scope = ConstContinuableScope {
591 region_scope,
592 state_place,
593 const_continue_drops: DropTree::new(),
594 arms,
595 built_match_tree,
596 };
597 self.scopes.const_continuable_scopes.push(scope);
598 let normal_exit_block = f(self);
599 let const_continue_scope = self.scopes.const_continuable_scopes.pop().unwrap();
600 assert!(const_continue_scope.region_scope == region_scope);
601
602 let break_block = self.build_exit_tree(
603 const_continue_scope.const_continue_drops,
604 region_scope,
605 span,
606 None,
607 );
608
609 match (normal_exit_block, break_block) {
610 (block, None) => block,
611 (normal_block, Some(exit_block)) => {
612 let target = self.cfg.start_new_block();
613 let source_info = self.source_info(span);
614 self.cfg.terminate(
615 normal_block.into_block(),
616 source_info,
617 TerminatorKind::Goto { target },
618 );
619 self.cfg.terminate(
620 exit_block.into_block(),
621 source_info,
622 TerminatorKind::Goto { target },
623 );
624 target.unit()
625 }
626 }
627 }
628
629 /// Start an if-then scope which tracks drop for `if` expressions and `if`
630 /// guards.
631 ///
632 /// For an if-let chain:
633 ///
634 /// if let Some(x) = a && let Some(y) = b && let Some(z) = c { ... }
635 ///
636 /// There are three possible ways the condition can be false and we may have
637 /// to drop `x`, `x` and `y`, or neither depending on which binding fails.
638 /// To handle this correctly we use a `DropTree` in a similar way to a
639 /// `loop` expression and 'break' out on all of the 'else' paths.
640 ///
641 /// Notes:
642 /// - We don't need to keep a stack of scopes in the `Builder` because the
643 /// 'else' paths will only leave the innermost scope.
644 /// - This is also used for match guards.
645 pub(crate) fn in_if_then_scope<F>(
646 &mut self,
647 region_scope: region::Scope,
648 span: Span,
649 f: F,
650 ) -> (BasicBlock, BasicBlock)
651 where
652 F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<()>,
653 {
654 let scope = IfThenScope { region_scope, else_drops: DropTree::new() };
655 let previous_scope = mem::replace(&mut self.scopes.if_then_scope, Some(scope));
656
657 let then_block = f(self).into_block();
658
659 let if_then_scope = mem::replace(&mut self.scopes.if_then_scope, previous_scope).unwrap();
660 assert!(if_then_scope.region_scope == region_scope);
661
662 let else_block =
663 self.build_exit_tree(if_then_scope.else_drops, region_scope, span, None).map_or_else(
664 || self.cfg.start_new_block(),
665 |else_block_and| else_block_and.into_block(),
666 );
667
668 (then_block, else_block)
669 }
670
671 /// Convenience wrapper that pushes a scope and then executes `f`
672 /// to build its contents, popping the scope afterwards.
673 #[instrument(skip(self, f), level = "debug")]
674 pub(crate) fn in_scope<F, R>(
675 &mut self,
676 region_scope: (region::Scope, SourceInfo),
677 lint_level: LintLevel,
678 f: F,
679 ) -> BlockAnd<R>
680 where
681 F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<R>,
682 {
683 let source_scope = self.source_scope;
684 if let LintLevel::Explicit(current_hir_id) = lint_level {
685 let parent_id =
686 self.source_scopes[source_scope].local_data.as_ref().unwrap_crate_local().lint_root;
687 self.maybe_new_source_scope(region_scope.1.span, current_hir_id, parent_id);
688 }
689 self.push_scope(region_scope);
690 let mut block;
691 let rv = unpack!(block = f(self));
692 block = self.pop_scope(region_scope, block).into_block();
693 self.source_scope = source_scope;
694 debug!(?block);
695 block.and(rv)
696 }
697
698 /// Push a scope onto the stack. You can then build code in this
699 /// scope and call `pop_scope` afterwards. Note that these two
700 /// calls must be paired; using `in_scope` as a convenience
701 /// wrapper maybe preferable.
702 pub(crate) fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo)) {
703 self.scopes.push_scope(region_scope, self.source_scope);
704 }
705
706 /// Pops a scope, which should have region scope `region_scope`,
707 /// adding any drops onto the end of `block` that are needed.
708 /// This must match 1-to-1 with `push_scope`.
709 pub(crate) fn pop_scope(
710 &mut self,
711 region_scope: (region::Scope, SourceInfo),
712 mut block: BasicBlock,
713 ) -> BlockAnd<()> {
714 debug!("pop_scope({:?}, {:?})", region_scope, block);
715
716 block = self.leave_top_scope(block);
717
718 self.scopes.pop_scope(region_scope);
719
720 block.unit()
721 }
722
723 /// Sets up the drops for breaking from `block` to `target`.
724 pub(crate) fn break_scope(
725 &mut self,
726 mut block: BasicBlock,
727 value: Option<ExprId>,
728 target: BreakableTarget,
729 source_info: SourceInfo,
730 ) -> BlockAnd<()> {
731 let span = source_info.span;
732
733 let get_scope_index = |scope: region::Scope| {
734 // find the loop-scope by its `region::Scope`.
735 self.scopes
736 .breakable_scopes
737 .iter()
738 .rposition(|breakable_scope| breakable_scope.region_scope == scope)
739 .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found"))
740 };
741 let (break_index, destination) = match target {
742 BreakableTarget::Return => {
743 let scope = &self.scopes.breakable_scopes[0];
744 if scope.break_destination != Place::return_place() {
745 span_bug!(span, "`return` in item with no return scope");
746 }
747 (0, Some(scope.break_destination))
748 }
749 BreakableTarget::Break(scope) => {
750 let break_index = get_scope_index(scope);
751 let scope = &self.scopes.breakable_scopes[break_index];
752 (break_index, Some(scope.break_destination))
753 }
754 BreakableTarget::Continue(scope) => {
755 let break_index = get_scope_index(scope);
756 (break_index, None)
757 }
758 };
759
760 match (destination, value) {
761 (Some(destination), Some(value)) => {
762 debug!("stmt_expr Break val block_context.push(SubExpr)");
763 self.block_context.push(BlockFrame::SubExpr);
764 block = self.expr_into_dest(destination, block, value).into_block();
765 self.block_context.pop();
766 }
767 (Some(destination), None) => {
768 self.cfg.push_assign_unit(block, source_info, destination, self.tcx)
769 }
770 (None, Some(_)) => {
771 panic!("`return`, `become` and `break` with value and must have a destination")
772 }
773 (None, None) => {
774 if self.tcx.sess.instrument_coverage() {
775 // Normally we wouldn't build any MIR in this case, but that makes it
776 // harder for coverage instrumentation to extract a relevant span for
777 // `continue` expressions. So here we inject a dummy statement with the
778 // desired span.
779 self.cfg.push_coverage_span_marker(block, source_info);
780 }
781 }
782 }
783
784 let region_scope = self.scopes.breakable_scopes[break_index].region_scope;
785 let scope_index = self.scopes.scope_index(region_scope, span);
786 let drops = if destination.is_some() {
787 &mut self.scopes.breakable_scopes[break_index].break_drops
788 } else {
789 let Some(drops) = self.scopes.breakable_scopes[break_index].continue_drops.as_mut()
790 else {
791 self.tcx.dcx().span_delayed_bug(
792 source_info.span,
793 "unlabelled `continue` within labelled block",
794 );
795 self.cfg.terminate(block, source_info, TerminatorKind::Unreachable);
796
797 return self.cfg.start_new_block().unit();
798 };
799 drops
800 };
801
802 let mut drop_idx = ROOT_NODE;
803 for scope in &self.scopes.scopes[scope_index + 1..] {
804 for drop in &scope.drops {
805 drop_idx = drops.add_drop(*drop, drop_idx);
806 }
807 }
808 drops.add_entry_point(block, drop_idx);
809
810 // `build_drop_trees` doesn't have access to our source_info, so we
811 // create a dummy terminator now. `TerminatorKind::UnwindResume` is used
812 // because MIR type checking will panic if it hasn't been overwritten.
813 // (See `<ExitScopes as DropTreeBuilder>::link_entry_point`.)
814 self.cfg.terminate(block, source_info, TerminatorKind::UnwindResume);
815
816 self.cfg.start_new_block().unit()
817 }
818
819 /// Based on `FunctionCx::eval_unevaluated_mir_constant_to_valtree`.
820 fn eval_unevaluated_mir_constant_to_valtree(
821 &self,
822 constant: ConstOperand<'tcx>,
823 ) -> Result<(ty::ValTree<'tcx>, Ty<'tcx>), interpret::ErrorHandled> {
824 assert!(!constant.const_.ty().has_param());
825 let (uv, ty) = match constant.const_ {
826 mir::Const::Unevaluated(uv, ty) => (uv.shrink(), ty),
827 mir::Const::Ty(_, c) => match c.kind() {
828 // A constant that came from a const generic but was then used as an argument to
829 // old-style simd_shuffle (passing as argument instead of as a generic param).
830 ty::ConstKind::Value(cv) => return Ok((cv.valtree, cv.ty)),
831 other => span_bug!(constant.span, "{other:#?}"),
832 },
833 mir::Const::Val(mir::ConstValue::Scalar(mir::interpret::Scalar::Int(val)), ty) => {
834 return Ok((ValTree::from_scalar_int(self.tcx, val), ty));
835 }
836 // We should never encounter `Const::Val` unless MIR opts (like const prop) evaluate
837 // a constant and write that value back into `Operand`s. This could happen, but is
838 // unlikely. Also: all users of `simd_shuffle` are on unstable and already need to take
839 // a lot of care around intrinsics. For an issue to happen here, it would require a
840 // macro expanding to a `simd_shuffle` call without wrapping the constant argument in a
841 // `const {}` block, but the user pass through arbitrary expressions.
842
843 // FIXME(oli-obk): Replace the magic const generic argument of `simd_shuffle` with a
844 // real const generic, and get rid of this entire function.
845 other => span_bug!(constant.span, "{other:#?}"),
846 };
847
848 match self.tcx.const_eval_resolve_for_typeck(self.typing_env(), uv, constant.span) {
849 Ok(Ok(valtree)) => Ok((valtree, ty)),
850 Ok(Err(ty)) => span_bug!(constant.span, "could not convert {ty:?} to a valtree"),
851 Err(e) => Err(e),
852 }
853 }
854
855 /// Sets up the drops for jumping from `block` to `scope`.
856 pub(crate) fn break_const_continuable_scope(
857 &mut self,
858 mut block: BasicBlock,
859 value: ExprId,
860 scope: region::Scope,
861 source_info: SourceInfo,
862 ) -> BlockAnd<()> {
863 let span = source_info.span;
864
865 // A break can only break out of a scope, so the value should be a scope.
866 let rustc_middle::thir::ExprKind::Scope { value, .. } = self.thir[value].kind else {
867 span_bug!(span, "break value must be a scope")
868 };
869
870 let constant = match &self.thir[value].kind {
871 ExprKind::Adt(box AdtExpr { variant_index, fields, base, .. }) => {
872 assert!(matches!(base, AdtExprBase::None));
873 assert!(fields.is_empty());
874 ConstOperand {
875 span: self.thir[value].span,
876 user_ty: None,
877 const_: Const::Ty(
878 self.thir[value].ty,
879 ty::Const::new_value(
880 self.tcx,
881 ValTree::from_branches(
882 self.tcx,
883 [ValTree::from_scalar_int(self.tcx, variant_index.as_u32().into())],
884 ),
885 self.thir[value].ty,
886 ),
887 ),
888 }
889 }
890 _ => self.as_constant(&self.thir[value]),
891 };
892
893 let break_index = self
894 .scopes
895 .const_continuable_scopes
896 .iter()
897 .rposition(|const_continuable_scope| const_continuable_scope.region_scope == scope)
898 .unwrap_or_else(|| span_bug!(span, "no enclosing const-continuable scope found"));
899
900 let scope = &self.scopes.const_continuable_scopes[break_index];
901
902 let state_decl = &self.local_decls[scope.state_place.as_local().unwrap()];
903 let state_ty = state_decl.ty;
904 let (discriminant_ty, rvalue) = match state_ty.kind() {
905 ty::Adt(adt_def, _) if adt_def.is_enum() => {
906 (state_ty.discriminant_ty(self.tcx), Rvalue::Discriminant(scope.state_place))
907 }
908 ty::Uint(_) | ty::Int(_) | ty::Float(_) | ty::Bool | ty::Char => {
909 (state_ty, Rvalue::Use(Operand::Copy(scope.state_place)))
910 }
911 _ => span_bug!(state_decl.source_info.span, "unsupported #[loop_match] state"),
912 };
913
914 // The `PatCtxt` is normally used in pattern exhaustiveness checking, but reused
915 // here because it performs normalization and const evaluation.
916 let dropless_arena = rustc_arena::DroplessArena::default();
917 let typeck_results = self.tcx.typeck(self.def_id);
918 let cx = RustcPatCtxt {
919 tcx: self.tcx,
920 typeck_results,
921 module: self.tcx.parent_module(self.hir_id).to_def_id(),
922 // FIXME(#132279): We're in a body, should handle opaques.
923 typing_env: rustc_middle::ty::TypingEnv::non_body_analysis(self.tcx, self.def_id),
924 dropless_arena: &dropless_arena,
925 match_lint_level: self.hir_id,
926 whole_match_span: Some(rustc_span::Span::default()),
927 scrut_span: rustc_span::Span::default(),
928 refutable: true,
929 known_valid_scrutinee: true,
930 };
931
932 let valtree = match self.eval_unevaluated_mir_constant_to_valtree(constant) {
933 Ok((valtree, ty)) => {
934 // Defensively check that the type is monomorphic.
935 assert!(!ty.has_param());
936
937 valtree
938 }
939 Err(ErrorHandled::Reported(..)) => {
940 return block.unit();
941 }
942 Err(ErrorHandled::TooGeneric(_)) => {
943 self.tcx.dcx().emit_fatal(ConstContinueBadConst { span: constant.span });
944 }
945 };
946
947 let Some(real_target) =
948 self.static_pattern_match(&cx, valtree, &*scope.arms, &scope.built_match_tree)
949 else {
950 self.tcx.dcx().emit_fatal(ConstContinueUnknownJumpTarget { span })
951 };
952
953 self.block_context.push(BlockFrame::SubExpr);
954 let state_place = scope.state_place;
955 block = self.expr_into_dest(state_place, block, value).into_block();
956 self.block_context.pop();
957
958 let discr = self.temp(discriminant_ty, source_info.span);
959 let scope_index = self
960 .scopes
961 .scope_index(self.scopes.const_continuable_scopes[break_index].region_scope, span);
962 let scope = &mut self.scopes.const_continuable_scopes[break_index];
963 self.cfg.push_assign(block, source_info, discr, rvalue);
964 let drop_and_continue_block = self.cfg.start_new_block();
965 let imaginary_target = self.cfg.start_new_block();
966 self.cfg.terminate(
967 block,
968 source_info,
969 TerminatorKind::FalseEdge { real_target: drop_and_continue_block, imaginary_target },
970 );
971
972 let drops = &mut scope.const_continue_drops;
973
974 let drop_idx = self.scopes.scopes[scope_index + 1..]
975 .iter()
976 .flat_map(|scope| &scope.drops)
977 .fold(ROOT_NODE, |drop_idx, &drop| drops.add_drop(drop, drop_idx));
978
979 drops.add_entry_point(imaginary_target, drop_idx);
980
981 self.cfg.terminate(imaginary_target, source_info, TerminatorKind::UnwindResume);
982
983 let region_scope = scope.region_scope;
984 let scope_index = self.scopes.scope_index(region_scope, span);
985 let mut drops = DropTree::new();
986
987 let drop_idx = self.scopes.scopes[scope_index + 1..]
988 .iter()
989 .flat_map(|scope| &scope.drops)
990 .fold(ROOT_NODE, |drop_idx, &drop| drops.add_drop(drop, drop_idx));
991
992 drops.add_entry_point(drop_and_continue_block, drop_idx);
993
994 // `build_drop_trees` doesn't have access to our source_info, so we
995 // create a dummy terminator now. `TerminatorKind::UnwindResume` is used
996 // because MIR type checking will panic if it hasn't been overwritten.
997 // (See `<ExitScopes as DropTreeBuilder>::link_entry_point`.)
998 self.cfg.terminate(drop_and_continue_block, source_info, TerminatorKind::UnwindResume);
999
1000 self.build_exit_tree(drops, region_scope, span, Some(real_target));
1001
1002 return self.cfg.start_new_block().unit();
1003 }
1004
1005 /// Sets up the drops for breaking from `block` due to an `if` condition
1006 /// that turned out to be false.
1007 ///
1008 /// Must be called in the context of [`Builder::in_if_then_scope`], so that
1009 /// there is an if-then scope to tell us what the target scope is.
1010 pub(crate) fn break_for_else(&mut self, block: BasicBlock, source_info: SourceInfo) {
1011 let if_then_scope = self
1012 .scopes
1013 .if_then_scope
1014 .as_ref()
1015 .unwrap_or_else(|| span_bug!(source_info.span, "no if-then scope found"));
1016
1017 let target = if_then_scope.region_scope;
1018 let scope_index = self.scopes.scope_index(target, source_info.span);
1019
1020 // Upgrade `if_then_scope` to `&mut`.
1021 let if_then_scope = self.scopes.if_then_scope.as_mut().expect("upgrading & to &mut");
1022
1023 let mut drop_idx = ROOT_NODE;
1024 let drops = &mut if_then_scope.else_drops;
1025 for scope in &self.scopes.scopes[scope_index + 1..] {
1026 for drop in &scope.drops {
1027 drop_idx = drops.add_drop(*drop, drop_idx);
1028 }
1029 }
1030 drops.add_entry_point(block, drop_idx);
1031
1032 // `build_drop_trees` doesn't have access to our source_info, so we
1033 // create a dummy terminator now. `TerminatorKind::UnwindResume` is used
1034 // because MIR type checking will panic if it hasn't been overwritten.
1035 // (See `<ExitScopes as DropTreeBuilder>::link_entry_point`.)
1036 self.cfg.terminate(block, source_info, TerminatorKind::UnwindResume);
1037 }
1038
1039 /// Sets up the drops for explicit tail calls.
1040 ///
1041 /// Unlike other kinds of early exits, tail calls do not go through the drop tree.
1042 /// Instead, all scheduled drops are immediately added to the CFG.
1043 pub(crate) fn break_for_tail_call(
1044 &mut self,
1045 mut block: BasicBlock,
1046 args: &[Spanned<Operand<'tcx>>],
1047 source_info: SourceInfo,
1048 ) -> BlockAnd<()> {
1049 let arg_drops: Vec<_> = args
1050 .iter()
1051 .rev()
1052 .filter_map(|arg| match &arg.node {
1053 Operand::Copy(_) => bug!("copy op in tail call args"),
1054 Operand::Move(place) => {
1055 let local =
1056 place.as_local().unwrap_or_else(|| bug!("projection in tail call args"));
1057
1058 if !self.local_decls[local].ty.needs_drop(self.tcx, self.typing_env()) {
1059 return None;
1060 }
1061
1062 Some(DropData { source_info, local, kind: DropKind::Value })
1063 }
1064 Operand::Constant(_) => None,
1065 })
1066 .collect();
1067
1068 let mut unwind_to = self.diverge_cleanup_target(
1069 self.scopes.scopes.iter().rev().nth(1).unwrap().region_scope,
1070 DUMMY_SP,
1071 );
1072 let typing_env = self.typing_env();
1073 let unwind_drops = &mut self.scopes.unwind_drops;
1074
1075 // the innermost scope contains only the destructors for the tail call arguments
1076 // we only want to drop these in case of a panic, so we skip it
1077 for scope in self.scopes.scopes[1..].iter().rev().skip(1) {
1078 // FIXME(explicit_tail_calls) code duplication with `build_scope_drops`
1079 for drop_data in scope.drops.iter().rev() {
1080 let source_info = drop_data.source_info;
1081 let local = drop_data.local;
1082
1083 if !self.local_decls[local].ty.needs_drop(self.tcx, typing_env) {
1084 continue;
1085 }
1086
1087 match drop_data.kind {
1088 DropKind::Value => {
1089 // `unwind_to` should drop the value that we're about to
1090 // schedule. If dropping this value panics, then we continue
1091 // with the *next* value on the unwind path.
1092 debug_assert_eq!(
1093 unwind_drops.drop_nodes[unwind_to].data.local,
1094 drop_data.local
1095 );
1096 debug_assert_eq!(
1097 unwind_drops.drop_nodes[unwind_to].data.kind,
1098 drop_data.kind
1099 );
1100 unwind_to = unwind_drops.drop_nodes[unwind_to].next;
1101
1102 let mut unwind_entry_point = unwind_to;
1103
1104 // the tail call arguments must be dropped if any of these drops panic
1105 for drop in arg_drops.iter().copied() {
1106 unwind_entry_point = unwind_drops.add_drop(drop, unwind_entry_point);
1107 }
1108
1109 unwind_drops.add_entry_point(block, unwind_entry_point);
1110
1111 let next = self.cfg.start_new_block();
1112 self.cfg.terminate(
1113 block,
1114 source_info,
1115 TerminatorKind::Drop {
1116 place: local.into(),
1117 target: next,
1118 unwind: UnwindAction::Continue,
1119 replace: false,
1120 drop: None,
1121 async_fut: None,
1122 },
1123 );
1124 block = next;
1125 }
1126 DropKind::ForLint => {
1127 self.cfg.push(
1128 block,
1129 Statement::new(
1130 source_info,
1131 StatementKind::BackwardIncompatibleDropHint {
1132 place: Box::new(local.into()),
1133 reason: BackwardIncompatibleDropReason::Edition2024,
1134 },
1135 ),
1136 );
1137 }
1138 DropKind::Storage => {
1139 // Only temps and vars need their storage dead.
1140 assert!(local.index() > self.arg_count);
1141 self.cfg.push(
1142 block,
1143 Statement::new(source_info, StatementKind::StorageDead(local)),
1144 );
1145 }
1146 }
1147 }
1148 }
1149
1150 block.unit()
1151 }
1152
1153 fn is_async_drop_impl(
1154 tcx: TyCtxt<'tcx>,
1155 local_decls: &IndexVec<Local, LocalDecl<'tcx>>,
1156 typing_env: ty::TypingEnv<'tcx>,
1157 local: Local,
1158 ) -> bool {
1159 let ty = local_decls[local].ty;
1160 if ty.is_async_drop(tcx, typing_env) || ty.is_coroutine() {
1161 return true;
1162 }
1163 ty.needs_async_drop(tcx, typing_env)
1164 }
1165 fn is_async_drop(&self, local: Local) -> bool {
1166 Self::is_async_drop_impl(self.tcx, &self.local_decls, self.typing_env(), local)
1167 }
1168
1169 fn leave_top_scope(&mut self, block: BasicBlock) -> BasicBlock {
1170 // If we are emitting a `drop` statement, we need to have the cached
1171 // diverge cleanup pads ready in case that drop panics.
1172 let needs_cleanup = self.scopes.scopes.last().is_some_and(|scope| scope.needs_cleanup());
1173 let is_coroutine = self.coroutine.is_some();
1174 let unwind_to = if needs_cleanup { self.diverge_cleanup() } else { DropIdx::MAX };
1175
1176 let scope = self.scopes.scopes.last().expect("leave_top_scope called with no scopes");
1177 let has_async_drops = is_coroutine
1178 && scope.drops.iter().any(|v| v.kind == DropKind::Value && self.is_async_drop(v.local));
1179 let dropline_to = if has_async_drops { Some(self.diverge_dropline()) } else { None };
1180 let scope = self.scopes.scopes.last().expect("leave_top_scope called with no scopes");
1181 let typing_env = self.typing_env();
1182 build_scope_drops(
1183 &mut self.cfg,
1184 &mut self.scopes.unwind_drops,
1185 &mut self.scopes.coroutine_drops,
1186 scope,
1187 block,
1188 unwind_to,
1189 dropline_to,
1190 is_coroutine && needs_cleanup,
1191 self.arg_count,
1192 |v: Local| Self::is_async_drop_impl(self.tcx, &self.local_decls, typing_env, v),
1193 )
1194 .into_block()
1195 }
1196
1197 /// Possibly creates a new source scope if `current_root` and `parent_root`
1198 /// are different, or if -Zmaximal-hir-to-mir-coverage is enabled.
1199 pub(crate) fn maybe_new_source_scope(
1200 &mut self,
1201 span: Span,
1202 current_id: HirId,
1203 parent_id: HirId,
1204 ) {
1205 let (current_root, parent_root) =
1206 if self.tcx.sess.opts.unstable_opts.maximal_hir_to_mir_coverage {
1207 // Some consumers of rustc need to map MIR locations back to HIR nodes. Currently
1208 // the only part of rustc that tracks MIR -> HIR is the
1209 // `SourceScopeLocalData::lint_root` field that tracks lint levels for MIR
1210 // locations. Normally the number of source scopes is limited to the set of nodes
1211 // with lint annotations. The -Zmaximal-hir-to-mir-coverage flag changes this
1212 // behavior to maximize the number of source scopes, increasing the granularity of
1213 // the MIR->HIR mapping.
1214 (current_id, parent_id)
1215 } else {
1216 // Use `maybe_lint_level_root_bounded` to avoid adding Hir dependencies on our
1217 // parents. We estimate the true lint roots here to avoid creating a lot of source
1218 // scopes.
1219 (
1220 self.maybe_lint_level_root_bounded(current_id),
1221 if parent_id == self.hir_id {
1222 parent_id // this is very common
1223 } else {
1224 self.maybe_lint_level_root_bounded(parent_id)
1225 },
1226 )
1227 };
1228
1229 if current_root != parent_root {
1230 let lint_level = LintLevel::Explicit(current_root);
1231 self.source_scope = self.new_source_scope(span, lint_level);
1232 }
1233 }
1234
1235 /// Walks upwards from `orig_id` to find a node which might change lint levels with attributes.
1236 /// It stops at `self.hir_id` and just returns it if reached.
1237 fn maybe_lint_level_root_bounded(&mut self, orig_id: HirId) -> HirId {
1238 // This assertion lets us just store `ItemLocalId` in the cache, rather
1239 // than the full `HirId`.
1240 assert_eq!(orig_id.owner, self.hir_id.owner);
1241
1242 let mut id = orig_id;
1243 loop {
1244 if id == self.hir_id {
1245 // This is a moderately common case, mostly hit for previously unseen nodes.
1246 break;
1247 }
1248
1249 if self.tcx.hir_attrs(id).iter().any(|attr| Level::from_attr(attr).is_some()) {
1250 // This is a rare case. It's for a node path that doesn't reach the root due to an
1251 // intervening lint level attribute. This result doesn't get cached.
1252 return id;
1253 }
1254
1255 let next = self.tcx.parent_hir_id(id);
1256 if next == id {
1257 bug!("lint traversal reached the root of the crate");
1258 }
1259 id = next;
1260
1261 // This lookup is just an optimization; it can be removed without affecting
1262 // functionality. It might seem strange to see this at the end of this loop, but the
1263 // `orig_id` passed in to this function is almost always previously unseen, for which a
1264 // lookup will be a miss. So we only do lookups for nodes up the parent chain, where
1265 // cache lookups have a very high hit rate.
1266 if self.lint_level_roots_cache.contains(id.local_id) {
1267 break;
1268 }
1269 }
1270
1271 // `orig_id` traced to `self_id`; record this fact. If `orig_id` is a leaf node it will
1272 // rarely (never?) subsequently be searched for, but it's hard to know if that is the case.
1273 // The performance wins from the cache all come from caching non-leaf nodes.
1274 self.lint_level_roots_cache.insert(orig_id.local_id);
1275 self.hir_id
1276 }
1277
1278 /// Creates a new source scope, nested in the current one.
1279 pub(crate) fn new_source_scope(&mut self, span: Span, lint_level: LintLevel) -> SourceScope {
1280 let parent = self.source_scope;
1281 debug!(
1282 "new_source_scope({:?}, {:?}) - parent({:?})={:?}",
1283 span,
1284 lint_level,
1285 parent,
1286 self.source_scopes.get(parent)
1287 );
1288 let scope_local_data = SourceScopeLocalData {
1289 lint_root: if let LintLevel::Explicit(lint_root) = lint_level {
1290 lint_root
1291 } else {
1292 self.source_scopes[parent].local_data.as_ref().unwrap_crate_local().lint_root
1293 },
1294 };
1295 self.source_scopes.push(SourceScopeData {
1296 span,
1297 parent_scope: Some(parent),
1298 inlined: None,
1299 inlined_parent_scope: None,
1300 local_data: ClearCrossCrate::Set(scope_local_data),
1301 })
1302 }
1303
1304 /// Given a span and the current source scope, make a SourceInfo.
1305 pub(crate) fn source_info(&self, span: Span) -> SourceInfo {
1306 SourceInfo { span, scope: self.source_scope }
1307 }
1308
1309 // Finding scopes
1310 // ==============
1311
1312 /// Returns the scope that we should use as the lifetime of an
1313 /// operand. Basically, an operand must live until it is consumed.
1314 /// This is similar to, but not quite the same as, the temporary
1315 /// scope (which can be larger or smaller).
1316 ///
1317 /// Consider:
1318 /// ```ignore (illustrative)
1319 /// let x = foo(bar(X, Y));
1320 /// ```
1321 /// We wish to pop the storage for X and Y after `bar()` is
1322 /// called, not after the whole `let` is completed.
1323 ///
1324 /// As another example, if the second argument diverges:
1325 /// ```ignore (illustrative)
1326 /// foo(Box::new(2), panic!())
1327 /// ```
1328 /// We would allocate the box but then free it on the unwinding
1329 /// path; we would also emit a free on the 'success' path from
1330 /// panic, but that will turn out to be removed as dead-code.
1331 pub(crate) fn local_scope(&self) -> region::Scope {
1332 self.scopes.topmost()
1333 }
1334
1335 // Scheduling drops
1336 // ================
1337
1338 pub(crate) fn schedule_drop_storage_and_value(
1339 &mut self,
1340 span: Span,
1341 region_scope: region::Scope,
1342 local: Local,
1343 ) {
1344 self.schedule_drop(span, region_scope, local, DropKind::Storage);
1345 self.schedule_drop(span, region_scope, local, DropKind::Value);
1346 }
1347
1348 /// Indicates that `place` should be dropped on exit from `region_scope`.
1349 ///
1350 /// When called with `DropKind::Storage`, `place` shouldn't be the return
1351 /// place, or a function parameter.
1352 pub(crate) fn schedule_drop(
1353 &mut self,
1354 span: Span,
1355 region_scope: region::Scope,
1356 local: Local,
1357 drop_kind: DropKind,
1358 ) {
1359 let needs_drop = match drop_kind {
1360 DropKind::Value | DropKind::ForLint => {
1361 if !self.local_decls[local].ty.needs_drop(self.tcx, self.typing_env()) {
1362 return;
1363 }
1364 true
1365 }
1366 DropKind::Storage => {
1367 if local.index() <= self.arg_count {
1368 span_bug!(
1369 span,
1370 "`schedule_drop` called with body argument {:?} \
1371 but its storage does not require a drop",
1372 local,
1373 )
1374 }
1375 false
1376 }
1377 };
1378
1379 // When building drops, we try to cache chains of drops to reduce the
1380 // number of `DropTree::add_drop` calls. This, however, means that
1381 // whenever we add a drop into a scope which already had some entries
1382 // in the drop tree built (and thus, cached) for it, we must invalidate
1383 // all caches which might branch into the scope which had a drop just
1384 // added to it. This is necessary, because otherwise some other code
1385 // might use the cache to branch into already built chain of drops,
1386 // essentially ignoring the newly added drop.
1387 //
1388 // For example consider there’s two scopes with a drop in each. These
1389 // are built and thus the caches are filled:
1390 //
1391 // +--------------------------------------------------------+
1392 // | +---------------------------------+ |
1393 // | | +--------+ +-------------+ | +---------------+ |
1394 // | | | return | <-+ | drop(outer) | <-+ | drop(middle) | |
1395 // | | +--------+ +-------------+ | +---------------+ |
1396 // | +------------|outer_scope cache|--+ |
1397 // +------------------------------|middle_scope cache|------+
1398 //
1399 // Now, a new, innermost scope is added along with a new drop into
1400 // both innermost and outermost scopes:
1401 //
1402 // +------------------------------------------------------------+
1403 // | +----------------------------------+ |
1404 // | | +--------+ +-------------+ | +---------------+ | +-------------+
1405 // | | | return | <+ | drop(new) | <-+ | drop(middle) | <--+| drop(inner) |
1406 // | | +--------+ | | drop(outer) | | +---------------+ | +-------------+
1407 // | | +-+ +-------------+ | |
1408 // | +---|invalid outer_scope cache|----+ |
1409 // +----=----------------|invalid middle_scope cache|-----------+
1410 //
1411 // If, when adding `drop(new)` we do not invalidate the cached blocks for both
1412 // outer_scope and middle_scope, then, when building drops for the inner (rightmost)
1413 // scope, the old, cached blocks, without `drop(new)` will get used, producing the
1414 // wrong results.
1415 //
1416 // Note that this code iterates scopes from the innermost to the outermost,
1417 // invalidating caches of each scope visited. This way bare minimum of the
1418 // caches gets invalidated. i.e., if a new drop is added into the middle scope, the
1419 // cache of outer scope stays intact.
1420 //
1421 // Since we only cache drops for the unwind path and the coroutine drop
1422 // path, we only need to invalidate the cache for drops that happen on
1423 // the unwind or coroutine drop paths. This means that for
1424 // non-coroutines we don't need to invalidate caches for `DropKind::Storage`.
1425 let invalidate_caches = needs_drop || self.coroutine.is_some();
1426 for scope in self.scopes.scopes.iter_mut().rev() {
1427 if invalidate_caches {
1428 scope.invalidate_cache();
1429 }
1430
1431 if scope.region_scope == region_scope {
1432 let region_scope_span = region_scope.span(self.tcx, self.region_scope_tree);
1433 // Attribute scope exit drops to scope's closing brace.
1434 let scope_end = self.tcx.sess.source_map().end_point(region_scope_span);
1435
1436 scope.drops.push(DropData {
1437 source_info: SourceInfo { span: scope_end, scope: scope.source_scope },
1438 local,
1439 kind: drop_kind,
1440 });
1441
1442 return;
1443 }
1444 }
1445
1446 span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, local);
1447 }
1448
1449 /// Schedule emission of a backwards incompatible drop lint hint.
1450 /// Applicable only to temporary values for now.
1451 #[instrument(level = "debug", skip(self))]
1452 pub(crate) fn schedule_backwards_incompatible_drop(
1453 &mut self,
1454 span: Span,
1455 region_scope: region::Scope,
1456 local: Local,
1457 ) {
1458 // Note that we are *not* gating BIDs here on whether they have significant destructor.
1459 // We need to know all of them so that we can capture potential borrow-checking errors.
1460 for scope in self.scopes.scopes.iter_mut().rev() {
1461 // Since we are inserting linting MIR statement, we have to invalidate the caches
1462 scope.invalidate_cache();
1463 if scope.region_scope == region_scope {
1464 let region_scope_span = region_scope.span(self.tcx, self.region_scope_tree);
1465 let scope_end = self.tcx.sess.source_map().end_point(region_scope_span);
1466
1467 scope.drops.push(DropData {
1468 source_info: SourceInfo { span: scope_end, scope: scope.source_scope },
1469 local,
1470 kind: DropKind::ForLint,
1471 });
1472
1473 return;
1474 }
1475 }
1476 span_bug!(
1477 span,
1478 "region scope {:?} not in scope to drop {:?} for linting",
1479 region_scope,
1480 local
1481 );
1482 }
1483
1484 /// Indicates that the "local operand" stored in `local` is
1485 /// *moved* at some point during execution (see `local_scope` for
1486 /// more information about what a "local operand" is -- in short,
1487 /// it's an intermediate operand created as part of preparing some
1488 /// MIR instruction). We use this information to suppress
1489 /// redundant drops on the non-unwind paths. This results in less
1490 /// MIR, but also avoids spurious borrow check errors
1491 /// (c.f. #64391).
1492 ///
1493 /// Example: when compiling the call to `foo` here:
1494 ///
1495 /// ```ignore (illustrative)
1496 /// foo(bar(), ...)
1497 /// ```
1498 ///
1499 /// we would evaluate `bar()` to an operand `_X`. We would also
1500 /// schedule `_X` to be dropped when the expression scope for
1501 /// `foo(bar())` is exited. This is relevant, for example, if the
1502 /// later arguments should unwind (it would ensure that `_X` gets
1503 /// dropped). However, if no unwind occurs, then `_X` will be
1504 /// unconditionally consumed by the `call`:
1505 ///
1506 /// ```ignore (illustrative)
1507 /// bb {
1508 /// ...
1509 /// _R = CALL(foo, _X, ...)
1510 /// }
1511 /// ```
1512 ///
1513 /// However, `_X` is still registered to be dropped, and so if we
1514 /// do nothing else, we would generate a `DROP(_X)` that occurs
1515 /// after the call. This will later be optimized out by the
1516 /// drop-elaboration code, but in the meantime it can lead to
1517 /// spurious borrow-check errors -- the problem, ironically, is
1518 /// not the `DROP(_X)` itself, but the (spurious) unwind pathways
1519 /// that it creates. See #64391 for an example.
1520 pub(crate) fn record_operands_moved(&mut self, operands: &[Spanned<Operand<'tcx>>]) {
1521 let local_scope = self.local_scope();
1522 let scope = self.scopes.scopes.last_mut().unwrap();
1523
1524 assert_eq!(scope.region_scope, local_scope, "local scope is not the topmost scope!",);
1525
1526 // look for moves of a local variable, like `MOVE(_X)`
1527 let locals_moved = operands.iter().flat_map(|operand| match operand.node {
1528 Operand::Copy(_) | Operand::Constant(_) => None,
1529 Operand::Move(place) => place.as_local(),
1530 });
1531
1532 for local in locals_moved {
1533 // check if we have a Drop for this operand and -- if so
1534 // -- add it to the list of moved operands. Note that this
1535 // local might not have been an operand created for this
1536 // call, it could come from other places too.
1537 if scope.drops.iter().any(|drop| drop.local == local && drop.kind == DropKind::Value) {
1538 scope.moved_locals.push(local);
1539 }
1540 }
1541 }
1542
1543 // Other
1544 // =====
1545
1546 /// Returns the [DropIdx] for the innermost drop if the function unwound at
1547 /// this point. The `DropIdx` will be created if it doesn't already exist.
1548 fn diverge_cleanup(&mut self) -> DropIdx {
1549        // It is okay to use a dummy span here because getting the scope index
1550        // for the topmost scope must always succeed.
1551 self.diverge_cleanup_target(self.scopes.topmost(), DUMMY_SP)
1552 }
1553
1554 /// This is similar to [diverge_cleanup](Self::diverge_cleanup) except its target is set to
1555 /// some ancestor scope instead of the current scope.
1556    /// It is possible to unwind to an ancestor scope if a drop panics as
1557    /// the program breaks out of an if-then scope.
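    ///
    /// As a hedged illustration (not taken from the surrounding code; `make_guard`
    /// is a placeholder and the scope structure is simplified):
    ///
    /// ```ignore (illustrative)
    /// if let Some(g) = make_guard() {
    ///     // Leaving this then-scope drops `g` and any temporaries. If one of
    ///     // those drops panics, the unwind target is the set of drops of an
    ///     // *ancestor* scope, not of the scope we are currently in.
    /// }
    /// ```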
1558 fn diverge_cleanup_target(&mut self, target_scope: region::Scope, span: Span) -> DropIdx {
1559 let target = self.scopes.scope_index(target_scope, span);
1560 let (uncached_scope, mut cached_drop) = self.scopes.scopes[..=target]
1561 .iter()
1562 .enumerate()
1563 .rev()
1564 .find_map(|(scope_idx, scope)| {
1565 scope.cached_unwind_block.map(|cached_block| (scope_idx + 1, cached_block))
1566 })
1567 .unwrap_or((0, ROOT_NODE));
1568
1569 if uncached_scope > target {
1570 return cached_drop;
1571 }
1572
1573 let is_coroutine = self.coroutine.is_some();
1574 for scope in &mut self.scopes.scopes[uncached_scope..=target] {
1575 for drop in &scope.drops {
1576 if is_coroutine || drop.kind == DropKind::Value {
1577 cached_drop = self.scopes.unwind_drops.add_drop(*drop, cached_drop);
1578 }
1579 }
1580 scope.cached_unwind_block = Some(cached_drop);
1581 }
1582
1583 cached_drop
1584 }
1585
1586 /// Prepares to create a path that performs all required cleanup for a
1587 /// terminator that can unwind at the given basic block.
1588 ///
1589    /// This path terminates in `UnwindResume`. The path isn't created until after all
1590 /// of the non-unwind paths in this item have been lowered.
1591 pub(crate) fn diverge_from(&mut self, start: BasicBlock) {
1592 debug_assert!(
1593 matches!(
1594 self.cfg.block_data(start).terminator().kind,
1595 TerminatorKind::Assert { .. }
1596 | TerminatorKind::Call { .. }
1597 | TerminatorKind::Drop { .. }
1598 | TerminatorKind::FalseUnwind { .. }
1599 | TerminatorKind::InlineAsm { .. }
1600 ),
1601 "diverge_from called on block with terminator that cannot unwind."
1602 );
1603
1604 let next_drop = self.diverge_cleanup();
1605 self.scopes.unwind_drops.add_entry_point(start, next_drop);
1606 }
1607
1608 /// Returns the [DropIdx] for the innermost drop for dropline (coroutine drop path).
1609 /// The `DropIdx` will be created if it doesn't already exist.
1610 fn diverge_dropline(&mut self) -> DropIdx {
1611        // It is okay to use a dummy span here because getting the scope index
1612        // for the topmost scope must always succeed.
1613 self.diverge_dropline_target(self.scopes.topmost(), DUMMY_SP)
1614 }
1615
1616    /// Similar to [diverge_cleanup_target](Self::diverge_cleanup_target), but for the dropline (coroutine drop path).
1617 fn diverge_dropline_target(&mut self, target_scope: region::Scope, span: Span) -> DropIdx {
1618 debug_assert!(
1619 self.coroutine.is_some(),
1620 "diverge_dropline_target is valid only for coroutine"
1621 );
1622 let target = self.scopes.scope_index(target_scope, span);
1623 let (uncached_scope, mut cached_drop) = self.scopes.scopes[..=target]
1624 .iter()
1625 .enumerate()
1626 .rev()
1627 .find_map(|(scope_idx, scope)| {
1628 scope.cached_coroutine_drop_block.map(|cached_block| (scope_idx + 1, cached_block))
1629 })
1630 .unwrap_or((0, ROOT_NODE));
1631
1632 if uncached_scope > target {
1633 return cached_drop;
1634 }
1635
1636 for scope in &mut self.scopes.scopes[uncached_scope..=target] {
1637 for drop in &scope.drops {
1638 cached_drop = self.scopes.coroutine_drops.add_drop(*drop, cached_drop);
1639 }
1640 scope.cached_coroutine_drop_block = Some(cached_drop);
1641 }
1642
1643 cached_drop
1644 }
1645
1646 /// Sets up a path that performs all required cleanup for dropping a
1647 /// coroutine, starting from the given block that ends in
1648 /// [TerminatorKind::Yield].
1649 ///
1650    /// This path terminates in [TerminatorKind::CoroutineDrop].
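    ///
    /// As a rough sketch (schematic; not literal MIR syntax), the effect is to
    /// give the `Yield` terminator a drop edge into the coroutine-drop tree:
    ///
    /// ```ignore (illustrative)
    /// yield_block: Yield { resume: bb_resume, drop: Some(bb_coroutine_drop_path) }
    /// // bb_coroutine_drop_path runs the scheduled drops and ends in CoroutineDrop.
    /// ```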
1651 pub(crate) fn coroutine_drop_cleanup(&mut self, yield_block: BasicBlock) {
1652 debug_assert!(
1653 matches!(
1654 self.cfg.block_data(yield_block).terminator().kind,
1655 TerminatorKind::Yield { .. }
1656 ),
1657 "coroutine_drop_cleanup called on block with non-yield terminator."
1658 );
1659 let cached_drop = self.diverge_dropline();
1660 self.scopes.coroutine_drops.add_entry_point(yield_block, cached_drop);
1661 }
1662
1663    /// Utility function for *non*-scope code to build its own drops.
1664    /// Forces a drop at this point in the MIR by creating a new block.
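    ///
    /// Schematically (a hedged sketch of the blocks built below; not real MIR syntax):
    ///
    /// ```ignore (illustrative)
    /// block:         Drop(place) -> [target: assign, unwind: assign_unwind]
    /// assign:        place = value   // normal path continues from this new block
    /// assign_unwind: place = value   // cleanup block taken if the drop unwinds
    /// ```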
1665 pub(crate) fn build_drop_and_replace(
1666 &mut self,
1667 block: BasicBlock,
1668 span: Span,
1669 place: Place<'tcx>,
1670 value: Rvalue<'tcx>,
1671 ) -> BlockAnd<()> {
1672 let source_info = self.source_info(span);
1673
1674 // create the new block for the assignment
1675 let assign = self.cfg.start_new_block();
1676 self.cfg.push_assign(assign, source_info, place, value.clone());
1677
1678 // create the new block for the assignment in the case of unwinding
1679 let assign_unwind = self.cfg.start_new_cleanup_block();
1680 self.cfg.push_assign(assign_unwind, source_info, place, value.clone());
1681
1682 self.cfg.terminate(
1683 block,
1684 source_info,
1685 TerminatorKind::Drop {
1686 place,
1687 target: assign,
1688 unwind: UnwindAction::Cleanup(assign_unwind),
1689 replace: true,
1690 drop: None,
1691 async_fut: None,
1692 },
1693 );
1694 self.diverge_from(block);
1695
1696 assign.unit()
1697 }
1698
1699    /// Creates an `Assert` terminator and returns the success block.
1700    /// If the boolean condition operand does not evaluate to the expected value,
1701    /// a runtime panic is raised with the given message.
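    ///
    /// Schematically (a hedged sketch; not literal MIR syntax):
    ///
    /// ```ignore (illustrative)
    /// block: Assert(cond == expected, msg) -> [success: success_block, unwind: ...]
    /// ```
    ///
    /// The unwind edge is wired up afterwards via `diverge_from`.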
1702 pub(crate) fn assert(
1703 &mut self,
1704 block: BasicBlock,
1705 cond: Operand<'tcx>,
1706 expected: bool,
1707 msg: AssertMessage<'tcx>,
1708 span: Span,
1709 ) -> BasicBlock {
1710 let source_info = self.source_info(span);
1711 let success_block = self.cfg.start_new_block();
1712
1713 self.cfg.terminate(
1714 block,
1715 source_info,
1716 TerminatorKind::Assert {
1717 cond,
1718 expected,
1719 msg: Box::new(msg),
1720 target: success_block,
1721 unwind: UnwindAction::Continue,
1722 },
1723 );
1724 self.diverge_from(block);
1725
1726 success_block
1727 }
1728
1729 /// Unschedules any drops in the top scope.
1730 ///
1731 /// This is only needed for `match` arm scopes, because they have one
1732 /// entrance per pattern, but only one exit.
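    ///
    /// As a hedged illustration (simplified; the details live in the match-lowering
    /// code), an arm with an or-pattern is entered once per pattern:
    ///
    /// ```ignore (illustrative)
    /// match x {
    ///     // The arm scope is entered from either pattern but exited only once.
    ///     // Without unscheduling, drops scheduled while lowering one entrance
    ///     // would be scheduled again for the other.
    ///     Some(0) | None => {}
    ///     _ => {}
    /// }
    /// ```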
1733 pub(crate) fn clear_top_scope(&mut self, region_scope: region::Scope) {
1734 let top_scope = self.scopes.scopes.last_mut().unwrap();
1735
1736 assert_eq!(top_scope.region_scope, region_scope);
1737
1738 top_scope.drops.clear();
1739 top_scope.invalidate_cache();
1740 }
1741}
1742
1743/// Builds drops for `pop_scope` and `leave_top_scope`.
1744///
1745/// # Parameters
1746///
1747/// * `unwind_drops`, the drop tree data structure storing what needs to be cleaned up if an unwind occurs
1748/// * `scope`, describes the drops that will occur on exiting the scope in regular execution
1749/// * `block`, the block to branch to once drops are complete (assuming no unwind occurs)
1750/// * `unwind_to`, describes the drops that would occur at this point in the code if a
1751/// panic occurred (a subset of the drops in `scope`, since we sometimes elide `StorageDead` and other
1752/// instructions on unwinding)
1753/// * `dropline_to`, describes the drops that would occur at this point in the code if a
1754/// coroutine drop occurred.
1755/// * `storage_dead_on_unwind`, if true, then we should emit `StorageDead` even when unwinding
1756/// * `arg_count`, number of MIR local variables corresponding to fn arguments (used to assert that we don't drop those)
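/// * `coroutine_drops`, the drop tree for the coroutine drop path (dropline); entry points into it
///   are only added for locals whose drop is async (see `is_async_drop`)
/// * `is_async_drop`, a predicate telling whether dropping the given local needs the async drop path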
1757fn build_scope_drops<'tcx, F>(
1758 cfg: &mut CFG<'tcx>,
1759 unwind_drops: &mut DropTree,
1760 coroutine_drops: &mut DropTree,
1761 scope: &Scope,
1762 block: BasicBlock,
1763 unwind_to: DropIdx,
1764 dropline_to: Option<DropIdx>,
1765 storage_dead_on_unwind: bool,
1766 arg_count: usize,
1767 is_async_drop: F,
1768) -> BlockAnd<()>
1769where
1770 F: Fn(Local) -> bool,
1771{
1772 debug!("build_scope_drops({:?} -> {:?}), dropline_to={:?}", block, scope, dropline_to);
1773
1774 // Build up the drops in evaluation order. The end result will
1775 // look like:
1776 //
1777    // [SDs, drops[n]] --..> [SDs, drop[1]] -> [SDs, drop[0]] -> [[SDs]]
1778    //        |                    |                 |
1779    //        :                    |                 |
1780    //                             V                 V
1781    // [drop[n]] -...-> [drop[1]] ------> [drop[0]] ------> [last_unwind_to]
1782 //
1783 // The horizontal arrows represent the execution path when the drops return
1784 // successfully. The downwards arrows represent the execution path when the
1785 // drops panic (panicking while unwinding will abort, so there's no need for
1786 // another set of arrows).
1787 //
1788 // For coroutines, we unwind from a drop on a local to its StorageDead
1789 // statement. For other functions we don't worry about StorageDead. The
1790 // drops for the unwind path should have already been generated by
1791    // `diverge_cleanup`.
1792
1793 // `unwind_to` indicates what needs to be dropped should unwinding occur.
1794 // This is a subset of what needs to be dropped when exiting the scope.
1795 // As we unwind the scope, we will also move `unwind_to` backwards to match,
1796 // so that we can use it should a destructor panic.
1797 let mut unwind_to = unwind_to;
1798
1799 // The block that we should jump to after drops complete. We start by building the final drop (`drops[n]`
1800 // in the diagram above) and then build the drops (e.g., `drop[1]`, `drop[0]`) that come before it.
1801    // `block` begins as the successor of `drops[n]` and then becomes `drops[n]` so that `drops[n-1]`
1802 // will branch to `drops[n]`.
1803 let mut block = block;
1804
1805 // `dropline_to` indicates what needs to be dropped should coroutine drop occur.
1806 let mut dropline_to = dropline_to;
1807
1808 for drop_data in scope.drops.iter().rev() {
1809 let source_info = drop_data.source_info;
1810 let local = drop_data.local;
1811
1812 match drop_data.kind {
1813 DropKind::Value => {
1814 // `unwind_to` should drop the value that we're about to
1815 // schedule. If dropping this value panics, then we continue
1816 // with the *next* value on the unwind path.
1817 //
1818 // We adjust this BEFORE we create the drop (e.g., `drops[n]`)
1819 // because `drops[n]` should unwind to `drops[n-1]`.
1820 debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.local, drop_data.local);
1821 debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.kind, drop_data.kind);
1822 unwind_to = unwind_drops.drop_nodes[unwind_to].next;
1823
1824 if let Some(idx) = dropline_to {
1825 debug_assert_eq!(coroutine_drops.drop_nodes[idx].data.local, drop_data.local);
1826 debug_assert_eq!(coroutine_drops.drop_nodes[idx].data.kind, drop_data.kind);
1827 dropline_to = Some(coroutine_drops.drop_nodes[idx].next);
1828 }
1829
1830 // If the operand has been moved, and we are not on an unwind
1831 // path, then don't generate the drop. (We only take this into
1832 // account for non-unwind paths so as not to disturb the
1833 // caching mechanism.)
1834 if scope.moved_locals.contains(&local) {
1835 continue;
1836 }
1837
1838 unwind_drops.add_entry_point(block, unwind_to);
1839 if let Some(to) = dropline_to
1840 && is_async_drop(local)
1841 {
1842 coroutine_drops.add_entry_point(block, to);
1843 }
1844
1845 let next = cfg.start_new_block();
1846 cfg.terminate(
1847 block,
1848 source_info,
1849 TerminatorKind::Drop {
1850 place: local.into(),
1851 target: next,
1852 unwind: UnwindAction::Continue,
1853 replace: false,
1854 drop: None,
1855 async_fut: None,
1856 },
1857 );
1858 block = next;
1859 }
1860 DropKind::ForLint => {
1861 // As in the `DropKind::Storage` case below:
1862 // normally lint-related drops are not emitted for unwind,
1863 // so we can just leave `unwind_to` unmodified, but in some
1864 // cases we emit things ALSO on the unwind path, so we need to adjust
1865 // `unwind_to` in that case.
1866 if storage_dead_on_unwind {
1867 debug_assert_eq!(
1868 unwind_drops.drop_nodes[unwind_to].data.local,
1869 drop_data.local
1870 );
1871 debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.kind, drop_data.kind);
1872 unwind_to = unwind_drops.drop_nodes[unwind_to].next;
1873 }
1874
1875 // If the operand has been moved, and we are not on an unwind
1876 // path, then don't generate the drop. (We only take this into
1877 // account for non-unwind paths so as not to disturb the
1878 // caching mechanism.)
1879 if scope.moved_locals.contains(&local) {
1880 continue;
1881 }
1882
1883 cfg.push(
1884 block,
1885 Statement::new(
1886 source_info,
1887 StatementKind::BackwardIncompatibleDropHint {
1888 place: Box::new(local.into()),
1889 reason: BackwardIncompatibleDropReason::Edition2024,
1890 },
1891 ),
1892 );
1893 }
1894 DropKind::Storage => {
1895 // Ordinarily, storage-dead nodes are not emitted on unwind, so we don't
1896 // need to adjust `unwind_to` on this path. However, in some specific cases
1897 // we *do* emit storage-dead nodes on the unwind path, and in that case now that
1898 // the storage-dead has completed, we need to adjust the `unwind_to` pointer
1899 // so that any future drops we emit will not register storage-dead.
1900 if storage_dead_on_unwind {
1901 debug_assert_eq!(
1902 unwind_drops.drop_nodes[unwind_to].data.local,
1903 drop_data.local
1904 );
1905 debug_assert_eq!(unwind_drops.drop_nodes[unwind_to].data.kind, drop_data.kind);
1906 unwind_to = unwind_drops.drop_nodes[unwind_to].next;
1907 }
1908 if let Some(idx) = dropline_to {
1909 debug_assert_eq!(coroutine_drops.drop_nodes[idx].data.local, drop_data.local);
1910 debug_assert_eq!(coroutine_drops.drop_nodes[idx].data.kind, drop_data.kind);
1911 dropline_to = Some(coroutine_drops.drop_nodes[idx].next);
1912 }
1913 // Only temps and vars need their storage dead.
1914 assert!(local.index() > arg_count);
1915 cfg.push(block, Statement::new(source_info, StatementKind::StorageDead(local)));
1916 }
1917 }
1918 }
1919 block.unit()
1920}
1921
1922impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
1923 /// Build a drop tree for a breakable scope.
1924 ///
1925 /// If `continue_block` is `Some`, then the tree is for `continue` inside a
1926 /// loop. Otherwise this is for `break` or `return`.
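    ///
    /// As a hedged illustration (simplified; `make_guard` and `done` are placeholders),
    /// both kinds of early exits feed such a drop tree:
    ///
    /// ```ignore (illustrative)
    /// loop {
    ///     let _guard = make_guard();
    ///     if done() { break; }   // exit recorded in the `break` drop tree
    ///     continue;              // exit recorded in the `continue` drop tree
    /// }
    /// ```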
1927 fn build_exit_tree(
1928 &mut self,
1929 mut drops: DropTree,
1930 else_scope: region::Scope,
1931 span: Span,
1932 continue_block: Option<BasicBlock>,
1933 ) -> Option<BlockAnd<()>> {
1934 let blocks = drops.build_mir::<ExitScopes>(&mut self.cfg, continue_block);
1935 let is_coroutine = self.coroutine.is_some();
1936
1937 // Link the exit drop tree to unwind drop tree.
1938 if drops.drop_nodes.iter().any(|drop_node| drop_node.data.kind == DropKind::Value) {
1939 let unwind_target = self.diverge_cleanup_target(else_scope, span);
1940 let mut unwind_indices = IndexVec::from_elem_n(unwind_target, 1);
1941 for (drop_idx, drop_node) in drops.drop_nodes.iter_enumerated().skip(1) {
1942 match drop_node.data.kind {
1943 DropKind::Storage | DropKind::ForLint => {
1944 if is_coroutine {
1945 let unwind_drop = self
1946 .scopes
1947 .unwind_drops
1948 .add_drop(drop_node.data, unwind_indices[drop_node.next]);
1949 unwind_indices.push(unwind_drop);
1950 } else {
1951 unwind_indices.push(unwind_indices[drop_node.next]);
1952 }
1953 }
1954 DropKind::Value => {
1955 let unwind_drop = self
1956 .scopes
1957 .unwind_drops
1958 .add_drop(drop_node.data, unwind_indices[drop_node.next]);
1959 self.scopes.unwind_drops.add_entry_point(
1960 blocks[drop_idx].unwrap(),
1961 unwind_indices[drop_node.next],
1962 );
1963 unwind_indices.push(unwind_drop);
1964 }
1965 }
1966 }
1967 }
1968 // Link the exit drop tree to dropline drop tree (coroutine drop path) for async drops
1969 if is_coroutine
1970 && drops.drop_nodes.iter().any(|DropNode { data, next: _ }| {
1971 data.kind == DropKind::Value && self.is_async_drop(data.local)
1972 })
1973 {
1974 let dropline_target = self.diverge_dropline_target(else_scope, span);
1975 let mut dropline_indices = IndexVec::from_elem_n(dropline_target, 1);
1976 for (drop_idx, drop_data) in drops.drop_nodes.iter_enumerated().skip(1) {
1977 let coroutine_drop = self
1978 .scopes
1979 .coroutine_drops
1980 .add_drop(drop_data.data, dropline_indices[drop_data.next]);
1981 match drop_data.data.kind {
1982 DropKind::Storage | DropKind::ForLint => {}
1983 DropKind::Value => {
1984 if self.is_async_drop(drop_data.data.local) {
1985 self.scopes.coroutine_drops.add_entry_point(
1986 blocks[drop_idx].unwrap(),
1987 dropline_indices[drop_data.next],
1988 );
1989 }
1990 }
1991 }
1992 dropline_indices.push(coroutine_drop);
1993 }
1994 }
1995 blocks[ROOT_NODE].map(BasicBlock::unit)
1996 }
1997
1998 /// Build the unwind and coroutine drop trees.
1999 pub(crate) fn build_drop_trees(&mut self) {
2000 if self.coroutine.is_some() {
2001 self.build_coroutine_drop_trees();
2002 } else {
2003 Self::build_unwind_tree(
2004 &mut self.cfg,
2005 &mut self.scopes.unwind_drops,
2006 self.fn_span,
2007 &mut None,
2008 );
2009 }
2010 }
2011
2012 fn build_coroutine_drop_trees(&mut self) {
2013 // Build the drop tree for dropping the coroutine while it's suspended.
2014 let drops = &mut self.scopes.coroutine_drops;
2015 let cfg = &mut self.cfg;
2016 let fn_span = self.fn_span;
2017 let blocks = drops.build_mir::<CoroutineDrop>(cfg, None);
2018 if let Some(root_block) = blocks[ROOT_NODE] {
2019 cfg.terminate(
2020 root_block,
2021 SourceInfo::outermost(fn_span),
2022 TerminatorKind::CoroutineDrop,
2023 );
2024 }
2025
2026 // Build the drop tree for unwinding in the normal control flow paths.
2027 let resume_block = &mut None;
2028 let unwind_drops = &mut self.scopes.unwind_drops;
2029 Self::build_unwind_tree(cfg, unwind_drops, fn_span, resume_block);
2030
2031 // Build the drop tree for unwinding when dropping a suspended
2032 // coroutine.
2033 //
2034        // This is a different tree from the standard unwind paths here to
2035 // prevent drop elaboration from creating drop flags that would have
2036 // to be captured by the coroutine. I'm not sure how important this
2037 // optimization is, but it is here.
2038 for (drop_idx, drop_node) in drops.drop_nodes.iter_enumerated() {
2039 if let DropKind::Value = drop_node.data.kind
2040 && let Some(bb) = blocks[drop_idx]
2041 {
2042 debug_assert!(drop_node.next < drops.drop_nodes.next_index());
2043 drops.entry_points.push((drop_node.next, bb));
2044 }
2045 }
2046 Self::build_unwind_tree(cfg, drops, fn_span, resume_block);
2047 }
2048
2049 fn build_unwind_tree(
2050 cfg: &mut CFG<'tcx>,
2051 drops: &mut DropTree,
2052 fn_span: Span,
2053 resume_block: &mut Option<BasicBlock>,
2054 ) {
2055 let blocks = drops.build_mir::<Unwind>(cfg, *resume_block);
2056 if let (None, Some(resume)) = (*resume_block, blocks[ROOT_NODE]) {
2057 cfg.terminate(resume, SourceInfo::outermost(fn_span), TerminatorKind::UnwindResume);
2058
2059 *resume_block = blocks[ROOT_NODE];
2060 }
2061 }
2062}
2063
2064// DropTreeBuilder implementations.
2065
2066struct ExitScopes;
2067
2068impl<'tcx> DropTreeBuilder<'tcx> for ExitScopes {
2069 fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock {
2070 cfg.start_new_block()
2071 }
2072 fn link_entry_point(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) {
2073 // There should be an existing terminator with real source info and a
2074 // dummy TerminatorKind. Replace it with a proper goto.
2075 // (The dummy is added by `break_scope` and `break_for_else`.)
2076 let term = cfg.block_data_mut(from).terminator_mut();
2077 if let TerminatorKind::UnwindResume = term.kind {
2078 term.kind = TerminatorKind::Goto { target: to };
2079 } else {
2080 span_bug!(term.source_info.span, "unexpected dummy terminator kind: {:?}", term.kind);
2081 }
2082 }
2083}
2084
2085struct CoroutineDrop;
2086
2087impl<'tcx> DropTreeBuilder<'tcx> for CoroutineDrop {
2088 fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock {
2089 cfg.start_new_block()
2090 }
2091 fn link_entry_point(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) {
2092 let term = cfg.block_data_mut(from).terminator_mut();
2093 if let TerminatorKind::Yield { ref mut drop, .. } = term.kind {
2094 *drop = Some(to);
2095 } else if let TerminatorKind::Drop { ref mut drop, .. } = term.kind {
2096 *drop = Some(to);
2097 } else {
2098 span_bug!(
2099 term.source_info.span,
2100 "cannot enter coroutine drop tree from {:?}",
2101 term.kind
2102 )
2103 }
2104 }
2105}
2106
2107struct Unwind;
2108
2109impl<'tcx> DropTreeBuilder<'tcx> for Unwind {
2110 fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock {
2111 cfg.start_new_cleanup_block()
2112 }
2113 fn link_entry_point(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) {
2114 let term = &mut cfg.block_data_mut(from).terminator_mut();
2115 match &mut term.kind {
2116 TerminatorKind::Drop { unwind, .. } => {
2117 if let UnwindAction::Cleanup(unwind) = *unwind {
2118 let source_info = term.source_info;
2119 cfg.terminate(unwind, source_info, TerminatorKind::Goto { target: to });
2120 } else {
2121 *unwind = UnwindAction::Cleanup(to);
2122 }
2123 }
2124 TerminatorKind::FalseUnwind { unwind, .. }
2125 | TerminatorKind::Call { unwind, .. }
2126 | TerminatorKind::Assert { unwind, .. }
2127 | TerminatorKind::InlineAsm { unwind, .. } => {
2128 *unwind = UnwindAction::Cleanup(to);
2129 }
2130 TerminatorKind::Goto { .. }
2131 | TerminatorKind::SwitchInt { .. }
2132 | TerminatorKind::UnwindResume
2133 | TerminatorKind::UnwindTerminate(_)
2134 | TerminatorKind::Return
2135 | TerminatorKind::TailCall { .. }
2136 | TerminatorKind::Unreachable
2137 | TerminatorKind::Yield { .. }
2138 | TerminatorKind::CoroutineDrop
2139 | TerminatorKind::FalseEdge { .. } => {
2140 span_bug!(term.source_info.span, "cannot unwind from {:?}", term.kind)
2141 }
2142 }
2143 }
2144}